| repo_name (string, 5-100 chars) | path (string, 4-375 chars) | copies (991 classes) | size (string, 4-7 chars) | content (string, 666-1M chars) | license (15 classes) |
---|---|---|---|---|---|
sgp715/python_speaks | sr/__main__.py | 6 | 1181 | import speech_recognition as sr
r = sr.Recognizer()
m = sr.Microphone()
try:
    print("A moment of silence, please...")
    with m as source: r.adjust_for_ambient_noise(source)
    print("Set minimum energy threshold to {}".format(r.energy_threshold))
    while True:
        print("Say something!")
        with m as source: audio = r.listen(source)
        print("Got it! Now to recognize it...")
        try:
            # recognize speech using Google Speech Recognition
            value = r.recognize_google(audio)
            # we need some special handling here to correctly print unicode characters to standard output
            if str is bytes:  # this version of Python uses bytes for strings (Python 2)
                print(u"You said {}".format(value).encode("utf-8"))
            else:  # this version of Python uses unicode for strings (Python 3+)
                print("You said {}".format(value))
        except sr.UnknownValueError:
            print("Oops! Didn't catch that")
        except sr.RequestError as e:
            print("Uh oh! Couldn't request results from Google Speech Recognition service; {0}".format(e))
except KeyboardInterrupt:
    pass
| gpl-2.0 |
admiyo/python-nss | test/test_cipher.py | 1 | 6260 | #!/usr/bin/python
import sys
import os
import unittest
import nss.nss as nss
#-------------------------------------------------------------------------------
verbose = False
mechanism = nss.CKM_DES_CBC_PAD
plain_text = "Encrypt me!"
key = "e8:a7:7c:e2:05:63:6a:31"
iv = "e4:bb:3b:d3:c3:71:2e:58"
in_filename = sys.argv[0]
chunk_size = 128
#-------------------------------------------------------------------------------
def setup_contexts(mechanism, key, iv):
# Get a PK11 slot based on the cipher
slot = nss.get_best_slot(mechanism)
# If key was supplied use it, otherwise generate one
if key:
if verbose:
print "using supplied key data"
print "key:\n%s" % (key)
key_si = nss.SecItem(nss.read_hex(key))
sym_key = nss.import_sym_key(slot, mechanism, nss.PK11_OriginUnwrap,
nss.CKA_ENCRYPT, key_si)
else:
if verbose:
print "generating key data"
sym_key = slot.key_gen(mechanism, None, slot.get_best_key_length(mechanism))
# If initialization vector was supplied use it, otherwise set it to None
if iv:
if verbose:
print "supplied iv:\n%s" % (iv)
iv_data = nss.read_hex(iv)
iv_si = nss.SecItem(iv_data)
iv_param = nss.param_from_iv(mechanism, iv_si)
else:
iv_length = nss.get_iv_length(mechanism)
if iv_length > 0:
iv_data = nss.generate_random(iv_length)
iv_si = nss.SecItem(iv_data)
iv_param = nss.param_from_iv(mechanism, iv_si)
if verbose:
print "generated %d byte initialization vector: %s" % \
(iv_length, nss.data_to_hex(iv_data, separator=":"))
else:
iv_param = None
# Create an encoding context
encoding_ctx = nss.create_context_by_sym_key(mechanism, nss.CKA_ENCRYPT,
sym_key, iv_param)
# Create a decoding context
decoding_ctx = nss.create_context_by_sym_key(mechanism, nss.CKA_DECRYPT,
sym_key, iv_param)
return encoding_ctx, decoding_ctx
#-------------------------------------------------------------------------------
class TestCipher(unittest.TestCase):
def setUp(self):
nss.nss_init_nodb()
self.encoding_ctx, self.decoding_ctx = setup_contexts(mechanism, key, iv)
def tearDown(self):
del self.encoding_ctx
del self.decoding_ctx
nss.nss_shutdown()
def test_string(self):
if verbose:
print "Plain Text:\n%s" % (plain_text)
# Encode the plain text by feeding it to cipher_op getting cipher text back.
# Append the final bit of cipher text by calling digest_final
cipher_text = self.encoding_ctx.cipher_op(plain_text)
cipher_text += self.encoding_ctx.digest_final()
if verbose:
print "Cipher Text:\n%s" % (nss.data_to_hex(cipher_text, separator=":"))
# Decode the cipher text by feeding it to cipher_op getting plain text back.
# Append the final bit of plain text by calling digest_final
decoded_text = self.decoding_ctx.cipher_op(cipher_text)
decoded_text += self.decoding_ctx.digest_final()
if verbose:
print "Decoded Text:\n%s" % (decoded_text)
# Validate the encryption/decryption by comparing the decoded text with
# the original plain text, they should match.
self.assertEqual(decoded_text, plain_text)
self.assertNotEqual(cipher_text, plain_text)
def test_file(self):
encrypted_filename = os.path.basename(in_filename) + ".encrypted"
decrypted_filename = os.path.basename(in_filename) + ".decrypted"
in_file = open(in_filename, "r")
encrypted_file = open(encrypted_filename, "w")
if verbose:
print "Encrypting file \"%s\" to \"%s\"" % (in_filename, encrypted_filename)
# Encode the data read from a file in chunks
while True:
# Read a chunk of data until EOF, encrypt it and write the encrypted data
in_data = in_file.read(chunk_size)
if len(in_data) == 0: # EOF
break
encrypted_data = self.encoding_ctx.cipher_op(in_data)
encrypted_file.write(encrypted_data)
# Done encoding the input, get the final encoded data, write it, close files
encrypted_data = self.encoding_ctx.digest_final()
encrypted_file.write(encrypted_data)
in_file.close()
encrypted_file.close()
# Decode the encoded file in a similar fashion
if verbose:
print "Decrypting file \"%s\" to \"%s\"" % (encrypted_filename, decrypted_filename)
encrypted_file = open(encrypted_filename, "r")
decrypted_file = open(decrypted_filename, "w")
while True:
# Read a chunk of data until EOF, encrypt it and write the encrypted data
in_data = encrypted_file.read(chunk_size)
if len(in_data) == 0: # EOF
break
decrypted_data = self.decoding_ctx.cipher_op(in_data)
decrypted_file.write(decrypted_data)
# Done encoding the input, get the final encoded data, write it, close files
decrypted_data = self.decoding_ctx.digest_final()
decrypted_file.write(decrypted_data)
encrypted_file.close()
decrypted_file.close()
# Validate the encryption/decryption by comparing the decoded text with
# the original plain text, they should match.
in_data = open(in_filename).read()
encrypted_data = open(encrypted_filename).read()
decrypted_data = open(decrypted_filename).read()
if decrypted_data != in_data:
result = 1
print "FAILED! decrypted_data != in_data"
if encrypted_data == in_data:
result = 1
print "FAILED! encrypted_data == in_data"
# clean up
os.unlink(encrypted_filename)
os.unlink(decrypted_filename)
#-------------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
edawine/fatools | fatools/scripts/fautil.py | 2 | 3181 |
import sys, argparse, yaml, os
from fatools.lib.utils import cout, cerr, cexit
def init_argparser():
p = argparse.ArgumentParser('fautil')
p.add_argument('--info', default=False, action='store_true',
help = 'get information on FA assay')
p.add_argument('--view', default=False, action='store_true',
help = 'view information')
p.add_argument('--analyze', default=False, action='store_true',
help = 'analyze single FSA file')
p.add_argument('--file', default=False,
help = 'input file')
p.add_argument('--sqldb', default=False,
help = 'Sqlite database file')
p.add_argument('--sizestandard', default='LIZ600',
help = 'Size standard')
return p
cache_traces = {}
def main(args):
do_fautil(args)
def do_fautil(args):
if args.sqldb:
dbh = get_dbhandler(args)
else:
dbh = None
if args.info is not False:
do_info(args, dbh)
if args.view is not False:
do_view(args, dbh)
if args.analyze is not False:
do_analyze(args)
def get_traces(args, dbh):
traces = []
if dbh is None:
# get from infile
infile = args.file
if infile is False:
cexit('E - Please provide a filename or Sqlite database path')
abspath = os.path.abspath( args.file )
if abspath in cache_traces:
traces.append((abspath, cache_traces[abspath]))
else:
from fatools.lib.fautil.traceio import read_abif_stream
with open( abspath, 'rb') as instream:
t = read_abif_stream(instream)
cache_traces[abspath] = t
traces.append((abspath, t))
else:
pass
return traces
def do_info(args, dbh):
traces = get_traces(args, dbh)
for abspath, trace in traces:
cout('I - trace: %s' % abspath)
cout('I - runtime: %s' % trace.get_run_start_time())
def do_view(args, dbh):
traces = get_traces(args, dbh)
from fatools.lib.gui.viewer import viewer
for abspath, trace in traces:
viewer( trace )
def do_analyze(args):
""" open a tracefile, performs fragment analysis (scan & call only)
"""
from fatools.lib.fautil.traceio import read_abif_stream
from fatools.lib.fautil.traceutils import separate_channels
from fatools.lib.fsmodels.models import Assay, Marker, Panel
from fatools.lib import params
scanning_parameter = params.Params()
# create dummy markers
ladder = Marker('ladder', 10, 600, 0, None)
# create dummy panel
dummy_panel = Panel( '-', {
'ladder': args.sizestandard,
'markers': {},
})
with open(args.file, 'rb') as in_stream:
cerr('Reading FSA file: %s' % args.file)
t = read_abif_stream(in_stream)
# create a new Assay and add trace
assay = Assay()
assay.size_standard = args.sizestandard
assay._trace = t
# create all channels
assay.create_channels()
# assign all channels
assay.assign_channels( panel = dummy_panel )
# scan for peaks
assay.scan(scanning_parameter)
# scan all channels
| lgpl-3.0 |
ericzolf/ansible | lib/ansible/playbook/role/include.py | 83 | 2605 | # (c) 2014 Michael DeHaan, <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.module_utils.six import iteritems, string_types
from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject
from ansible.playbook.attribute import Attribute, FieldAttribute
from ansible.playbook.role.definition import RoleDefinition
from ansible.playbook.role.requirement import RoleRequirement
from ansible.module_utils._text import to_native
__all__ = ['RoleInclude']
class RoleInclude(RoleDefinition):

    """
    A derivative of RoleDefinition, used by playbook code when a role
    is included for execution in a play.
    """

    _delegate_to = FieldAttribute(isa='string')
    _delegate_facts = FieldAttribute(isa='bool', default=False)

    def __init__(self, play=None, role_basedir=None, variable_manager=None, loader=None, collection_list=None):
        super(RoleInclude, self).__init__(play=play, role_basedir=role_basedir, variable_manager=variable_manager,
                                          loader=loader, collection_list=collection_list)

    @staticmethod
    def load(data, play, current_role_path=None, parent_role=None, variable_manager=None, loader=None, collection_list=None):
        if not (isinstance(data, string_types) or isinstance(data, dict) or isinstance(data, AnsibleBaseYAMLObject)):
            raise AnsibleParserError("Invalid role definition: %s" % to_native(data))

        if isinstance(data, string_types) and ',' in data:
            raise AnsibleError("Invalid old style role requirement: %s" % data)

        ri = RoleInclude(play=play, role_basedir=current_role_path, variable_manager=variable_manager, loader=loader, collection_list=collection_list)
        return ri.load_data(data, variable_manager=variable_manager, loader=loader)
| gpl-3.0 |
AlexJF/pelican-autostatic | autostatic.py | 1 | 9114 | #!/usr/bin/env python
# encoding: utf-8
from __future__ import unicode_literals, print_function
import logging
import os
import re
import shutil
# For html escaping/unescaping
import HTMLParser
try:
from html import escape as html_escape # py3
except ImportError:
from cgi import escape as html_escape # py2
import six
from blinker import signal
from pelican import signals, generators
from pelican.contents import Static
from pelican.utils import mkdir_p, get_relative_path
logger = logging.getLogger(__name__)
html_parser = HTMLParser.HTMLParser()
autostatic_generator = None
detected_autostatic_paths = {}
autostatic_path_found = signal("autostatic_path_found")
CUSTOM_STATIC_REF_PATTERN_KEY = "AUTOSTATIC_REFERENCE_PATTERN"
USE_PELICAN_LIKE_REF_KEY = "AUTOSTATIC_USE_PELICANLIKE_REF"
# Works everywhere
DEFAULT_STATIC_REF_PATTERN = r"""{static(?:\s+|\|)((?:"|'|"|'|"|')?)(?P<path>.+?)\1(?:(?:\s+|\|)(?P<extra>.*))?\s*}"""
# Works just in url-value attributes
PELICAN_LIKE_REF_TAG = r"""{static(?:(?:\s+|\|)(?P<extra>.*))?}"""
PELICAN_LIKE_HREF_PATTERN = r"""
(?P<markup><\s*[^\>]* # match tag with all url-value attributes
(?:href|src|poster|data|cite|formaction|action)\s*=)
(?P<quote>["\']) # require value to be quoted
(?P<ref>{0}(?P<path>.*?)) # the url value
\2""".format(PELICAN_LIKE_REF_TAG)
def html_unescape(text):
if text is None:
return text
return html_parser.unescape(text)
def parse_static_references(instance, text):
def _get_static_path(match_obj):
path = match_obj.group("path")
extra = html_unescape(match_obj.group("extra"))
extra_dict = {}
instance_destination_dir = os.path.dirname(instance.save_as)
relative_path = False
using_relative_urls = instance._context.get("RELATIVE_URLS")
if extra:
for match in re.finditer(r'(\w+)="?((?:(?<!")[^\s|]+|(?<=")(?:\\.|[^"\\])*(?=")))"?', extra):
extra_dict[match.group(1)] = match.group(2)
if path.startswith('/'):
source_path = path[1:]
destination_path = source_path
relative_path = False
else:
source_path = instance.get_relative_source_path(
os.path.join(instance.relative_dir, path))
destination_path = os.path.join(instance_destination_dir, path)
relative_path = True
if "output" in extra_dict:
output_override = extra_dict["output"]
if output_override.startswith('/'):
destination_path = output_override[1:]
relative_path = False
else:
destination_path = os.path.join(instance_destination_dir, output_override)
relative_path = True
if using_relative_urls:
siteurl = get_relative_path(instance.save_as)
else:
siteurl = instance._context.get("localsiteurl", "")
if relative_path and using_relative_urls:
url = os.path.relpath(destination_path, instance_destination_dir)
else:
url = siteurl + "/" + destination_path
if "url" in extra_dict:
url_override = extra_dict["url"]
if url_override.startswith('/'):
url = siteurl + url_override
else:
url = url_override
if not using_relative_urls:
url = siteurl + "/" + os.path.dirname(instance.save_as) + "/" + url
url = url.replace('\\', '/') # for Windows paths.
static_path_obj = StaticPath(source_path, destination_path, url, extra_dict)
autostatic_path_found.send(autostatic_path=static_path_obj)
logger.debug("Detected autostatic path: {} -> {} ({})".format(
static_path_obj.source,
static_path_obj.destination,
static_path_obj.url))
detected_autostatic_paths[static_path_obj.destination] = static_path_obj.source
return html_escape(static_path_obj.url)
def _parse_pelican_like_reference(m):
return ''.join((m.group('markup'), m.group('quote'),
_get_static_path(m),
m.group('quote')))
if text is None:
return text
if isinstance(text, six.string_types):
settings = instance.settings
static_ref_re_pattern = DEFAULT_STATIC_REF_PATTERN
if settings and CUSTOM_STATIC_REF_PATTERN_KEY in settings:
static_ref_re_pattern = settings[CUSTOM_STATIC_REF_PATTERN_KEY]
text = re.sub(static_ref_re_pattern, _get_static_path, text)
if settings and settings.get(USE_PELICAN_LIKE_REF_KEY, False):
text = re.sub(PELICAN_LIKE_HREF_PATTERN,
_parse_pelican_like_reference, text, flags=re.X)
return text
elif isinstance(text, list):
return [parse_static_references(instance, item) for item in text]
else:
return text
class StaticPath(object):
def __init__(self, source, destination, url, extra):
self._source = source
self._original_destination = destination
self._destination = destination
self._original_url = url
self._url = url
self._extra = extra
@property
def source(self):
return self._source
@property
def original_destination(self):
return self._original_destination
@property
def destination(self):
return self._destination
@destination.setter
def destination(self, value):
self._destination = value
@property
def original_url(self):
return self._original_url
@property
def url(self):
return self._url
@url.setter
def url(self, value):
self._url = value
@property
def extra(self):
return self._extra
class AutoStaticGenerator(generators.Generator):
"""copy static paths (what you want to copy, like images, medias etc.
to output"""
autostatic_generator_init = signal("autostatic_generator_init")
autostatic_generator_preread = signal("autostatic_generator_preread")
autostatic_generator_context = signal("autostatic_generator_context")
autostatic_generator_finalized = signal("autostatic_generator_finalized")
def __init__(self, *args, **kwargs):
super(AutoStaticGenerator, self).__init__(*args, **kwargs)
self.autostatic_files = []
self.autostatic_generator_init.send(self)
def add_static_path(self, source_path, save_as):
try:
static = self.readers.read_file(
base_path=self.path, path=source_path, content_class=Static,
fmt='static', context=self.context,
preread_signal=self.autostatic_generator_preread,
preread_sender=self,
context_signal=self.autostatic_generator_context,
context_sender=self)
static.override_save_as = save_as
self.autostatic_files.append(static)
self.add_source_path(static)
except Exception as e:
logger.error("Could not process %s\n%s", source_path, e,
exc_info=self.settings.get("DEBUG", False))
def finalize_context(self):
for save_as, source_path in detected_autostatic_paths.items():
self.add_static_path(source_path, save_as)
self._update_context(('autostatic_files',))
self.autostatic_generator_finalized.send(self)
def generate_output(self, writer):
# copy all Static files
for sc in self.autostatic_files:
source_path = os.path.join(self.path, sc.source_path)
save_as = os.path.join(self.output_path, sc.save_as)
mkdir_p(os.path.dirname(save_as))
shutil.copy2(source_path, save_as)
logger.info('Copying %s to %s', sc.source_path, sc.save_as)
def find_static_references(instance):
if hasattr(instance, "_content"):
instance._content = parse_static_references(instance, instance._content)
if hasattr(instance, "_summary"):
instance._summary = parse_static_references(instance, instance._summary)
for key in instance.metadata.keys():
instance.metadata[key] = parse_static_references(instance, instance.metadata[key])
try:
setattr(instance, key.lower(), parse_static_references(instance, getattr(instance, key.lower())))
except AttributeError as e:
pass
def get_generators(_):
return AutoStaticGenerator
def autostatic_generator_initialized(generator):
global autostatic_generator
autostatic_generator = generator
def generators_finished(_):
global autostatic_generator
autostatic_generator.finalize_context()
def register():
signals.content_object_init.connect(find_static_references)
signals.get_generators.connect(get_generators)
AutoStaticGenerator.autostatic_generator_init.connect(autostatic_generator_initialized)
signals.get_writer.connect(generators_finished)
| apache-2.0 |
wouwei/PiLapse | picam/picamEnv/Lib/site-packages/pip/_vendor/requests/packages/chardet/sbcsgroupprober.py | 2936 | 3291 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetgroupprober import CharSetGroupProber
from .sbcharsetprober import SingleByteCharSetProber
from .langcyrillicmodel import (Win1251CyrillicModel, Koi8rModel,
Latin5CyrillicModel, MacCyrillicModel,
Ibm866Model, Ibm855Model)
from .langgreekmodel import Latin7GreekModel, Win1253GreekModel
from .langbulgarianmodel import Latin5BulgarianModel, Win1251BulgarianModel
from .langhungarianmodel import Latin2HungarianModel, Win1250HungarianModel
from .langthaimodel import TIS620ThaiModel
from .langhebrewmodel import Win1255HebrewModel
from .hebrewprober import HebrewProber
class SBCSGroupProber(CharSetGroupProber):
    def __init__(self):
        CharSetGroupProber.__init__(self)
        self._mProbers = [
            SingleByteCharSetProber(Win1251CyrillicModel),
            SingleByteCharSetProber(Koi8rModel),
            SingleByteCharSetProber(Latin5CyrillicModel),
            SingleByteCharSetProber(MacCyrillicModel),
            SingleByteCharSetProber(Ibm866Model),
            SingleByteCharSetProber(Ibm855Model),
            SingleByteCharSetProber(Latin7GreekModel),
            SingleByteCharSetProber(Win1253GreekModel),
            SingleByteCharSetProber(Latin5BulgarianModel),
            SingleByteCharSetProber(Win1251BulgarianModel),
            SingleByteCharSetProber(Latin2HungarianModel),
            SingleByteCharSetProber(Win1250HungarianModel),
            SingleByteCharSetProber(TIS620ThaiModel),
        ]
        hebrewProber = HebrewProber()
        logicalHebrewProber = SingleByteCharSetProber(Win1255HebrewModel,
                                                      False, hebrewProber)
        visualHebrewProber = SingleByteCharSetProber(Win1255HebrewModel, True,
                                                     hebrewProber)
        hebrewProber.set_model_probers(logicalHebrewProber, visualHebrewProber)
        self._mProbers.extend([hebrewProber, logicalHebrewProber,
                               visualHebrewProber])
        self.reset()
| apache-2.0 |
elopezga/ErrorRate | ivi/agilent/agilentDSOX4054A.py | 7 | 1689 | """
Python Interchangeable Virtual Instrument Library
Copyright (c) 2014 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .agilent4000A import *
class agilentDSOX4054A(agilent4000A):
    "Agilent InfiniiVision DSOX4054A IVI oscilloscope driver"

    def __init__(self, *args, **kwargs):
        self.__dict__.setdefault('_instrument_id', 'DSO-X 4054A')

        super(agilentDSOX4054A, self).__init__(*args, **kwargs)

        self._analog_channel_count = 4
        self._digital_channel_count = 0
        self._channel_count = self._analog_channel_count + self._digital_channel_count
        self._bandwidth = 500e6

        self._init_channels()
| mit |
tboyce021/home-assistant | tests/components/rest/test_switch.py | 3 | 8431 | """The tests for the REST switch platform."""
import asyncio
import aiohttp
import homeassistant.components.rest.switch as rest
from homeassistant.components.switch import DOMAIN as SWITCH_DOMAIN
from homeassistant.const import (
CONF_HEADERS,
CONF_NAME,
CONF_PARAMS,
CONF_PLATFORM,
CONF_RESOURCE,
CONTENT_TYPE_JSON,
HTTP_INTERNAL_SERVER_ERROR,
HTTP_NOT_FOUND,
HTTP_OK,
)
from homeassistant.helpers.template import Template
from homeassistant.setup import async_setup_component
from tests.common import assert_setup_component
"""Tests for setting up the REST switch platform."""
NAME = "foo"
METHOD = "post"
RESOURCE = "http://localhost/"
STATE_RESOURCE = RESOURCE
HEADERS = {"Content-type": CONTENT_TYPE_JSON}
AUTH = None
PARAMS = None
async def test_setup_missing_config(hass):
"""Test setup with configuration missing required entries."""
assert not await rest.async_setup_platform(hass, {CONF_PLATFORM: rest.DOMAIN}, None)
async def test_setup_missing_schema(hass):
"""Test setup with resource missing schema."""
assert not await rest.async_setup_platform(
hass,
{CONF_PLATFORM: rest.DOMAIN, CONF_RESOURCE: "localhost"},
None,
)
async def test_setup_failed_connect(hass, aioclient_mock):
"""Test setup when connection error occurs."""
aioclient_mock.get("http://localhost", exc=aiohttp.ClientError)
assert not await rest.async_setup_platform(
hass,
{CONF_PLATFORM: rest.DOMAIN, CONF_RESOURCE: "http://localhost"},
None,
)
async def test_setup_timeout(hass, aioclient_mock):
"""Test setup when connection timeout occurs."""
aioclient_mock.get("http://localhost", exc=asyncio.TimeoutError())
assert not await rest.async_setup_platform(
hass,
{CONF_PLATFORM: rest.DOMAIN, CONF_RESOURCE: "http://localhost"},
None,
)
async def test_setup_minimum(hass, aioclient_mock):
"""Test setup with minimum configuration."""
aioclient_mock.get("http://localhost", status=HTTP_OK)
with assert_setup_component(1, SWITCH_DOMAIN):
assert await async_setup_component(
hass,
SWITCH_DOMAIN,
{
SWITCH_DOMAIN: {
CONF_PLATFORM: rest.DOMAIN,
CONF_RESOURCE: "http://localhost",
}
},
)
assert aioclient_mock.call_count == 1
async def test_setup_query_params(hass, aioclient_mock):
"""Test setup with query params."""
aioclient_mock.get("http://localhost/?search=something", status=HTTP_OK)
with assert_setup_component(1, SWITCH_DOMAIN):
assert await async_setup_component(
hass,
SWITCH_DOMAIN,
{
SWITCH_DOMAIN: {
CONF_PLATFORM: rest.DOMAIN,
CONF_RESOURCE: "http://localhost",
CONF_PARAMS: {"search": "something"},
}
},
)
print(aioclient_mock)
assert aioclient_mock.call_count == 1
async def test_setup(hass, aioclient_mock):
"""Test setup with valid configuration."""
aioclient_mock.get("http://localhost", status=HTTP_OK)
assert await async_setup_component(
hass,
SWITCH_DOMAIN,
{
SWITCH_DOMAIN: {
CONF_PLATFORM: rest.DOMAIN,
CONF_NAME: "foo",
CONF_RESOURCE: "http://localhost",
CONF_HEADERS: {"Content-type": CONTENT_TYPE_JSON},
rest.CONF_BODY_ON: "custom on text",
rest.CONF_BODY_OFF: "custom off text",
}
},
)
assert aioclient_mock.call_count == 1
assert_setup_component(1, SWITCH_DOMAIN)
async def test_setup_with_state_resource(hass, aioclient_mock):
"""Test setup with valid configuration."""
aioclient_mock.get("http://localhost", status=HTTP_NOT_FOUND)
aioclient_mock.get("http://localhost/state", status=HTTP_OK)
assert await async_setup_component(
hass,
SWITCH_DOMAIN,
{
SWITCH_DOMAIN: {
CONF_PLATFORM: rest.DOMAIN,
CONF_NAME: "foo",
CONF_RESOURCE: "http://localhost",
rest.CONF_STATE_RESOURCE: "http://localhost/state",
CONF_HEADERS: {"Content-type": CONTENT_TYPE_JSON},
rest.CONF_BODY_ON: "custom on text",
rest.CONF_BODY_OFF: "custom off text",
}
},
)
assert aioclient_mock.call_count == 1
assert_setup_component(1, SWITCH_DOMAIN)
"""Tests for REST switch platform."""
def _setup_test_switch(hass):
body_on = Template("on", hass)
body_off = Template("off", hass)
switch = rest.RestSwitch(
NAME,
RESOURCE,
STATE_RESOURCE,
METHOD,
HEADERS,
PARAMS,
AUTH,
body_on,
body_off,
None,
10,
True,
)
switch.hass = hass
return switch, body_on, body_off
def test_name(hass):
"""Test the name."""
switch, body_on, body_off = _setup_test_switch(hass)
assert NAME == switch.name
def test_is_on_before_update(hass):
"""Test is_on in initial state."""
switch, body_on, body_off = _setup_test_switch(hass)
assert switch.is_on is None
async def test_turn_on_success(hass, aioclient_mock):
"""Test turn_on."""
aioclient_mock.post(RESOURCE, status=HTTP_OK)
switch, body_on, body_off = _setup_test_switch(hass)
await switch.async_turn_on()
assert body_on.template == aioclient_mock.mock_calls[-1][2].decode()
assert switch.is_on
async def test_turn_on_status_not_ok(hass, aioclient_mock):
"""Test turn_on when error status returned."""
aioclient_mock.post(RESOURCE, status=HTTP_INTERNAL_SERVER_ERROR)
switch, body_on, body_off = _setup_test_switch(hass)
await switch.async_turn_on()
assert body_on.template == aioclient_mock.mock_calls[-1][2].decode()
assert switch.is_on is None
async def test_turn_on_timeout(hass, aioclient_mock):
"""Test turn_on when timeout occurs."""
aioclient_mock.post(RESOURCE, status=HTTP_INTERNAL_SERVER_ERROR)
switch, body_on, body_off = _setup_test_switch(hass)
await switch.async_turn_on()
assert switch.is_on is None
async def test_turn_off_success(hass, aioclient_mock):
"""Test turn_off."""
aioclient_mock.post(RESOURCE, status=HTTP_OK)
switch, body_on, body_off = _setup_test_switch(hass)
await switch.async_turn_off()
assert body_off.template == aioclient_mock.mock_calls[-1][2].decode()
assert not switch.is_on
async def test_turn_off_status_not_ok(hass, aioclient_mock):
"""Test turn_off when error status returned."""
aioclient_mock.post(RESOURCE, status=HTTP_INTERNAL_SERVER_ERROR)
switch, body_on, body_off = _setup_test_switch(hass)
await switch.async_turn_off()
assert body_off.template == aioclient_mock.mock_calls[-1][2].decode()
assert switch.is_on is None
async def test_turn_off_timeout(hass, aioclient_mock):
"""Test turn_off when timeout occurs."""
aioclient_mock.post(RESOURCE, exc=asyncio.TimeoutError())
switch, body_on, body_off = _setup_test_switch(hass)
await switch.async_turn_on()
assert switch.is_on is None
async def test_update_when_on(hass, aioclient_mock):
"""Test update when switch is on."""
switch, body_on, body_off = _setup_test_switch(hass)
aioclient_mock.get(RESOURCE, text=body_on.template)
await switch.async_update()
assert switch.is_on
async def test_update_when_off(hass, aioclient_mock):
"""Test update when switch is off."""
switch, body_on, body_off = _setup_test_switch(hass)
aioclient_mock.get(RESOURCE, text=body_off.template)
await switch.async_update()
assert not switch.is_on
async def test_update_when_unknown(hass, aioclient_mock):
"""Test update when unknown status returned."""
aioclient_mock.get(RESOURCE, text="unknown status")
switch, body_on, body_off = _setup_test_switch(hass)
await switch.async_update()
assert switch.is_on is None
async def test_update_timeout(hass, aioclient_mock):
"""Test update when timeout occurs."""
aioclient_mock.get(RESOURCE, exc=asyncio.TimeoutError())
switch, body_on, body_off = _setup_test_switch(hass)
await switch.async_update()
assert switch.is_on is None
| apache-2.0 |
decipher-survey-programming/utility-scripts | scripts/generatevariables.py | 1 | 1142 | #!/usr/bin/env hpython
import hstub, sys, argparse
from hermes import Survey, Results, misc


def generate(survey):
    iterExtra = Results.Results(survey.path, format=None, readOnly=True).iterExtra(survey)
    while True:
        try:
            record = iterExtra.next()
            l = [str(int(record[1])), record[3], record.extra['ipAddress'], record.extra['url']]
            for ev in survey.root.extraVariables:
                l.append(ev)
                l.append(record.extra[ev])
            l = [x.replace('\t', '').replace('\n', '') for x in l]
            yield '\t'.join(l)
        except StopIteration:
            break


def main():
    parser = argparse.ArgumentParser(description="Generates variables.dat data from results data")
    parser.add_argument("surveyPath", help="Survey path")
    args = parser.parse_args()

    surveyPath = misc.expandSurveyPath(args.surveyPath)
    survey = Survey.load(surveyPath)
    if survey is None:
        print >> sys.stderr, "Cannot load survey %r" % surveyPath
        return 1

    for line in generate(survey):
        print line


if __name__ == "__main__":
    main()
| mit |
wwjiang007/flink | flink-python/pyflink/fn_execution/beam/beam_boot.py | 15 | 4093 | #!/usr/bin/env python
#################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
"""
This script is a python implementation of the "boot.go" script in "beam-sdks-python-container"
project of Apache Beam, see in:
https://github.com/apache/beam/blob/release-2.14.0/sdks/python/container/boot.go
It is implemented in golang and will introduce unnecessary dependencies if used in pure python
project. So we add a python implementation which will be used when the python worker runs in
process mode. It downloads and installs users' python artifacts, then launches the python SDK
harness of Apache Beam.
"""
import argparse
import os
from subprocess import call
import grpc
import logging
import sys
from apache_beam.portability.api.beam_provision_api_pb2_grpc import ProvisionServiceStub
from apache_beam.portability.api.beam_provision_api_pb2 import GetProvisionInfoRequest
from apache_beam.portability.api.endpoints_pb2 import ApiServiceDescriptor
from google.protobuf import json_format, text_format
def check_not_empty(check_str, error_message):
    if check_str == "":
        logging.fatal(error_message)
        exit(1)


python_exec = sys.executable

if __name__ == "__main__":
    # print INFO and higher level messages
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)

    parser = argparse.ArgumentParser()

    parser.add_argument("--id", default="", help="Local identifier (required).")
    parser.add_argument("--provision_endpoint", default="",
                        help="Provision endpoint (required).")
    parser.add_argument("--semi_persist_dir", default="/tmp",
                        help="Local semi-persistent directory (optional).")

    args = parser.parse_known_args()[0]
    worker_id = args.id
    provision_endpoint = args.provision_endpoint
    semi_persist_dir = args.semi_persist_dir

    check_not_empty(worker_id, "No id provided.")
    check_not_empty(provision_endpoint, "No provision endpoint provided.")

    logging.info("Initializing python harness: %s" % " ".join(sys.argv))

    metadata = [("worker_id", worker_id)]

    # read job information from provision stub
    with grpc.insecure_channel(provision_endpoint) as channel:
        client = ProvisionServiceStub(channel=channel)
        info = client.GetProvisionInfo(GetProvisionInfoRequest(), metadata=metadata).info
        options = json_format.MessageToJson(info.pipeline_options)
        logging_endpoint = info.logging_endpoint.url
        control_endpoint = info.control_endpoint.url

    os.environ["WORKER_ID"] = worker_id
    os.environ["PIPELINE_OPTIONS"] = options
    os.environ["SEMI_PERSISTENT_DIRECTORY"] = semi_persist_dir
    os.environ["LOGGING_API_SERVICE_DESCRIPTOR"] = text_format.MessageToString(
        ApiServiceDescriptor(url=logging_endpoint))
    os.environ["CONTROL_API_SERVICE_DESCRIPTOR"] = text_format.MessageToString(
        ApiServiceDescriptor(url=control_endpoint))

    env = dict(os.environ)
    if "FLINK_BOOT_TESTING" in os.environ and os.environ["FLINK_BOOT_TESTING"] == "1":
        exit(0)

    call([python_exec, "-m", "pyflink.fn_execution.beam.beam_sdk_worker_main"],
         stdout=sys.stdout, stderr=sys.stderr, env=env)
| apache-2.0 |
errx/django | django/utils/html_parser.py | 8 | 5051 | from django.utils.six.moves import html_parser as _html_parser
import re
import sys
current_version = sys.version_info
use_workaround = (
(current_version < (2, 7, 3)) or
(current_version >= (3, 0) and current_version < (3, 2, 3))
)
HTMLParseError = _html_parser.HTMLParseError
if not use_workaround:
if current_version >= (3, 4):
class HTMLParser(_html_parser.HTMLParser):
"""Explicitly set convert_charrefs to be False.
This silences a deprecation warning on Python 3.4, but we can't do
it at call time because Python 2.7 does not have the keyword
argument.
"""
def __init__(self, convert_charrefs=False):
_html_parser.HTMLParser.__init__(self, convert_charrefs=convert_charrefs)
else:
HTMLParser = _html_parser.HTMLParser
else:
tagfind = re.compile('([a-zA-Z][-.a-zA-Z0-9:_]*)(?:\s|/(?!>))*')
class HTMLParser(_html_parser.HTMLParser):
"""
Patched version of stdlib's HTMLParser with patch from:
http://bugs.python.org/issue670664
"""
def __init__(self):
_html_parser.HTMLParser.__init__(self)
self.cdata_tag = None
def set_cdata_mode(self, tag):
try:
self.interesting = _html_parser.interesting_cdata
except AttributeError:
self.interesting = re.compile(r'</\s*%s\s*>' % tag.lower(), re.I)
self.cdata_tag = tag.lower()
def clear_cdata_mode(self):
self.interesting = _html_parser.interesting_normal
self.cdata_tag = None
# Internal -- handle starttag, return end or -1 if not terminated
def parse_starttag(self, i):
self.__starttag_text = None
endpos = self.check_for_whole_start_tag(i)
if endpos < 0:
return endpos
rawdata = self.rawdata
self.__starttag_text = rawdata[i:endpos]
# Now parse the data between i+1 and j into a tag and attrs
attrs = []
match = tagfind.match(rawdata, i + 1)
assert match, 'unexpected call to parse_starttag()'
k = match.end()
self.lasttag = tag = match.group(1).lower()
while k < endpos:
m = _html_parser.attrfind.match(rawdata, k)
if not m:
break
attrname, rest, attrvalue = m.group(1, 2, 3)
if not rest:
attrvalue = None
elif (attrvalue[:1] == '\'' == attrvalue[-1:] or
attrvalue[:1] == '"' == attrvalue[-1:]):
attrvalue = attrvalue[1:-1]
if attrvalue:
attrvalue = self.unescape(attrvalue)
attrs.append((attrname.lower(), attrvalue))
k = m.end()
end = rawdata[k:endpos].strip()
if end not in (">", "/>"):
lineno, offset = self.getpos()
if "\n" in self.__starttag_text:
lineno = lineno + self.__starttag_text.count("\n")
offset = (len(self.__starttag_text)
- self.__starttag_text.rfind("\n"))
else:
offset = offset + len(self.__starttag_text)
self.error("junk characters in start tag: %r"
% (rawdata[k:endpos][:20],))
if end.endswith('/>'):
# XHTML-style empty tag: <span attr="value" />
self.handle_startendtag(tag, attrs)
else:
self.handle_starttag(tag, attrs)
if tag in self.CDATA_CONTENT_ELEMENTS:
self.set_cdata_mode(tag) # <--------------------------- Changed
return endpos
# Internal -- parse endtag, return end or -1 if incomplete
def parse_endtag(self, i):
rawdata = self.rawdata
assert rawdata[i:i + 2] == "</", "unexpected call to parse_endtag"
match = _html_parser.endendtag.search(rawdata, i + 1) # >
if not match:
return -1
j = match.end()
match = _html_parser.endtagfind.match(rawdata, i) # </ + tag + >
if not match:
if self.cdata_tag is not None: # *** add ***
self.handle_data(rawdata[i:j]) # *** add ***
return j # *** add ***
self.error("bad end tag: %r" % (rawdata[i:j],))
# --- changed start ---------------------------------------------------
tag = match.group(1).strip()
if self.cdata_tag is not None:
if tag.lower() != self.cdata_tag:
self.handle_data(rawdata[i:j])
return j
# --- changed end -----------------------------------------------------
self.handle_endtag(tag.lower())
self.clear_cdata_mode()
return j
| bsd-3-clause |
matrogers/pylearn2 | pylearn2/devtools/run_pyflakes.py | 44 | 3360 | """
Can be run as a script or imported as a module.
Module exposes the run_pyflakes method which returns a dictionary.
As a script:
python run_pyflakes.py <no_warnings>
prints out all the errors in the library
"""
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
import sys
import logging
from theano.compat import six
from pylearn2.devtools.list_files import list_files
from pylearn2.utils.shell import run_shell_command
logger = logging.getLogger(__name__)
def run_pyflakes(no_warnings=False):
"""
Return a description of all errors pyflakes finds in Pylearn2.
Parameters
----------
no_warnings : bool
If True, omits pyflakes outputs that don't correspond to actual
errors.
Returns
-------
rval : dict
Keys are pylearn2 .py filepaths
Values are outputs from pyflakes
"""
files = list_files(".py")
rval = {}
for filepath in files:
output, rc = run_shell_command('pyflakes ' + filepath)
output = output.decode(sys.getdefaultencoding())
if u'pyflakes: not found' in output:
# The return code alone does not make it possible to detect
# if pyflakes is present or not. When pyflakes is not present,
# the return code seems to always be 127, but 127 can also be
# the result of finding 127 warnings in a file.
# Therefore, we examine the output instead.
raise RuntimeError("Couldn't run 'pyflakes " + filepath + "'. "
"Error code returned:" + str(rc) +
" Output was: " + output)
output = _filter(output, no_warnings)
if output is not None:
rval[filepath] = output
return rval
def _filter(output, no_warnings):
"""
.. todo::
WRITEME
Parameters
----------
output : str
The output of pyflakes for a single.py file
no_warnings: bool
If True, removes lines corresponding to warnings rather than errors
Returns
-------
rval : None or str
`output` with blank lines and optionally lines corresponding to
warnings removed, or, if all lines are removed, returns None.
A return value of None indicates that the file is validly formatted.
"""
lines = output.split('\n')
lines = [line for line in lines
if line != '' and line.find("undefined name 'long'") == -1]
if no_warnings:
lines = [line for line in lines if
line.find("is assigned to but never used") == -1]
lines = [line for line in lines if
line.find('imported but unused') == -1]
lines = [line for line in lines if
line.find('redefinition of unused ') == -1]
if len(lines) == 0:
return None
return '\n'.join(lines)
if __name__ == '__main__':
if len(sys.argv) > 1:
no_warnings = bool(sys.argv[1])
else:
no_warnings = False
d = run_pyflakes(no_warnings=no_warnings)
for key in d:
logger.info('{0}:'.format(key))
for l in d[key].split('\n'):
logger.info('\t{0}'.format(l))
| bsd-3-clause |
aifil/odoo | openerp/report/render/render.py | 72 | 1567 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# Why doing some multi-thread instead of using OSE capabilities ?
# For progress bar.
#
# Add a transparant multi-thread layer to all report rendering layers
#
# TODO: method to stock on the disk
class render(object):
    """ Represents a report job being rendered.

    @param bin_datas a dictionary of name:<binary content> of images etc.
    @param path the path in which binary files can be discovered, useful
        for components (images) of the report. It can be:
            - a string, relative or absolute path to images
            - a list, containing strings of paths.
        If a string is absolute path, it will be opened as such, else
        it will be passed to tools.file_open() which also considers zip
        addons.

    Reporting classes must subclass this class and redefine the __init__ and
    _render methods (not the other methods).
    """
    def __init__(self, bin_datas=None, path='.'):
        self.done = False
        if bin_datas is None:
            self.bin_datas = {}
        else:
            self.bin_datas = bin_datas
        self.path = path

    def _render(self):
        return None

    def render(self):
        self.done = False
        self._result = self._render()
        self.done = True
        return True

    def is_done(self):
        return self.done

    def get(self):
        if self.is_done():
            return self._result
        else:
            return None
| gpl-3.0 |
CameronLonsdale/sec-tools | python2/lib/python2.7/site-packages/click/__init__.py | 135 | 2858 | # -*- coding: utf-8 -*-
"""
click
~~~~~
Click is a simple Python module that wraps the stdlib's optparse to make
writing command line scripts fun. Unlike other modules, it's based around
a simple API that does not come with too much magic and is composable.
In case optparse ever gets removed from the stdlib, it will be shipped by
this module.
:copyright: (c) 2014 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
# Core classes
from .core import Context, BaseCommand, Command, MultiCommand, Group, \
CommandCollection, Parameter, Option, Argument
# Globals
from .globals import get_current_context
# Decorators
from .decorators import pass_context, pass_obj, make_pass_decorator, \
command, group, argument, option, confirmation_option, \
password_option, version_option, help_option
# Types
from .types import ParamType, File, Path, Choice, IntRange, Tuple, \
STRING, INT, FLOAT, BOOL, UUID, UNPROCESSED
# Utilities
from .utils import echo, get_binary_stream, get_text_stream, open_file, \
format_filename, get_app_dir, get_os_args
# Terminal functions
from .termui import prompt, confirm, get_terminal_size, echo_via_pager, \
progressbar, clear, style, unstyle, secho, edit, launch, getchar, \
pause
# Exceptions
from .exceptions import ClickException, UsageError, BadParameter, \
FileError, Abort, NoSuchOption, BadOptionUsage, BadArgumentUsage, \
MissingParameter
# Formatting
from .formatting import HelpFormatter, wrap_text
# Parsing
from .parser import OptionParser
__all__ = [
# Core classes
'Context', 'BaseCommand', 'Command', 'MultiCommand', 'Group',
'CommandCollection', 'Parameter', 'Option', 'Argument',
# Globals
'get_current_context',
# Decorators
'pass_context', 'pass_obj', 'make_pass_decorator', 'command', 'group',
'argument', 'option', 'confirmation_option', 'password_option',
'version_option', 'help_option',
# Types
'ParamType', 'File', 'Path', 'Choice', 'IntRange', 'Tuple', 'STRING',
'INT', 'FLOAT', 'BOOL', 'UUID', 'UNPROCESSED',
# Utilities
'echo', 'get_binary_stream', 'get_text_stream', 'open_file',
'format_filename', 'get_app_dir', 'get_os_args',
# Terminal functions
'prompt', 'confirm', 'get_terminal_size', 'echo_via_pager',
'progressbar', 'clear', 'style', 'unstyle', 'secho', 'edit', 'launch',
'getchar', 'pause',
# Exceptions
'ClickException', 'UsageError', 'BadParameter', 'FileError',
'Abort', 'NoSuchOption', 'BadOptionUsage', 'BadArgumentUsage',
'MissingParameter',
# Formatting
'HelpFormatter', 'wrap_text',
# Parsing
'OptionParser',
]
# Controls if click should emit the warning about the use of unicode
# literals.
disable_unicode_literals_warning = False
__version__ = '6.7'
| mit |
alxgu/ansible-modules-core | cloud/openstack/_keystone_user.py | 32 | 14115 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Based on Jimmy Tang's implementation
DOCUMENTATION = '''
---
module: keystone_user
version_added: "1.2"
deprecated: Deprecated in 2.0. Use os_user instead
short_description: Manage OpenStack Identity (keystone) users, tenants and roles
description:
- Manage users,tenants, roles from OpenStack.
options:
login_user:
description:
- login username to authenticate to keystone
required: false
default: admin
login_password:
description:
- Password of login user
required: false
default: 'yes'
login_tenant_name:
description:
- The tenant login_user belongs to
required: false
default: None
version_added: "1.3"
token:
description:
- The token to be uses in case the password is not specified
required: false
default: None
endpoint:
description:
- The keystone url for authentication
required: false
default: 'http://127.0.0.1:35357/v2.0/'
user:
description:
- The name of the user that has to added/removed from OpenStack
required: false
default: None
password:
description:
- The password to be assigned to the user
required: false
default: None
tenant:
description:
- The tenant name that has be added/removed
required: false
default: None
tenant_description:
description:
- A description for the tenant
required: false
default: None
email:
description:
- An email address for the user
required: false
default: None
role:
description:
- The name of the role to be assigned or created
required: false
default: None
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
requirements:
- "python >= 2.6"
- python-keystoneclient
author: "Ansible Core Team (deprecated)"
'''
EXAMPLES = '''
# Create a tenant
- keystone_user: tenant=demo tenant_description="Default Tenant"
# Create a user
- keystone_user: user=john tenant=demo password=secrete
# Apply the admin role to the john user in the demo tenant
- keystone_user: role=admin user=john tenant=demo
'''
try:
from keystoneclient.v2_0 import client
except ImportError:
keystoneclient_found = False
else:
keystoneclient_found = True
def authenticate(endpoint, token, login_user, login_password, login_tenant_name):
"""Return a keystone client object"""
if token:
return client.Client(endpoint=endpoint, token=token)
else:
return client.Client(auth_url=endpoint, username=login_user,
password=login_password, tenant_name=login_tenant_name)
def tenant_exists(keystone, tenant):
""" Return True if tenant already exists"""
return tenant in [x.name for x in keystone.tenants.list()]
def user_exists(keystone, user):
"""" Return True if user already exists"""
return user in [x.name for x in keystone.users.list()]
def get_tenant(keystone, name):
""" Retrieve a tenant by name"""
tenants = [x for x in keystone.tenants.list() if x.name == name]
count = len(tenants)
if count == 0:
raise KeyError("No keystone tenants with name %s" % name)
elif count > 1:
raise ValueError("%d tenants with name %s" % (count, name))
else:
return tenants[0]
def get_user(keystone, name):
""" Retrieve a user by name"""
users = [x for x in keystone.users.list() if x.name == name]
count = len(users)
if count == 0:
raise KeyError("No keystone users with name %s" % name)
elif count > 1:
raise ValueError("%d users with name %s" % (count, name))
else:
return users[0]
def get_role(keystone, name):
""" Retrieve a role by name"""
roles = [x for x in keystone.roles.list() if x.name == name]
count = len(roles)
if count == 0:
raise KeyError("No keystone roles with name %s" % name)
elif count > 1:
raise ValueError("%d roles with name %s" % (count, name))
else:
return roles[0]
def get_tenant_id(keystone, name):
return get_tenant(keystone, name).id
def get_user_id(keystone, name):
return get_user(keystone, name).id
def ensure_tenant_exists(keystone, tenant_name, tenant_description,
check_mode):
""" Ensure that a tenant exists.
Return (True, id) if a new tenant was created, (False, None) if it
already existed.
"""
# Check if tenant already exists
try:
tenant = get_tenant(keystone, tenant_name)
except KeyError:
# Tenant doesn't exist yet
pass
else:
if tenant.description == tenant_description:
return (False, tenant.id)
else:
# We need to update the tenant description
if check_mode:
return (True, tenant.id)
else:
tenant.update(description=tenant_description)
return (True, tenant.id)
# We now know we will have to create a new tenant
if check_mode:
return (True, None)
ks_tenant = keystone.tenants.create(tenant_name=tenant_name,
description=tenant_description,
enabled=True)
return (True, ks_tenant.id)
def ensure_tenant_absent(keystone, tenant, check_mode):
""" Ensure that a tenant does not exist
Return True if the tenant was removed, False if it didn't exist
in the first place
"""
if not tenant_exists(keystone, tenant):
return False
# We now know we will have to delete the tenant
if check_mode:
return True
def ensure_user_exists(keystone, user_name, password, email, tenant_name,
check_mode):
""" Check if user exists
Return (True, id) if a new user was created, (False, id) user alrady
exists
"""
# Check if tenant already exists
try:
user = get_user(keystone, user_name)
except KeyError:
# Tenant doesn't exist yet
pass
else:
# User does exist, we're done
return (False, user.id)
# We now know we will have to create a new user
if check_mode:
return (True, None)
tenant = get_tenant(keystone, tenant_name)
user = keystone.users.create(name=user_name, password=password,
email=email, tenant_id=tenant.id)
return (True, user.id)
def ensure_role_exists(keystone, role_name):
# Get the role if it exists
try:
role = get_role(keystone, role_name)
# Role does exist, we're done
return (False, role.id)
except KeyError:
# Role doesn't exist yet
pass
role = keystone.roles.create(role_name)
return (True, role.id)
def ensure_user_role_exists(keystone, user_name, tenant_name, role_name,
check_mode):
""" Check if role exists
Return (True, id) if a new role was created or if the role was newly
assigned to the user for the tenant. (False, id) if the role already
exists and was already assigned to the user ofr the tenant.
"""
# Check if the user has the role in the tenant
user = get_user(keystone, user_name)
tenant = get_tenant(keystone, tenant_name)
roles = [x for x in keystone.roles.roles_for_user(user, tenant)
if x.name == role_name]
count = len(roles)
if count == 1:
# If the role is in there, we are done
role = roles[0]
return (False, role.id)
elif count > 1:
# Too many roles with the same name, throw an error
raise ValueError("%d roles with name %s" % (count, role_name))
# At this point, we know we will need to make changes
if check_mode:
return (True, None)
# Get the role if it exists
try:
role = get_role(keystone, role_name)
except KeyError:
# Role doesn't exist yet
role = keystone.roles.create(role_name)
# Associate the role with the user in the admin
keystone.roles.add_user_role(user, role, tenant)
return (True, role.id)
def ensure_user_absent(keystone, user, check_mode):
raise NotImplementedError("Not yet implemented")
def ensure_user_role_absent(keystone, uesr, tenant, role, check_mode):
raise NotImplementedError("Not yet implemented")
def ensure_role_absent(keystone, role_name):
raise NotImplementedError("Not yet implemented")
def main():
argument_spec = openstack_argument_spec()
argument_spec.update(dict(
tenant_description=dict(required=False),
email=dict(required=False),
user=dict(required=False),
tenant=dict(required=False),
password=dict(required=False),
role=dict(required=False),
state=dict(default='present', choices=['present', 'absent']),
endpoint=dict(required=False,
default="http://127.0.0.1:35357/v2.0"),
token=dict(required=False),
login_user=dict(required=False),
login_password=dict(required=False),
login_tenant_name=dict(required=False)
))
# keystone operations themselves take an endpoint, not a keystone auth_url
del(argument_spec['auth_url'])
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=[['token', 'login_user'],
['token', 'login_password'],
['token', 'login_tenant_name']]
)
if not keystoneclient_found:
module.fail_json(msg="the python-keystoneclient module is required")
user = module.params['user']
password = module.params['password']
tenant = module.params['tenant']
tenant_description = module.params['tenant_description']
email = module.params['email']
role = module.params['role']
state = module.params['state']
endpoint = module.params['endpoint']
token = module.params['token']
login_user = module.params['login_user']
login_password = module.params['login_password']
login_tenant_name = module.params['login_tenant_name']
keystone = authenticate(endpoint, token, login_user, login_password, login_tenant_name)
check_mode = module.check_mode
try:
d = dispatch(keystone, user, password, tenant, tenant_description,
email, role, state, endpoint, token, login_user,
login_password, check_mode)
except Exception as e:
if check_mode:
# If we have a failure in check mode
module.exit_json(changed=True,
msg="exception: %s" % e)
else:
module.fail_json(msg="exception: %s" % e)
else:
module.exit_json(**d)
def dispatch(keystone, user=None, password=None, tenant=None,
tenant_description=None, email=None, role=None,
state="present", endpoint=None, token=None, login_user=None,
login_password=None, check_mode=False):
""" Dispatch to the appropriate method.
Returns a dict that will be passed to exit_json
tenant user role state
------ ---- ---- --------
X present ensure_tenant_exists
X absent ensure_tenant_absent
X X present ensure_user_exists
X X absent ensure_user_absent
X X X present ensure_user_role_exists
X X X absent ensure_user_role_absent
X present ensure_role_exists
X absent ensure_role_absent
"""
changed = False
id = None
if not tenant and not user and role and state == "present":
changed, id = ensure_role_exists(keystone, role)
elif not tenant and not user and role and state == "absent":
changed = ensure_role_absent(keystone, role)
elif tenant and not user and not role and state == "present":
changed, id = ensure_tenant_exists(keystone, tenant,
tenant_description, check_mode)
elif tenant and not user and not role and state == "absent":
changed = ensure_tenant_absent(keystone, tenant, check_mode)
elif tenant and user and not role and state == "present":
changed, id = ensure_user_exists(keystone, user, password,
email, tenant, check_mode)
elif tenant and user and not role and state == "absent":
changed = ensure_user_absent(keystone, user, check_mode)
elif tenant and user and role and state == "present":
changed, id = ensure_user_role_exists(keystone, user, tenant, role,
check_mode)
elif tenant and user and role and state == "absent":
changed = ensure_user_role_absent(keystone, user, tenant, role, check_mode)
else:
# Should never reach here
raise ValueError("Code should never reach here")
return dict(changed=changed, id=id)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
| gpl-3.0 |
shivanikhosa/browserscope | static_mode/richtext_0.py | 9 | 21963 | (dp0
VPlayStation
p1
(dp2
S'summary_display'
p3
S'0/149'
p4
sS'total_runs'
p5
L3L
sS'summary_score'
p6
I0
sS'results'
p7
(dp8
S'unapply'
p9
(dp10
S'score'
p11
I0
sS'raw_score'
p12
I0
sS'display'
p13
S'0/26'
p14
ssS'apply'
p15
(dp16
g11
I0
sg12
I0
sg13
S'0/41'
p17
ssS'change'
p18
(dp19
g11
I0
sg12
I0
sg13
S'0/17'
p20
ssS'query'
p21
(dp22
g11
I0
sg12
I0
sg13
S'0/65'
p23
ssssS'total_runs'
p24
L10760L
sVChrome
p25
(dp26
S'summary_display'
p27
S'129/149'
p28
sS'total_runs'
p29
L1831L
sS'summary_score'
p30
I87
sS'results'
p31
(dp32
S'unapply'
p33
(dp34
S'score'
p35
I88
sS'raw_score'
p36
I0
sS'display'
p37
S'23/26'
p38
ssS'apply'
p39
(dp40
g35
I95
sg36
I0
sg37
S'39/41'
p41
ssS'change'
p42
(dp43
g35
I100
sg36
I0
sg37
S'17/17'
p44
ssS'query'
p45
(dp46
g35
I77
sg36
I0
sg37
S'50/65'
p47
ssssVStainless
p48
(dp49
S'summary_display'
p50
S'125/149'
p51
sS'total_runs'
p52
L5L
sS'summary_score'
p53
I84
sS'results'
p54
(dp55
S'unapply'
p56
(dp57
S'score'
p58
I73
sS'raw_score'
p59
I0
sS'display'
p60
S'19/26'
p61
ssS'apply'
p62
(dp63
g58
I95
sg59
I0
sg60
S'39/41'
p64
ssS'change'
p65
(dp66
g58
I100
sg59
I0
sg60
S'17/17'
p67
ssS'query'
p68
(dp69
g58
I77
sg59
I0
sg60
S'50/65'
p70
ssssVSeaMonkey
p71
(dp72
S'summary_display'
p73
S'117/149'
p74
sS'total_runs'
p75
L36L
sS'summary_score'
p76
I79
sS'results'
p77
(dp78
S'unapply'
p79
(dp80
S'score'
p81
I65
sS'raw_score'
p82
I0
sS'display'
p83
S'17/26'
p84
ssS'apply'
p85
(dp86
g81
I83
sg82
I0
sg83
S'34/41'
p87
ssS'change'
p88
(dp89
g81
I41
sg82
I0
sg83
S'7/17'
p90
ssS'query'
p91
(dp92
g81
I91
sg82
I0
sg83
S'59/65'
p93
ssssVFirefox (Minefield)
p94
(dp95
S'summary_display'
p96
S'117/149'
p97
sS'total_runs'
p98
L182L
sS'summary_score'
p99
I79
sS'results'
p100
(dp101
S'unapply'
p102
(dp103
S'score'
p104
I65
sS'raw_score'
p105
I0
sS'display'
p106
S'17/26'
p107
ssS'apply'
p108
(dp109
g104
I83
sg105
I0
sg106
S'34/41'
p110
ssS'change'
p111
(dp112
g104
I41
sg105
I0
sg106
S'7/17'
p113
ssS'query'
p114
(dp115
g104
I91
sg105
I0
sg106
S'59/65'
p116
ssssVFirefox (Shiretoko)
p117
(dp118
S'summary_display'
p119
S'117/149'
p120
sS'total_runs'
p121
L176L
sS'summary_score'
p122
I79
sS'results'
p123
(dp124
S'unapply'
p125
(dp126
S'score'
p127
I65
sS'raw_score'
p128
I0
sS'display'
p129
S'17/26'
p130
ssS'apply'
p131
(dp132
g127
I83
sg128
I0
sg129
S'34/41'
p133
ssS'change'
p134
(dp135
g127
I41
sg128
I0
sg129
S'7/17'
p136
ssS'query'
p137
(dp138
g127
I91
sg128
I0
sg129
S'59/65'
p139
ssssVMaxthon
p140
(dp141
S'summary_display'
p142
S'99/149'
p143
sS'total_runs'
p144
L19L
sS'summary_score'
p145
I66
sS'results'
p146
(dp147
S'unapply'
p148
(dp149
S'score'
p150
I54
sS'raw_score'
p151
I0
sS'display'
p152
S'14/26'
p153
ssS'apply'
p154
(dp155
g150
I59
sg151
I0
sg152
S'24/41'
p156
ssS'change'
p157
(dp158
g150
I29
sg151
I0
sg152
S'5/17'
p159
ssS'query'
p160
(dp161
g150
I86
sg151
I0
sg152
S'56/65'
p162
ssssVCamino
p163
(dp164
S'summary_display'
p165
S'117/149'
p166
sS'total_runs'
p167
L21L
sS'summary_score'
p168
I79
sS'results'
p169
(dp170
S'unapply'
p171
(dp172
S'score'
p173
I65
sS'raw_score'
p174
I0
sS'display'
p175
S'17/26'
p176
ssS'apply'
p177
(dp178
g173
I83
sg174
I0
sg175
S'34/41'
p179
ssS'change'
p180
(dp181
g173
I41
sg174
I0
sg175
S'7/17'
p182
ssS'query'
p183
(dp184
g173
I91
sg174
I0
sg175
S'59/65'
p185
ssssVOther
p186
(dp187
S'summary_display'
p188
S'123/149'
p189
sS'total_runs'
p190
L65L
sS'summary_score'
p191
I83
sS'results'
p192
(dp193
S'unapply'
p194
(dp195
S'score'
p196
I69
sS'raw_score'
p197
I0
sS'display'
p198
S'18/26'
p199
ssS'apply'
p200
(dp201
g196
I95
sg197
I0
sg198
S'39/41'
p202
ssS'change'
p203
(dp204
g196
I100
sg197
I0
sg198
S'17/17'
p205
ssS'query'
p206
(dp207
g196
I75
sg197
I0
sg198
S'49/65'
p208
ssssVGranParadiso
p209
(dp210
S'summary_display'
p211
S'117/149'
p212
sS'total_runs'
p213
L1L
sS'summary_score'
p214
I79
sS'results'
p215
(dp216
S'unapply'
p217
(dp218
S'score'
p219
I65
sS'raw_score'
p220
I0
sS'display'
p221
S'17/26'
p222
ssS'apply'
p223
(dp224
g219
I83
sg220
I0
sg221
S'34/41'
p225
ssS'change'
p226
(dp227
g219
I41
sg220
I0
sg221
S'7/17'
p228
ssS'query'
p229
(dp230
g219
I91
sg220
I0
sg221
S'59/65'
p231
ssssVOpera Mini
p232
(dp233
S'summary_display'
p234
S'26/149'
p235
sS'total_runs'
p236
L25L
sS'summary_score'
p237
I17
sS'results'
p238
(dp239
S'unapply'
p240
(dp241
S'score'
p242
I100
sS'raw_score'
p243
I0
sS'display'
p244
S'26/26'
p245
ssS'apply'
p246
(dp247
g242
I0
sg243
I0
sg244
S'0/41'
p248
ssS'change'
p249
(dp250
g242
I0
sg243
I0
sg244
S'0/17'
p251
ssS'query'
p252
(dp253
g242
I0
sg243
I0
sg244
S'0/65'
p254
ssssVAndroid
p255
(dp256
S'summary_display'
p257
S'118/149'
p258
sS'total_runs'
p259
L51L
sS'summary_score'
p260
I79
sS'results'
p261
(dp262
S'unapply'
p263
(dp264
S'score'
p265
I65
sS'raw_score'
p266
I0
sS'display'
p267
S'17/26'
p268
ssS'apply'
p269
(dp270
g265
I83
sg266
I0
sg267
S'34/41'
p271
ssS'change'
p272
(dp273
g265
I100
sg266
I0
sg267
S'17/17'
p274
ssS'query'
p275
(dp276
g265
I77
sg266
I0
sg267
S'50/65'
p277
ssssVUzbl
p278
(dp279
S'summary_display'
p280
S'129/149'
p281
sS'total_runs'
p282
L24L
sS'summary_score'
p283
I87
sS'results'
p284
(dp285
S'unapply'
p286
(dp287
S'score'
p288
I88
sS'raw_score'
p289
I0
sS'display'
p290
S'23/26'
p291
ssS'apply'
p292
(dp293
g288
I95
sg289
I0
sg290
S'39/41'
p294
ssS'change'
p295
(dp296
g288
I100
sg289
I0
sg290
S'17/17'
p297
ssS'query'
p298
(dp299
g288
I77
sg289
I0
sg290
S'50/65'
p300
ssssVVodafone
p301
(dp302
S'summary_display'
p303
S'117/149'
p304
sS'total_runs'
p305
L1L
sS'summary_score'
p306
I79
sS'results'
p307
(dp308
S'unapply'
p309
(dp310
S'score'
p311
I65
sS'raw_score'
p312
I0
sS'display'
p313
S'17/26'
p314
ssS'apply'
p315
(dp316
g311
I83
sg312
I0
sg313
S'34/41'
p317
ssS'change'
p318
(dp319
g311
I41
sg312
I0
sg313
S'7/17'
p320
ssS'query'
p321
(dp322
g311
I91
sg312
I0
sg313
S'59/65'
p323
ssssVVienna
p324
(dp325
S'summary_display'
p326
S'123/149'
p327
sS'total_runs'
p328
L3L
sS'summary_score'
p329
I83
sS'results'
p330
(dp331
S'unapply'
p332
(dp333
S'score'
p334
I73
sS'raw_score'
p335
I0
sS'display'
p336
S'19/26'
p337
ssS'apply'
p338
(dp339
g334
I90
sg335
I0
sg336
S'37/41'
p340
ssS'change'
p341
(dp342
g334
I100
sg335
I0
sg336
S'17/17'
p343
ssS'query'
p344
(dp345
g334
I77
sg335
I0
sg336
S'50/65'
p346
ssssVSleipnir
p347
(dp348
S'summary_display'
p349
S'99/149'
p350
sS'total_runs'
p351
L15L
sS'summary_score'
p352
I66
sS'results'
p353
(dp354
S'unapply'
p355
(dp356
S'score'
p357
I54
sS'raw_score'
p358
I0
sS'display'
p359
S'14/26'
p360
ssS'apply'
p361
(dp362
g357
I59
sg358
I0
sg359
S'24/41'
p363
ssS'change'
p364
(dp365
g357
I29
sg358
I0
sg359
S'5/17'
p366
ssS'query'
p367
(dp368
g357
I86
sg358
I0
sg359
S'56/65'
p369
ssssVNetFront
p370
(dp371
S'summary_display'
p372
S'117/149'
p373
sS'total_runs'
p374
L2L
sS'summary_score'
p375
I79
sS'results'
p376
(dp377
S'unapply'
p378
(dp379
S'score'
p380
I65
sS'raw_score'
p381
I0
sS'display'
p382
S'17/26'
p383
ssS'apply'
p384
(dp385
g380
I83
sg381
I0
sg382
S'34/41'
p386
ssS'change'
p387
(dp388
g380
I41
sg381
I0
sg382
S'7/17'
p389
ssS'query'
p390
(dp391
g380
I91
sg381
I0
sg382
S'59/65'
p392
ssssVNokia
p393
(dp394
S'summary_display'
p395
S'100/149'
p396
sS'total_runs'
p397
L5L
sS'summary_score'
p398
I67
sS'results'
p399
(dp400
S'unapply'
p401
(dp402
S'score'
p403
I31
sS'raw_score'
p404
I0
sS'display'
p405
S'8/26'
p406
ssS'apply'
p407
(dp408
g403
I68
sg404
I0
sg405
S'28/41'
p409
ssS'change'
p410
(dp411
g403
I100
sg404
I0
sg405
S'17/17'
p412
ssS'query'
p413
(dp414
g403
I72
sg404
I0
sg405
S'47/65'
p415
ssssVJasmine
p416
(dp417
S'summary_display'
p418
S'145/149'
p419
sS'total_runs'
p420
L2L
sS'summary_score'
p421
I97
sS'results'
p422
(dp423
S'unapply'
p424
(dp425
S'score'
p426
I88
sS'raw_score'
p427
I0
sS'display'
p428
S'23/26'
p429
ssS'apply'
p430
(dp431
g426
I98
sg427
I0
sg428
S'40/41'
p432
ssS'change'
p433
(dp434
g426
I100
sg427
I0
sg428
S'17/17'
p435
ssS'query'
p436
(dp437
g426
I100
sg427
I0
sg428
S'65/65'
p438
ssssVFluid
p439
(dp440
S'summary_display'
p441
S'125/149'
p442
sS'total_runs'
p443
L1L
sS'summary_score'
p444
I84
sS'results'
p445
(dp446
S'unapply'
p447
(dp448
S'score'
p449
I73
sS'raw_score'
p450
I0
sS'display'
p451
S'19/26'
p452
ssS'apply'
p453
(dp454
g449
I95
sg450
I0
sg451
S'39/41'
p455
ssS'change'
p456
(dp457
g449
I100
sg450
I0
sg451
S'17/17'
p458
ssS'query'
p459
(dp460
g449
I77
sg450
I0
sg451
S'50/65'
p461
ssssVK-Meleon
p462
(dp463
S'summary_display'
p464
S'117/149'
p465
sS'total_runs'
p466
L9L
sS'summary_score'
p467
I79
sS'results'
p468
(dp469
S'unapply'
p470
(dp471
S'score'
p472
I65
sS'raw_score'
p473
I0
sS'display'
p474
S'17/26'
p475
ssS'apply'
p476
(dp477
g472
I83
sg473
I0
sg474
S'34/41'
p478
ssS'change'
p479
(dp480
g472
I41
sg473
I0
sg474
S'7/17'
p481
ssS'query'
p482
(dp483
g472
I91
sg473
I0
sg474
S'59/65'
p484
ssssVChrome Frame (IE 7)
p485
(dp486
S'summary_display'
p487
S'129/149'
p488
sS'total_runs'
p489
L4L
sS'summary_score'
p490
I87
sS'results'
p491
(dp492
S'unapply'
p493
(dp494
S'score'
p495
I88
sS'raw_score'
p496
I0
sS'display'
p497
S'23/26'
p498
ssS'apply'
p499
(dp500
g495
I95
sg496
I0
sg497
S'39/41'
p501
ssS'change'
p502
(dp503
g495
I100
sg496
I0
sg497
S'17/17'
p504
ssS'query'
p505
(dp506
g495
I77
sg496
I0
sg497
S'50/65'
p507
ssssVSafari
p508
(dp509
S'summary_display'
p510
S'125/149'
p511
sS'total_runs'
p512
L1024L
sS'summary_score'
p513
I84
sS'results'
p514
(dp515
S'unapply'
p516
(dp517
S'score'
p518
I73
sS'raw_score'
p519
I0
sS'display'
p520
S'19/26'
p521
ssS'apply'
p522
(dp523
g518
I95
sg519
I0
sg520
S'39/41'
p524
ssS'change'
p525
(dp526
g518
I100
sg519
I0
sg520
S'17/17'
p527
ssS'query'
p528
(dp529
g518
I77
sg519
I0
sg520
S'50/65'
p530
ssssVGaleon
p531
(dp532
S'summary_display'
p533
S'117/149'
p534
sS'total_runs'
p535
L2L
sS'summary_score'
p536
I79
sS'results'
p537
(dp538
S'unapply'
p539
(dp540
S'score'
p541
I65
sS'raw_score'
p542
I0
sS'display'
p543
S'17/26'
p544
ssS'apply'
p545
(dp546
g541
I83
sg542
I0
sg543
S'34/41'
p547
ssS'change'
p548
(dp549
g541
I41
sg542
I0
sg543
S'7/17'
p550
ssS'query'
p551
(dp552
g541
I91
sg542
I0
sg543
S'59/65'
p553
ssssVNetNewsWire
p554
(dp555
S'summary_display'
p556
S'125/149'
p557
sS'total_runs'
p558
L10L
sS'summary_score'
p559
I84
sS'results'
p560
(dp561
S'unapply'
p562
(dp563
S'score'
p564
I73
sS'raw_score'
p565
I0
sS'display'
p566
S'19/26'
p567
ssS'apply'
p568
(dp569
g564
I95
sg565
I0
sg566
S'39/41'
p570
ssS'change'
p571
(dp572
g564
I100
sg565
I0
sg566
S'17/17'
p573
ssS'query'
p574
(dp575
g564
I77
sg565
I0
sg566
S'50/65'
p576
ssssVMaemo Browser
p577
(dp578
S'summary_display'
p579
S'118/149'
p580
sS'total_runs'
p581
L1L
sS'summary_score'
p582
I79
sS'results'
p583
(dp584
S'unapply'
p585
(dp586
S'score'
p587
I65
sS'raw_score'
p588
I0
sS'display'
p589
S'17/26'
p590
ssS'apply'
p591
(dp592
g587
I83
sg588
I0
sg589
S'34/41'
p593
ssS'change'
p594
(dp595
g587
I41
sg588
I0
sg589
S'7/17'
p596
ssS'query'
p597
(dp598
g587
I92
sg588
I0
sg589
S'60/65'
p599
ssssVPalm Pre
p600
(dp601
S'summary_display'
p602
S'129/149'
p603
sS'total_runs'
p604
L7L
sS'summary_score'
p605
I87
sS'results'
p606
(dp607
S'unapply'
p608
(dp609
S'score'
p610
I88
sS'raw_score'
p611
I0
sS'display'
p612
S'23/26'
p613
ssS'apply'
p614
(dp615
g610
I95
sg611
I0
sg612
S'39/41'
p616
ssS'change'
p617
(dp618
g610
I100
sg611
I0
sg612
S'17/17'
p619
ssS'query'
p620
(dp621
g610
I77
sg611
I0
sg612
S'50/65'
p622
ssssVNetscape
p623
(dp624
S'summary_display'
p625
S'118/149'
p626
sS'total_runs'
p627
L3L
sS'summary_score'
p628
I79
sS'results'
p629
(dp630
S'unapply'
p631
(dp632
S'score'
p633
I65
sS'raw_score'
p634
I0
sS'display'
p635
S'17/26'
p636
ssS'apply'
p637
(dp638
g633
I85
sg634
I0
sg635
S'35/41'
p639
ssS'change'
p640
(dp641
g633
I41
sg634
I0
sg635
S'7/17'
p642
ssS'query'
p643
(dp644
g633
I91
sg634
I0
sg635
S'59/65'
p645
ssssVIceweasel
p646
(dp647
S'summary_display'
p648
S'117/149'
p649
sS'total_runs'
p650
L55L
sS'summary_score'
p651
I79
sS'results'
p652
(dp653
S'unapply'
p654
(dp655
S'score'
p656
I65
sS'raw_score'
p657
I0
sS'display'
p658
S'17/26'
p659
ssS'apply'
p660
(dp661
g656
I83
sg657
I0
sg658
S'34/41'
p662
ssS'change'
p663
(dp664
g656
I41
sg657
I0
sg658
S'7/17'
p665
ssS'query'
p666
(dp667
g656
I91
sg657
I0
sg658
S'59/65'
p668
ssssVKonqueror
p669
(dp670
S'summary_display'
p671
S'0/149'
p672
sS'total_runs'
p673
L37L
sS'summary_score'
p674
I0
sS'results'
p675
(dp676
S'unapply'
p677
(dp678
S'score'
p679
I0
sS'raw_score'
p680
I0
sS'display'
p681
S'0/26'
p682
ssS'apply'
p683
(dp684
g679
I0
sg680
I0
sg681
S'0/41'
p685
ssS'change'
p686
(dp687
g679
I0
sg680
I0
sg681
S'0/17'
p688
ssS'query'
p689
(dp690
g679
I0
sg680
I0
sg681
S'0/65'
p691
ssssVKazehakase
p692
(dp693
S'summary_display'
p694
S'117/149'
p695
sS'total_runs'
p696
L1L
sS'summary_score'
p697
I79
sS'results'
p698
(dp699
S'unapply'
p700
(dp701
S'score'
p702
I65
sS'raw_score'
p703
I0
sS'display'
p704
S'17/26'
p705
ssS'apply'
p706
(dp707
g702
I83
sg703
I0
sg704
S'34/41'
p708
ssS'change'
p709
(dp710
g702
I41
sg703
I0
sg704
S'7/17'
p711
ssS'query'
p712
(dp713
g702
I91
sg703
I0
sg704
S'59/65'
p714
ssssVOpera
p715
(dp716
S'summary_display'
p717
S'89/149'
p718
sS'total_runs'
p719
L1319L
sS'summary_score'
p720
I60
sS'results'
p721
(dp722
S'unapply'
p723
(dp724
S'score'
p725
I50
sS'raw_score'
p726
I0
sS'display'
p727
S'13/26'
p728
ssS'apply'
p729
(dp730
g725
I63
sg726
I0
sg727
S'26/41'
p731
ssS'change'
p732
(dp733
g725
I41
sg726
I0
sg727
S'7/17'
p734
ssS'query'
p735
(dp736
g725
I66
sg726
I0
sg727
S'43/65'
p737
ssssVAvant
p738
(dp739
S'summary_display'
p740
S'96/149'
p741
sS'total_runs'
p742
L13L
sS'summary_score'
p743
I64
sS'results'
p744
(dp745
S'unapply'
p746
(dp747
S'score'
p748
I42
sS'raw_score'
p749
I0
sS'display'
p750
S'11/26'
p751
ssS'apply'
p752
(dp753
g748
I59
sg749
I0
sg750
S'24/41'
p754
ssS'change'
p755
(dp756
g748
I29
sg749
I0
sg750
S'5/17'
p757
ssS'query'
p758
(dp759
g748
I86
sg749
I0
sg750
S'56/65'
p760
ssssVQtWeb
p761
(dp762
S'summary_display'
p763
S'111/149'
p764
sS'total_runs'
p765
L1L
sS'summary_score'
p766
I74
sS'results'
p767
(dp768
S'unapply'
p769
(dp770
S'score'
p771
I35
sS'raw_score'
p772
I0
sS'display'
p773
S'9/26'
p774
ssS'apply'
p775
(dp776
g771
I85
sg772
I0
sg773
S'35/41'
p777
ssS'change'
p778
(dp779
g771
I100
sg772
I0
sg773
S'17/17'
p780
ssS'query'
p781
(dp782
g771
I77
sg772
I0
sg773
S'50/65'
p783
ssssVMicroB
p784
(dp785
S'summary_display'
p786
S'118/149'
p787
sS'total_runs'
p788
L1L
sS'summary_score'
p789
I79
sS'results'
p790
(dp791
S'unapply'
p792
(dp793
S'score'
p794
I65
sS'raw_score'
p795
I0
sS'display'
p796
S'17/26'
p797
ssS'apply'
p798
(dp799
g794
I83
sg795
I0
sg796
S'34/41'
p800
ssS'change'
p801
(dp802
g794
I41
sg795
I0
sg796
S'7/17'
p803
ssS'query'
p804
(dp805
g794
I92
sg795
I0
sg796
S'60/65'
p806
ssssVEpiphany
p807
(dp808
S'summary_display'
p809
S'117/149'
p810
sS'total_runs'
p811
L7L
sS'summary_score'
p812
I79
sS'results'
p813
(dp814
S'unapply'
p815
(dp816
S'score'
p817
I65
sS'raw_score'
p818
I0
sS'display'
p819
S'17/26'
p820
ssS'apply'
p821
(dp822
g817
I83
sg818
I0
sg819
S'34/41'
p823
ssS'change'
p824
(dp825
g817
I41
sg818
I0
sg819
S'7/17'
p826
ssS'query'
p827
(dp828
g817
I91
sg818
I0
sg819
S'59/65'
p829
ssssViPhone
p830
(dp831
S'summary_display'
p832
S'118/149'
p833
sS'total_runs'
p834
L116L
sS'summary_score'
p835
I79
sS'results'
p836
(dp837
S'unapply'
p838
(dp839
S'score'
p840
I65
sS'raw_score'
p841
I0
sS'display'
p842
S'17/26'
p843
ssS'apply'
p844
(dp845
g840
I83
sg841
I0
sg842
S'34/41'
p846
ssS'change'
p847
(dp848
g840
I100
sg841
I0
sg842
S'17/17'
p849
ssS'query'
p850
(dp851
g840
I77
sg841
I0
sg842
S'50/65'
p852
ssssVIron
p853
(dp854
S'summary_display'
p855
S'129/149'
p856
sS'total_runs'
p857
L74L
sS'summary_score'
p858
I87
sS'results'
p859
(dp860
S'unapply'
p861
(dp862
S'score'
p863
I88
sS'raw_score'
p864
I0
sS'display'
p865
S'23/26'
p866
ssS'apply'
p867
(dp868
g863
I95
sg864
I0
sg865
S'39/41'
p869
ssS'change'
p870
(dp871
g863
I100
sg864
I0
sg865
S'17/17'
p872
ssS'query'
p873
(dp874
g863
I77
sg864
I0
sg865
S'50/65'
p875
ssssVShiira
p876
(dp877
S'summary_display'
p878
S'125/149'
p879
sS'total_runs'
p880
L1L
sS'summary_score'
p881
I84
sS'results'
p882
(dp883
S'unapply'
p884
(dp885
S'score'
p886
I73
sS'raw_score'
p887
I0
sS'display'
p888
S'19/26'
p889
ssS'apply'
p890
(dp891
g886
I95
sg887
I0
sg888
S'39/41'
p892
ssS'change'
p893
(dp894
g886
I100
sg887
I0
sg888
S'17/17'
p895
ssS'query'
p896
(dp897
g886
I77
sg887
I0
sg888
S'50/65'
p898
ssssVMidori
p899
(dp900
S'summary_display'
p901
S'125/149'
p902
sS'total_runs'
p903
L19L
sS'summary_score'
p904
I84
sS'results'
p905
(dp906
S'unapply'
p907
(dp908
S'score'
p909
I73
sS'raw_score'
p910
I0
sS'display'
p911
S'19/26'
p912
ssS'apply'
p913
(dp914
g909
I95
sg910
I0
sg911
S'39/41'
p915
ssS'change'
p916
(dp917
g909
I100
sg910
I0
sg911
S'17/17'
p918
ssS'query'
p919
(dp920
g909
I77
sg910
I0
sg911
S'50/65'
p921
ssssVIE
p922
(dp923
S'summary_display'
p924
S'99/149'
p925
sS'total_runs'
p926
L787L
sS'summary_score'
p927
I66
sS'results'
p928
(dp929
S'unapply'
p930
(dp931
S'score'
p932
I54
sS'raw_score'
p933
I0
sS'display'
p934
S'14/26'
p935
ssS'apply'
p936
(dp937
g932
I59
sg933
I0
sg934
S'24/41'
p938
ssS'change'
p939
(dp940
g932
I29
sg933
I0
sg934
S'5/17'
p941
ssS'query'
p942
(dp943
g932
I86
sg933
I0
sg934
S'56/65'
p944
ssssVFirefox
p945
(dp946
S'summary_display'
p947
S'117/149'
p948
sS'total_runs'
p949
L4665L
sS'summary_score'
p950
I79
sS'results'
p951
(dp952
S'unapply'
p953
(dp954
S'score'
p955
I65
sS'raw_score'
p956
I0
sS'display'
p957
S'17/26'
p958
ssS'apply'
p959
(dp960
g955
I83
sg956
I0
sg957
S'34/41'
p961
ssS'change'
p962
(dp963
g955
I41
sg956
I0
sg957
S'7/17'
p964
ssS'query'
p965
(dp966
g955
I91
sg956
I0
sg957
S'59/65'
p967
ssssVLunascape
p968
(dp969
S'summary_display'
p970
S'117/149'
p971
sS'total_runs'
p972
L22L
sS'summary_score'
p973
I79
sS'results'
p974
(dp975
S'unapply'
p976
(dp977
S'score'
p978
I65
sS'raw_score'
p979
I0
sS'display'
p980
S'17/26'
p981
ssS'apply'
p982
(dp983
g978
I83
sg979
I0
sg980
S'34/41'
p984
ssS'change'
p985
(dp986
g978
I41
sg979
I0
sg980
S'7/17'
p987
ssS'query'
p988
(dp989
g978
I91
sg979
I0
sg980
S'59/65'
p990
ssssVSwiftfox
p991
(dp992
S'summary_display'
p993
S'117/149'
p994
sS'total_runs'
p995
L9L
sS'summary_score'
p996
I79
sS'results'
p997
(dp998
S'unapply'
p999
(dp1000
S'score'
p1001
I65
sS'raw_score'
p1002
I0
sS'display'
p1003
S'17/26'
p1004
ssS'apply'
p1005
(dp1006
g1001
I83
sg1002
I0
sg1003
S'34/41'
p1007
ssS'change'
p1008
(dp1009
g1001
I41
sg1002
I0
sg1003
S'7/17'
p1010
ssS'query'
p1011
(dp1012
g1001
I91
sg1002
I0
sg1003
S'59/65'
p1013
ssssVWii
p1014
(dp1015
S'summary_display'
p1016
S'0/149'
p1017
sS'total_runs'
p1018
L2L
sS'summary_score'
p1019
I0
sS'results'
p1020
(dp1021
S'unapply'
p1022
(dp1023
S'score'
p1024
I0
sS'raw_score'
p1025
I0
sS'display'
p1026
S'0/26'
p1027
ssS'apply'
p1028
(dp1029
g1024
I0
sg1025
I0
sg1026
S'0/41'
p1030
ssS'change'
p1031
(dp1032
g1024
I0
sg1025
I0
sg1026
S'0/17'
p1033
ssS'query'
p1034
(dp1035
g1024
I0
sg1025
I0
sg1026
S'0/65'
p1036
ssssVFennec
p1037
(dp1038
S'summary_display'
p1039
S'117/149'
p1040
sS'total_runs'
p1041
L6L
sS'summary_score'
p1042
I79
sS'results'
p1043
(dp1044
S'unapply'
p1045
(dp1046
S'score'
p1047
I65
sS'raw_score'
p1048
I0
sS'display'
p1049
S'17/26'
p1050
ssS'apply'
p1051
(dp1052
g1047
I83
sg1048
I0
sg1049
S'34/41'
p1053
ssS'change'
p1054
(dp1055
g1047
I41
sg1048
I0
sg1049
S'7/17'
p1056
ssS'query'
p1057
(dp1058
g1047
I91
sg1048
I0
sg1049
S'59/65'
p1059
ssssVChrome Frame (IE 6)
p1060
(dp1061
S'summary_display'
p1062
S'129/149'
p1063
sS'total_runs'
p1064
L2L
sS'summary_score'
p1065
I87
sS'results'
p1066
(dp1067
S'unapply'
p1068
(dp1069
S'score'
p1070
I88
sS'raw_score'
p1071
I0
sS'display'
p1072
S'23/26'
p1073
ssS'apply'
p1074
(dp1075
g1070
I95
sg1071
I0
sg1072
S'39/41'
p1076
ssS'change'
p1077
(dp1078
g1070
I100
sg1071
I0
sg1072
S'17/17'
p1079
ssS'query'
p1080
(dp1081
g1070
I77
sg1071
I0
sg1072
S'50/65'
p1082
ssssVFirefox (Namoroka)
p1083
(dp1084
S'summary_display'
p1085
S'117/149'
p1086
sS'total_runs'
p1087
L48L
sS'summary_score'
p1088
I79
sS'results'
p1089
(dp1090
S'unapply'
p1091
(dp1092
S'score'
p1093
I65
sS'raw_score'
p1094
I0
sS'display'
p1095
S'17/26'
p1096
ssS'apply'
p1097
(dp1098
g1093
I83
sg1094
I0
sg1095
S'34/41'
p1099
ssS'change'
p1100
(dp1101
g1093
I41
sg1094
I0
sg1095
S'7/17'
p1102
ssS'query'
p1103
(dp1104
g1093
I91
sg1094
I0
sg1095
S'59/65'
p1105
ssssVArora
p1106
(dp1107
S'summary_display'
p1108
S'111/149'
p1109
sS'total_runs'
p1110
L26L
sS'summary_score'
p1111
I74
sS'results'
p1112
(dp1113
S'unapply'
p1114
(dp1115
S'score'
p1116
I35
sS'raw_score'
p1117
I0
sS'display'
p1118
S'9/26'
p1119
ssS'apply'
p1120
(dp1121
g1116
I85
sg1117
I0
sg1118
S'35/41'
p1122
ssS'change'
p1123
(dp1124
g1116
I100
sg1117
I0
sg1118
S'17/17'
p1125
ssS'query'
p1126
(dp1127
g1116
I77
sg1117
I0
sg1118
S'50/65'
p1128
ssssVFlock
p1129
(dp1130
S'summary_display'
p1131
S'117/149'
p1132
sS'total_runs'
p1133
L8L
sS'summary_score'
p1134
I79
sS'results'
p1135
(dp1136
S'unapply'
p1137
(dp1138
S'score'
p1139
I65
sS'raw_score'
p1140
I0
sS'display'
p1141
S'17/26'
p1142
ssS'apply'
p1143
(dp1144
g1139
I83
sg1140
I0
sg1141
S'34/41'
p1145
ssS'change'
p1146
(dp1147
g1139
I41
sg1140
I0
sg1141
S'7/17'
p1148
ssS'query'
p1149
(dp1150
g1139
I91
sg1140
I0
sg1141
S'59/65'
p1151
ssssViCab
p1152
(dp1153
S'summary_display'
p1154
S'125/149'
p1155
sS'total_runs'
p1156
L1L
sS'summary_score'
p1157
I84
sS'results'
p1158
(dp1159
S'unapply'
p1160
(dp1161
S'score'
p1162
I73
sS'raw_score'
p1163
I0
sS'display'
p1164
S'19/26'
p1165
ssS'apply'
p1166
(dp1167
g1162
I95
sg1163
I0
sg1164
S'39/41'
p1168
ssS'change'
p1169
(dp1170
g1162
I100
sg1163
I0
sg1164
S'17/17'
p1171
ssS'query'
p1172
(dp1173
g1162
I77
sg1163
I0
sg1164
S'50/65'
p1174
ssssVChrome Frame (IE 8)
p1175
(dp1176
S'summary_display'
p1177
S'129/149'
p1178
sS'total_runs'
p1179
L12L
sS'summary_score'
p1180
I87
sS'results'
p1181
(dp1182
S'unapply'
p1183
(dp1184
S'score'
p1185
I88
sS'raw_score'
p1186
I0
sS'display'
p1187
S'23/26'
p1188
ssS'apply'
p1189
(dp1190
g1185
I95
sg1186
I0
sg1187
S'39/41'
p1191
ssS'change'
p1192
(dp1193
g1185
I100
sg1186
I0
sg1187
S'17/17'
p1194
ssS'query'
p1195
(dp1196
g1185
I77
sg1186
I0
sg1187
S'50/65'
p1197
ssss. | apache-2.0 |
RoelAdriaans-B-informed/website | website_blog_facebook_comment/controllers/main.py | 27 | 2016 | # -*- coding: utf-8 -*-
# Python source code encoding : https://www.python.org/dev/peps/pep-0263/
##############################################################################
#
# This module copyright :
# (c) 2015 Antiun Ingenieria, SL (Madrid, Spain, http://www.antiun.com)
# Endika Iglesias <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.addons.web import http
from openerp.addons.website_blog.controllers.main import WebsiteBlog
from openerp.http import request
class WebsiteBlog(WebsiteBlog):
@http.route([
"""/blog/<model('blog.blog'):blog>/post/"""
"""<model('blog.post', '[("blog_id","=", "blog[0]")]'):blog_post>"""],
type='http', auth="public", website=True)
def blog_post(self, blog, blog_post,
tag_id=None, page=1, enable_editor=None, **post):
        response = super(WebsiteBlog, self).blog_post(
            blog, blog_post, tag_id=tag_id, page=page,
            enable_editor=enable_editor, **post)
response.qcontext['appId'] = request.website.facebook_appid
response.qcontext['lang'] = request.context['lang']
response.qcontext['numposts'] = request.website.facebook_numposts
response.qcontext['base_url'] = request.httprequest.url
return response
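    # The qcontext keys set above (appId, lang, numposts, base_url) are
    # presumably read by a QWeb template that renders the Facebook comments
    # plugin on the blog post page; which template consumes them is an
    # assumption based on this controller alone.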
| agpl-3.0 |
cloudmesh/cloudmesh.docker | cloudmesh/api/docker_client.py | 2 | 17317 | #!/usr/bin/env python
# Docker class to connect to docker server box and perform docker operations
from __future__ import print_function
import cloudmesh
import docker
import os
import requests
import json
import sys
from cloudmesh.common.console import Console
from cloudmesh.common.Printer import Printer
import json
from cloudmesh.api.Rest_client import Rest
import time
class Docker(object):
def __init__(self, url):
os.environ["DOCKER_HOST"] = url
self.client = docker.from_env()
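    # Illustrative usage sketch; the host address, image tag and container
    # name below are placeholders rather than values from this repository:
    #   docker = Docker("127.0.0.1:2375")
    #   docker.host_create("127.0.0.1:2375", hostName="local")
    #   docker.container_run("ubuntu:16.04", containerName="demo",
    #                        kwargs={"command": "sleep 60"})
    #   docker.container_list()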
def host_create(self, addr, hostName=None):
"""Creates docker host
:param str addr: Address for docker
:param str hostName: Name of docker host
:returns: None
:rtype: NoneType
"""
try:
host = {}
host['Name'] = hostName
host['Ip'] = addr.split(':')[0]
host['Port'] = int(addr.split(':')[1])
host['Swarmmode'] = ''
host['SwarmmanagerIp'] = ''
host['Swarmhost'] = False
filter = {}
filter['Ip'] = addr.split(':')[0]
try:
scode, hosts = Rest.get('Host')
except Exception as e:
Console.error(e.message)
return
if len(hosts) != 0:
Console.ok('Host ' + hostName + ' is Added and is the default docker host')
return
r = Rest.post('Host', host, filter)
Console.ok('Host ' + hostName + ' is Added and is the default host')
except Exception as e:
Console.error(e.message)
return
def host_list(self):
"""List of docker containers
:returns: None
:rtype: NoneType
"""
try:
scode, hosts = Rest.get('Host')
except Exception as e:
Console.error(e.message)
return
if len(hosts) == 0:
print("No hosts exist")
return
n = 1
e = {}
for host in hosts:
d = {}
d['Ip'] = str(host['Ip'])
d['Name'] = str(host['Name'])
d['Port'] = str(host['Port'])
d['Swarmmode'] = str(host['Swarmmode'])
e[n] = d
n = n + 1
Console.ok(str(Printer.dict_table(e, order=['Ip', 'Name', 'Port', 'Swarmmode'])))
def host_delete(self, addr):
"""Deletes docker host
:param str addr: Address for docker
:returns: None
:rtype: NoneType
"""
try:
filter = {}
filter['Ip'] = addr.split(':')[0]
r = Rest.delete('Host', filter)
# Delete Host should delete all Containers and Networks for the host
r = Rest.delete('Container', filter)
r = Rest.delete('Network', filter)
Console.ok('Host ' + addr + ' is deleted')
except Exception as e:
Console.error(e.message)
return
def _container_action(self, action, msg, image, containerName=None, kwargs=None):
"""Creates docker container
:param str image: Available images for docker
:param str containerName: Name of docker container
:param list arg: custom args for container
:returns: str containeID: Id of the docker Container
:rtype: NoneType
"""
try:
container = action(image, name=containerName, detach=True, **kwargs)
data = []
container_dict = container.__dict__['attrs']
container_dict['Ip'] = os.environ["DOCKER_HOST"].split(':')[0]
# container_dict['State']['StartedAt'] = time.asctime(time.localtime(time.time()))
data.append(container_dict)
Rest.post('Container', data)
Console.ok('Container ' + container.name + ' is ' + msg)
return container.id
except docker.errors.APIError as e:
Console.error(e.explanation)
return
def container_create(self, image, containerName=None, kwargs=None):
"""Creates docker container
:param str image: Available images for docker
:param str containerName: Name of docker container
        :param dict kwargs: custom args for the container
        :returns: str containerId: Id of the created docker container
        :rtype: str
"""
# could possibly be replaced with
#
# return self._container_action(self.client.containers.create, "Created", image, containerName=None, kwargs=None)
#
try:
container = self.client.containers.create(image, name=containerName, detach=True, **kwargs)
data = []
container_dict = container.__dict__['attrs']
container_dict['Ip'] = os.environ["DOCKER_HOST"].split(':')[0]
# container_dict['State']['StartedAt'] = time.asctime(time.localtime(time.time()))
data.append(container_dict)
Rest.post('Container', data)
Console.ok('Container ' + container.name + ' is Created')
return container.id
except docker.errors.APIError as e:
Console.error(e.explanation)
return
def container_run(self, image, containerName=None, kwargs=None):
"""Creates docker container
:param str image: Available images for docker
:param str containerName: Name of docker container
:param list arg: custom args for container
:returns: str containeID: Id of the docker Container
:rtype: NoneType
"""
# could possibly be replaced with
#
# return self._container_action(self.client.containers.run, "Started", image, containerName=None, kwargs=None)
#
try:
container = self.client.containers.run(image, name=containerName, detach=True, **kwargs)
Console.ok("Container %s is created" % container.id)
data = []
container_dict = container.__dict__['attrs']
container_dict['Ip'] = os.environ["DOCKER_HOST"].split(':')[0]
# container_dict['State']['StartedAt'] = time.asctime(time.localtime(time.time()))
data.append(container_dict)
Rest.post('Container', data)
Console.ok('Container ' + container.name + ' is Started')
return container.id
except docker.errors.APIError as e:
Console.error(e.explanation)
return
def container_status_change(self, status=None, containerName=None, kwargs=None):
"""Change status of docker container
:param str status: Docker container status to be changed to
:param str containerName: Name of Docker container
:returns: None
:rtype: NoneType
"""
if status is None:
Console.info("No status specified")
return
try:
container = self.client.containers.get(containerName)
# need to check this ..
            if status == "start":
                container.start(**kwargs)
            elif status == "pause":
                container.pause(**kwargs)
            elif status == "unpause":
                container.unpause(**kwargs)
            elif status == "stop":
                container.stop(**kwargs)
            else:
                Console.error('Invalid Command')
return
container = self.client.containers.get(containerName)
filter = {}
container_dict = container.__dict__['attrs']
filter['Id'] = container_dict['Id']
filter['Ip'] = os.environ["DOCKER_HOST"].split(':')[0]
container_dict['Ip'] = os.environ["DOCKER_HOST"].split(':')[0]
Rest.post('Container', container_dict, filter)
Console.ok('Container ' + container.name + ' status changed to ' + status)
except docker.errors.APIError as e:
Console.error(e.explanation)
return
def container_delete(self, containerName=None, kwargs=None):
"""Deleting docker container
:param str containerName: Name of docker container
:returns: None
:rtype: NoneType
"""
try:
container = self.client.containers.get(containerName)
container.remove(**kwargs)
filter = {}
filter['Id'] = container.__dict__['attrs']['Id']
filter['Ip'] = os.environ["DOCKER_HOST"].split(':')[0]
Rest.delete('Container', filter)
Console.ok('Container ' + container.name + ' is deleted')
except docker.errors.APIError as e:
Console.error(e.explanation)
return
def container_list(self, kwargs=None):
"""List of docker containers
:returns: None
:rtype: NoneType
"""
try:
scode, containers = Rest.get('Container')
except docker.errors.APIError as e:
Console.error(e.explanation)
return
if len(containers) == 0:
print("No containers exist")
return
n = 1
e = {}
for container in containers:
d = {}
d['Ip'] = container['Ip']
d['Id'] = container['Id']
d['Name'] = container['Name']
d['Image'] = container['Config']['Image']
d['Status'] = container['State']['Status']
d['StartedAt'] = container['State']['StartedAt']
e[n] = d
n = n + 1
Console.ok(str(Printer.dict_table(e, order=['Ip', 'Id', 'Name', 'Image', 'Status', 'StartedAt'])))
def container_refresh(self, kwargs=None):
"""List of docker containers
:returns: None
:rtype: NoneType
"""
scode, hosts = Rest.get('Host')
filter = {}
n = 1
e = {}
data = []
for host in hosts:
os.environ["DOCKER_HOST"] = host['Ip'] + ":" + str(host['Port'])
self.client = docker.from_env()
filter['Ip'] = os.environ["DOCKER_HOST"].split(':')[0]
try:
                containers = self.client.containers.list(all=True, **kwargs)
except docker.errors.APIError as e:
Console.error(e.explanation)
Rest.delete('Container', filter)
continue
if len(containers) == 0:
print("No containers exist " + str(host['Ip']))
Rest.delete('Container', filter)
continue
for containerm in containers:
container = containerm.__dict__['attrs']
container['Ip'] = os.environ["DOCKER_HOST"].split(':')[0]
data.append(container)
d = {}
d['Ip'] = os.environ["DOCKER_HOST"].split(':')[0]
d['Id'] = container['Id']
d['Name'] = container['Name']
d['Image'] = container['Config']['Image']
d['Status'] = container['State']['Status']
d['StartedAt'] = container['State']['StartedAt']
e[n] = d
n = n + 1
Rest.delete('Container', filter)
Rest.post('Container', data)
Console.ok(str(Printer.dict_table(e, order=['Ip', 'Id', 'Name', 'Image', 'Status', 'StartedAt'])))
def images_list(self, kwargs=None):
"""List of docker images
:returns: None
:rtype: NoneType
"""
try:
scode, images = Rest.get('Image')
except docker.errors.APIError as e:
Console.error(e.explanation)
return
if len(images) == 0:
Console.info("No images exist")
return
n = 1
e = {}
for image in images:
d = {}
d['Ip'] = image['Ip']
d['Id'] = image['Id']
            if image['RepoTags'] is None:
d['Repository'] = image['RepoDigests'][0]
else:
d['Repository'] = image['RepoTags'][0]
# d['Size'] = image['Size']
d['Size(GB)'] = round(image['Size'] / float(1 << 30), 2) # Converting the size to GB
e[n] = d
n = n + 1
Console.ok(str(Printer.dict_table(e, order=['Ip', 'Id', 'Repository', 'Size(GB)'])))
def images_refresh(self, kwargs=None):
"""List of docker images
:returns: None
:rtype: NoneType
"""
scode, hosts = Rest.get('Host')
filter = {}
n = 1
e = {}
data = []
for host in hosts:
os.environ["DOCKER_HOST"] = host['Ip'] + ":" + str(host['Port'])
filter['Ip'] = os.environ["DOCKER_HOST"].split(':')[0]
self.client = docker.from_env()
try:
images = self.client.images.list(**kwargs)
except docker.errors.APIError as e:
Console.error(e.explanation)
return
if len(images) == 0:
Console.info("No images exist")
continue
for imagem in images:
image = imagem.__dict__['attrs']
image['Ip'] = os.environ["DOCKER_HOST"].split(':')[0]
data.append(image)
d = {}
d['Ip'] = os.environ["DOCKER_HOST"].split(':')[0]
d['Id'] = image['Id']
                if image['RepoTags'] is None:
d['Repository'] = image['RepoDigests'][0]
else:
d['Repository'] = image['RepoTags'][0]
# d['Size'] = image['Size']
d['Size(GB)'] = round(image['Size'] / float(1 << 30), 2)
e[n] = d
n = n + 1
Rest.delete('Image', filter)
Rest.post('Image', data)
Console.ok(str(Printer.dict_table(e, order=['Ip', 'Id', 'Repository', 'Size(GB)'])))
def network_create(self, networkName=None, kwargs=None):
"""Creates docker network
:param str image: Available images for docker
:param str networkName: Name of docker container
:param list arg: custom args for container
:returns: str networkID: Id of the docker Container
:rtype: NoneType
"""
try:
network = self.client.networks.create(name=networkName, **kwargs)
data = []
network_dict = network.__dict__['attrs']
network_dict['Ip'] = os.environ["DOCKER_HOST"].split(':')[0]
data.append(network_dict)
Rest.post('Network', data)
Console.ok("Network %s is created" % network.Name)
return network.id
except docker.errors.APIError as e:
Console.error(e.explanation)
return
def network_list(self, kwargs=None):
"""List of docker networks
:returns: None
:rtype: NoneType
"""
try:
scode, networks = Rest.get('Network')
except docker.errors.APIError as e:
Console.error(e.explanation)
return
if len(networks) == 0:
Console.info("No network exist")
return
n = 1
e = {}
data = []
for network in networks:
d = {}
d['Ip'] = network['Ip']
d['Id'] = network['Id']
d['Name'] = network['Name']
d['Containers'] = network['Containers']
e[n] = d
n = n + 1
Console.ok(str(Printer.dict_table(e, order=['Ip', 'Id', 'Name', 'Containers'])))
def network_refresh(self, kwargs=None):
"""List of docker networks
:returns: None
:rtype: NoneType
"""
scode, hosts = Rest.get('Host')
filter = {}
n = 1
e = {}
data = []
for host in hosts:
os.environ["DOCKER_HOST"] = host['Ip'] + ":" + str(host['Port'])
filter['Ip'] = os.environ["DOCKER_HOST"].split(':')[0]
self.client = docker.from_env()
try:
networks = self.client.networks.list(**kwargs)
except docker.errors.APIError as e:
Console.error(e.explanation)
continue
if len(networks) == 0:
Console.info("No network exist" + host['Ip'])
continue
for networkm in networks:
network = networkm.__dict__['attrs']
network['Ip'] = os.environ["DOCKER_HOST"].split(':')[0]
data.append(network)
d = {}
d['Ip'] = os.environ["DOCKER_HOST"].split(':')[0]
d['Id'] = network['Id']
d['Name'] = network['Name']
d['Containers'] = network['Containers']
e[n] = d
n = n + 1
r = Rest.delete('Network', filter)
r = Rest.post('Network', data)
Console.ok(str(Printer.dict_table(e, order=['Ip', 'Id', 'Name', 'Containers'])))
def process_config(self):
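        # NOTE: ConfigDict is not imported in this module and the config path
        # below is hard-coded, so this helper is assumed to be experimental;
        # it presumes a ConfigDict class (e.g. from cloudmesh.common) is
        # available at call time.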
Config = ConfigDict("docker.yaml",
verbose=True, load_order=[r'/home/ubuntu/git/cloudmesh.docker/config'])
Console.ok(Config['docker'])
| apache-2.0 |
2014c2g2/2014c2 | w2/static/Brython2.0.0-20140209-164925/Lib/base64.py | 733 | 13975 | #! /usr/bin/env python3
"""RFC 3548: Base16, Base32, Base64 Data Encodings"""
# Modified 04-Oct-1995 by Jack Jansen to use binascii module
# Modified 30-Dec-2003 by Barry Warsaw to add full RFC 3548 support
# Modified 22-May-2007 by Guido van Rossum to use bytes everywhere
import re
import struct
import binascii
__all__ = [
# Legacy interface exports traditional RFC 1521 Base64 encodings
'encode', 'decode', 'encodebytes', 'decodebytes',
# Generalized interface for other encodings
'b64encode', 'b64decode', 'b32encode', 'b32decode',
'b16encode', 'b16decode',
# Standard Base64 encoding
'standard_b64encode', 'standard_b64decode',
# Some common Base64 alternatives. As referenced by RFC 3458, see thread
# starting at:
#
# http://zgp.org/pipermail/p2p-hackers/2001-September/000316.html
'urlsafe_b64encode', 'urlsafe_b64decode',
]
bytes_types = (bytes, bytearray) # Types acceptable as binary data
def _bytes_from_decode_data(s):
if isinstance(s, str):
try:
return s.encode('ascii')
except UnicodeEncodeError:
raise ValueError('string argument should contain only ASCII characters')
elif isinstance(s, bytes_types):
return s
else:
raise TypeError("argument should be bytes or ASCII string, not %s" % s.__class__.__name__)
# Base64 encoding/decoding uses binascii
def b64encode(s, altchars=None):
"""Encode a byte string using Base64.
s is the byte string to encode. Optional altchars must be a byte
string of length 2 which specifies an alternative alphabet for the
'+' and '/' characters. This allows an application to
e.g. generate url or filesystem safe Base64 strings.
The encoded byte string is returned.
"""
if not isinstance(s, bytes_types):
raise TypeError("expected bytes, not %s" % s.__class__.__name__)
# Strip off the trailing newline
encoded = binascii.b2a_base64(s)[:-1]
if altchars is not None:
if not isinstance(altchars, bytes_types):
raise TypeError("expected bytes, not %s"
% altchars.__class__.__name__)
assert len(altchars) == 2, repr(altchars)
return encoded.translate(bytes.maketrans(b'+/', altchars))
return encoded
def b64decode(s, altchars=None, validate=False):
"""Decode a Base64 encoded byte string.
s is the byte string to decode. Optional altchars must be a
string of length 2 which specifies the alternative alphabet used
instead of the '+' and '/' characters.
The decoded string is returned. A binascii.Error is raised if s is
incorrectly padded.
If validate is False (the default), non-base64-alphabet characters are
discarded prior to the padding check. If validate is True,
non-base64-alphabet characters in the input result in a binascii.Error.
"""
s = _bytes_from_decode_data(s)
if altchars is not None:
altchars = _bytes_from_decode_data(altchars)
assert len(altchars) == 2, repr(altchars)
s = s.translate(bytes.maketrans(altchars, b'+/'))
if validate and not re.match(b'^[A-Za-z0-9+/]*={0,2}$', s):
raise binascii.Error('Non-base64 digit found')
return binascii.a2b_base64(s)
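# Illustrative round trip using the classic RFC 2617 sample string (comment
# only, not part of the original module):
#   b64encode(b'Aladdin:open sesame') -> b'QWxhZGRpbjpvcGVuIHNlc2FtZQ=='
#   b64decode(b'QWxhZGRpbjpvcGVuIHNlc2FtZQ==') -> b'Aladdin:open sesame'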
def standard_b64encode(s):
"""Encode a byte string using the standard Base64 alphabet.
s is the byte string to encode. The encoded byte string is returned.
"""
return b64encode(s)
def standard_b64decode(s):
"""Decode a byte string encoded with the standard Base64 alphabet.
s is the byte string to decode. The decoded byte string is
returned. binascii.Error is raised if the input is incorrectly
padded or if there are non-alphabet characters present in the
input.
"""
return b64decode(s)
_urlsafe_encode_translation = bytes.maketrans(b'+/', b'-_')
_urlsafe_decode_translation = bytes.maketrans(b'-_', b'+/')
def urlsafe_b64encode(s):
"""Encode a byte string using a url-safe Base64 alphabet.
s is the byte string to encode. The encoded byte string is
returned. The alphabet uses '-' instead of '+' and '_' instead of
'/'.
"""
return b64encode(s).translate(_urlsafe_encode_translation)
def urlsafe_b64decode(s):
"""Decode a byte string encoded with the standard Base64 alphabet.
s is the byte string to decode. The decoded byte string is
returned. binascii.Error is raised if the input is incorrectly
padded or if there are non-alphabet characters present in the
input.
The alphabet uses '-' instead of '+' and '_' instead of '/'.
"""
s = _bytes_from_decode_data(s)
s = s.translate(_urlsafe_decode_translation)
return b64decode(s)
# Base32 encoding/decoding must be done in Python
_b32alphabet = {
0: b'A', 9: b'J', 18: b'S', 27: b'3',
1: b'B', 10: b'K', 19: b'T', 28: b'4',
2: b'C', 11: b'L', 20: b'U', 29: b'5',
3: b'D', 12: b'M', 21: b'V', 30: b'6',
4: b'E', 13: b'N', 22: b'W', 31: b'7',
5: b'F', 14: b'O', 23: b'X',
6: b'G', 15: b'P', 24: b'Y',
7: b'H', 16: b'Q', 25: b'Z',
8: b'I', 17: b'R', 26: b'2',
}
_b32tab = [v[0] for k, v in sorted(_b32alphabet.items())]
_b32rev = dict([(v[0], k) for k, v in _b32alphabet.items()])
def b32encode(s):
"""Encode a byte string using Base32.
s is the byte string to encode. The encoded byte string is returned.
"""
if not isinstance(s, bytes_types):
raise TypeError("expected bytes, not %s" % s.__class__.__name__)
quanta, leftover = divmod(len(s), 5)
# Pad the last quantum with zero bits if necessary
if leftover:
s = s + bytes(5 - leftover) # Don't use += !
quanta += 1
encoded = bytearray()
for i in range(quanta):
# c1 and c2 are 16 bits wide, c3 is 8 bits wide. The intent of this
# code is to process the 40 bits in units of 5 bits. So we take the 1
# leftover bit of c1 and tack it onto c2. Then we take the 2 leftover
# bits of c2 and tack them onto c3. The shifts and masks are intended
# to give us values of exactly 5 bits in width.
c1, c2, c3 = struct.unpack('!HHB', s[i*5:(i+1)*5])
c2 += (c1 & 1) << 16 # 17 bits wide
c3 += (c2 & 3) << 8 # 10 bits wide
encoded += bytes([_b32tab[c1 >> 11], # bits 1 - 5
_b32tab[(c1 >> 6) & 0x1f], # bits 6 - 10
_b32tab[(c1 >> 1) & 0x1f], # bits 11 - 15
_b32tab[c2 >> 12], # bits 16 - 20 (1 - 5)
_b32tab[(c2 >> 7) & 0x1f], # bits 21 - 25 (6 - 10)
_b32tab[(c2 >> 2) & 0x1f], # bits 26 - 30 (11 - 15)
_b32tab[c3 >> 5], # bits 31 - 35 (1 - 5)
_b32tab[c3 & 0x1f], # bits 36 - 40 (1 - 5)
])
# Adjust for any leftover partial quanta
if leftover == 1:
encoded[-6:] = b'======'
elif leftover == 2:
encoded[-4:] = b'===='
elif leftover == 3:
encoded[-3:] = b'==='
elif leftover == 4:
encoded[-1:] = b'='
return bytes(encoded)
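# Illustrative padding behaviour, matching the RFC 4648 test vectors
# (comment only, not part of the original module):
#   b32encode(b'fooba') -> b'MZXW6YTB'   (5 input bytes, no padding)
#   b32encode(b'fo')    -> b'MZXQ===='   (2 leftover bytes, four '=' pads)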
def b32decode(s, casefold=False, map01=None):
"""Decode a Base32 encoded byte string.
s is the byte string to decode. Optional casefold is a flag
specifying whether a lowercase alphabet is acceptable as input.
For security purposes, the default is False.
RFC 3548 allows for optional mapping of the digit 0 (zero) to the
letter O (oh), and for optional mapping of the digit 1 (one) to
either the letter I (eye) or letter L (el). The optional argument
map01 when not None, specifies which letter the digit 1 should be
mapped to (when map01 is not None, the digit 0 is always mapped to
the letter O). For security purposes the default is None, so that
0 and 1 are not allowed in the input.
The decoded byte string is returned. binascii.Error is raised if
the input is incorrectly padded or if there are non-alphabet
characters present in the input.
"""
s = _bytes_from_decode_data(s)
quanta, leftover = divmod(len(s), 8)
if leftover:
raise binascii.Error('Incorrect padding')
# Handle section 2.4 zero and one mapping. The flag map01 will be either
# False, or the character to map the digit 1 (one) to. It should be
# either L (el) or I (eye).
if map01 is not None:
map01 = _bytes_from_decode_data(map01)
assert len(map01) == 1, repr(map01)
s = s.translate(bytes.maketrans(b'01', b'O' + map01))
if casefold:
s = s.upper()
# Strip off pad characters from the right. We need to count the pad
# characters because this will tell us how many null bytes to remove from
# the end of the decoded string.
padchars = 0
mo = re.search(b'(?P<pad>[=]*)$', s)
if mo:
padchars = len(mo.group('pad'))
if padchars > 0:
s = s[:-padchars]
# Now decode the full quanta
parts = []
acc = 0
shift = 35
for c in s:
val = _b32rev.get(c)
if val is None:
raise binascii.Error('Non-base32 digit found')
acc += _b32rev[c] << shift
shift -= 5
if shift < 0:
parts.append(binascii.unhexlify(bytes('%010x' % acc, "ascii")))
acc = 0
shift = 35
# Process the last, partial quanta
last = binascii.unhexlify(bytes('%010x' % acc, "ascii"))
if padchars == 0:
last = b'' # No characters
elif padchars == 1:
last = last[:-1]
elif padchars == 3:
last = last[:-2]
elif padchars == 4:
last = last[:-3]
elif padchars == 6:
last = last[:-4]
else:
raise binascii.Error('Incorrect padding')
parts.append(last)
return b''.join(parts)
# RFC 3548, Base 16 Alphabet specifies uppercase, but hexlify() returns
# lowercase. The RFC also recommends against accepting input case
# insensitively.
def b16encode(s):
"""Encode a byte string using Base16.
s is the byte string to encode. The encoded byte string is returned.
"""
if not isinstance(s, bytes_types):
raise TypeError("expected bytes, not %s" % s.__class__.__name__)
return binascii.hexlify(s).upper()
def b16decode(s, casefold=False):
"""Decode a Base16 encoded byte string.
s is the byte string to decode. Optional casefold is a flag
specifying whether a lowercase alphabet is acceptable as input.
For security purposes, the default is False.
The decoded byte string is returned. binascii.Error is raised if
s were incorrectly padded or if there are non-alphabet characters
present in the string.
"""
s = _bytes_from_decode_data(s)
if casefold:
s = s.upper()
if re.search(b'[^0-9A-F]', s):
raise binascii.Error('Non-base16 digit found')
return binascii.unhexlify(s)
# Legacy interface. This code could be cleaned up since I don't believe
# binascii has any line length limitations. It just doesn't seem worth it
# though. The files should be opened in binary mode.
MAXLINESIZE = 76 # Excluding the CRLF
MAXBINSIZE = (MAXLINESIZE//4)*3
def encode(input, output):
"""Encode a file; input and output are binary files."""
while True:
s = input.read(MAXBINSIZE)
if not s:
break
while len(s) < MAXBINSIZE:
ns = input.read(MAXBINSIZE-len(s))
if not ns:
break
s += ns
line = binascii.b2a_base64(s)
output.write(line)
def decode(input, output):
"""Decode a file; input and output are binary files."""
while True:
line = input.readline()
if not line:
break
s = binascii.a2b_base64(line)
output.write(s)
def encodebytes(s):
"""Encode a bytestring into a bytestring containing multiple lines
of base-64 data."""
if not isinstance(s, bytes_types):
raise TypeError("expected bytes, not %s" % s.__class__.__name__)
pieces = []
for i in range(0, len(s), MAXBINSIZE):
chunk = s[i : i + MAXBINSIZE]
pieces.append(binascii.b2a_base64(chunk))
return b"".join(pieces)
def encodestring(s):
"""Legacy alias of encodebytes()."""
import warnings
warnings.warn("encodestring() is a deprecated alias, use encodebytes()",
DeprecationWarning, 2)
return encodebytes(s)
def decodebytes(s):
"""Decode a bytestring of base-64 data into a bytestring."""
if not isinstance(s, bytes_types):
raise TypeError("expected bytes, not %s" % s.__class__.__name__)
return binascii.a2b_base64(s)
def decodestring(s):
"""Legacy alias of decodebytes()."""
import warnings
warnings.warn("decodestring() is a deprecated alias, use decodebytes()",
DeprecationWarning, 2)
return decodebytes(s)
# Usable as a script...
def main():
"""Small main program"""
import sys, getopt
try:
opts, args = getopt.getopt(sys.argv[1:], 'deut')
except getopt.error as msg:
sys.stdout = sys.stderr
print(msg)
print("""usage: %s [-d|-e|-u|-t] [file|-]
-d, -u: decode
-e: encode (default)
-t: encode and decode string 'Aladdin:open sesame'"""%sys.argv[0])
sys.exit(2)
func = encode
for o, a in opts:
if o == '-e': func = encode
if o == '-d': func = decode
if o == '-u': func = decode
if o == '-t': test(); return
if args and args[0] != '-':
with open(args[0], 'rb') as f:
func(f, sys.stdout.buffer)
else:
func(sys.stdin.buffer, sys.stdout.buffer)
def test():
s0 = b"Aladdin:open sesame"
print(repr(s0))
s1 = encodebytes(s0)
print(repr(s1))
s2 = decodebytes(s1)
print(repr(s2))
assert s0 == s2
if __name__ == '__main__':
main()
| gpl-2.0 |
angr/angr | tests/test_accuracy.py | 1 | 3876 | import nose
import angr
import os
test_location = os.path.join(os.path.dirname(os.path.realpath(str(__file__))), '..', '..', 'binaries', 'tests')
arch_data = { # (steps, [hit addrs], finished)
'x86_64': (330, (0x1021c20, 0x1021980, 0x1021be0, 0x4004b0, 0x400440, 0x400570), True),
'i386': (425, (0x90198e0, 0x90195c0, 0x9019630, 0x90198a0, 0x8048370, 0x80482f8, 0x8048440, 0x804846D, 0x8048518), True),
'ppc': (381, (0x11022f50, 0x11022eb0, 0x10000340, 0x100002e8, 0x1000053C, 0x1000063C), True),
'ppc64': (372, (0x11047490, 0x100003fc, 0x10000368, 0x10000654, 0x10000770), True),
'mips': (363, (0x1016f20, 0x400500, 0x400470, 0x400640, 0x400750), True),
'mips64': (390, (0x12103b828, 0x120000870, 0x1200007e0, 0x120000A80, 0x120000B68), True),
'armel': (370, (0x10154b8, 0x1108244, 0x83a8, 0x8348, 0x84b0, 0x84E4, 0x85E8), True),
'aarch64': (370, (0x1020b04, 0x400430, 0x4003b8, 0x400538, 0x400570, 0x40062C), True),
}
def emulate(arch, binary, use_sim_procs, steps, hit_addrs, finished):
p = angr.Project(os.path.join(test_location, arch, binary), use_sim_procedures=use_sim_procs, rebase_granularity=0x1000000, load_debug_info=False)
state = p.factory.full_init_state(args=['./test_arrays'], add_options={angr.options.STRICT_PAGE_ACCESS, angr.options.ENABLE_NX, angr.options.ZERO_FILL_UNCONSTRAINED_MEMORY, angr.options.USE_SYSTEM_TIMES})
pg = p.factory.simulation_manager(state, resilience=True)
pg2 = pg.run(until=lambda lpg: len(lpg.active) != 1)
is_finished = False
if len(pg2.active) > 0:
state = pg2.active[0]
elif len(pg2.deadended) > 0:
state = pg2.deadended[0]
is_finished = True
elif len(pg2.errored) > 0:
state = pg2.errored[0].state # ErroredState object!
else:
raise ValueError("The result does not contain a state we can use for this test?")
nose.tools.assert_greater_equal(state.history.depth, steps)
# this is some wonky control flow that asserts that the items in hit_addrs appear in the state in order.
trace = state.history.bbl_addrs.hardcopy
reqs = list(hit_addrs)
while len(reqs) > 0:
req = reqs.pop(0)
while True:
nose.tools.assert_greater(len(trace), 0)
trace_head = trace.pop(0)
if trace_head == req:
break
nose.tools.assert_not_in(trace_head, reqs)
if finished:
nose.tools.assert_true(is_finished)
def test_emulation():
for arch in arch_data:
steps, hit_addrs, finished = arch_data[arch]
yield emulate, arch, 'test_arrays', False, steps, hit_addrs, finished
def test_windows():
yield emulate, 'i386', 'test_arrays.exe', True, 41, [], False # blocked on GetLastError or possibly dynamic loading
def test_locale():
p = angr.Project(os.path.join(test_location, 'i386', 'isalnum'), use_sim_procedures=False)
state = p.factory.full_init_state(args=['./isalnum'], add_options={angr.options.STRICT_PAGE_ACCESS})
pg = p.factory.simulation_manager(state)
pg2 = pg.run(until=lambda lpg: len(lpg.active) != 1,
step_func=lambda lpg: lpg if len(lpg.active) == 1 else lpg.prune()
)
nose.tools.assert_equal(len(pg2.active), 0)
nose.tools.assert_equal(len(pg2.deadended), 1)
nose.tools.assert_equal(pg2.deadended[0].history.events[-1].type, 'terminate')
nose.tools.assert_equal(pg2.deadended[0].history.events[-1].objects['exit_code']._model_concrete.value, 0)
if __name__ == '__main__':
#emulate('armel', 'test_arrays', False, *arch_data['armel'])
#import sys; sys.exit()
for func, a, b, c, d, e, f in test_windows():
print(a, b)
func(a, b, c, d, e, f)
print('locale')
test_locale()
for func, a, b, c, d, e, f in test_emulation():
print(a, b)
func(a, b, c, d, e, f)
| bsd-2-clause |
Denisolt/IEEE-NYIT-MA | local/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/escprober.py | 2936 | 3187 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
from .escsm import (HZSMModel, ISO2022CNSMModel, ISO2022JPSMModel,
ISO2022KRSMModel)
from .charsetprober import CharSetProber
from .codingstatemachine import CodingStateMachine
from .compat import wrap_ord
class EscCharSetProber(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mCodingSM = [
CodingStateMachine(HZSMModel),
CodingStateMachine(ISO2022CNSMModel),
CodingStateMachine(ISO2022JPSMModel),
CodingStateMachine(ISO2022KRSMModel)
]
self.reset()
def reset(self):
CharSetProber.reset(self)
for codingSM in self._mCodingSM:
if not codingSM:
continue
codingSM.active = True
codingSM.reset()
self._mActiveSM = len(self._mCodingSM)
self._mDetectedCharset = None
def get_charset_name(self):
return self._mDetectedCharset
def get_confidence(self):
if self._mDetectedCharset:
return 0.99
else:
return 0.00
def feed(self, aBuf):
for c in aBuf:
# PY3K: aBuf is a byte array, so c is an int, not a byte
for codingSM in self._mCodingSM:
if not codingSM:
continue
if not codingSM.active:
continue
codingState = codingSM.next_state(wrap_ord(c))
if codingState == constants.eError:
codingSM.active = False
self._mActiveSM -= 1
if self._mActiveSM <= 0:
self._mState = constants.eNotMe
return self.get_state()
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
self._mDetectedCharset = codingSM.get_coding_state_machine() # nopep8
return self.get_state()
return self.get_state()
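# Illustrative usage sketch (the escape-sequence bytes below are invented for
# demonstration and are not taken from this file):
#   prober = EscCharSetProber()
#   prober.feed(b'\x1b$B...')
#   if prober.get_state() == constants.eFoundIt:
#       print(prober.get_charset_name(), prober.get_confidence())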
| gpl-3.0 |
wilecoyote2015/VerySharp | FlowCalculator.py | 1 | 9780 | # -*- coding: utf-8 -*-
"""
This file is part of verysharp,
copyright (c) 2016 Björn Sonnenschein.
verysharp is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
verysharp is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with verysharp. If not, see <http://www.gnu.org/licenses/>.
"""
import CommonFunctions
import cv2
import numpy as np
import os
## Provides a Method to calculate OpenCV maps for a series of images
# that can be used to correct seeing distortion with the OpenCV remap function
class FlowCalculator:
def __init__(self, config, scale_factor):
self.config = config
self.extension = int(config["FITS_Options"]["extension"])
self.scale_factor = scale_factor
self.optical_flow_output_directory = config["Filepaths"]["monitoring_images_output_directory"]
self.pyr_scale = float(config["Optical_Flow_Options"]["pyr_scale"])
self.levels = int(config["Optical_Flow_Options"]["levels"])
self.winsize = int(config["Optical_Flow_Options"]["winsize"])
self.iterations = int(config["Optical_Flow_Options"]["iterations"])
self.poly_n = int(config["Optical_Flow_Options"]["poly_n"])
self.poly_sigma = float(config["Optical_Flow_Options"]["poly_sigma"])
## calculate a Distortion map based on optical flow
# @param dataset ImageDataHolder object with filled hdulists, filled
# transform matrices and empty distortion maps
    # @return dataset with filled transform_matrices for upscaled images
# @todo: do not calculate distortion relative to reference image,
# but as average!
def calculateDistortionMaps(self, dataset):
# set the first image as reference
first_data = dataset.getData(0)
first_hdu_image = first_data["hdu_list"][self.extension].data
image_reference = CommonFunctions.preprocessHduImage(first_hdu_image,
self.scale_factor)
# @todo: for case that alignment is not relative to first image, also
# do alignment transformation for first image!
# calculate the optical flows for each image
list_optical_flows = self.calculateOpticalFlowsForDataset(image_reference,
dataset)
# calculate the mean optical flow in order to correct the seeing
# not relative to the reference image, but relative to the
# average shape which should approximate the real object better.
optical_flow_mean = self.calculateMeanOpticalFlow(list_optical_flows)
# calculate the distortion maps from optical flows for each image
num_optical_flows = len(list_optical_flows)
for index in range(num_optical_flows):
optical_flow = list_optical_flows[index]
# subtract mean optical flow
optical_flow -= optical_flow_mean
# create the distortion map from the optical flow vectorfield
distortion_map = self.convertOpticalFlowToDistortionMap(optical_flow)
# set the distortion map in the dataset for this image
dataset.setDistortionMap(index, distortion_map)
return dataset
## Calculates optical flows for a set of images relative to first image
# @param image_reference reference image as numpy float32 array
# @param dataset ImageDataHolder object with filled hdulists, filled
# transform matrices and empty distortion maps
# @return list object containing optical flows as numpy array
def calculateOpticalFlowsForDataset(self, image_reference, dataset):
# optical flows will be stored here
list_optical_flows = []
# add zero optical flow for the reference image which is at first position
shape_image_reference = image_reference.shape
shape_optical_flow = [shape_image_reference[0],
shape_image_reference[1],
2]
zero_optical_flow = np.zeros(shape_optical_flow, np.float32)
list_optical_flows.append(zero_optical_flow)
# iterate through the dataset and calculate the optical flow for each
# except the first one
num_images = dataset.getImageCount()
for index in range(1, num_images):
print ("calculating optical flow for image ", index)
# Get the image at the index
data = dataset.getData(index)
hdu_image = data["hdu_list"][self.extension].data
image = CommonFunctions.preprocessHduImage(hdu_image,
self.scale_factor)
# @todo: here, do not use config but simply check if matrix is None
# apply the transformation to the input image
if self.config["Processing_Options"]["align_images"] == "True":
# get the image dimension
image_shape = image.shape
# Transform the Image
image = cv2.warpAffine(image,
data["transform_matrix"],
(image_shape[1],image_shape[0]),
flags=cv2.INTER_CUBIC + cv2.WARP_INVERSE_MAP)
# calculate the optical flow (backwards for warping!)
optical_flow = cv2.calcOpticalFlowFarneback(image_reference,
image,
None,
self.pyr_scale,
self.levels,
self.winsize,
self.iterations,
self.poly_n,
self.poly_sigma,
cv2.OPTFLOW_FARNEBACK_GAUSSIAN)
# Write out optical flow images for user evaluation
self.writeOpticalFlowImage(index, optical_flow)
list_optical_flows.append(optical_flow)
return list_optical_flows
## Average a list of optical flows
# @param list_optical_flows list object containing optical flows as numpy arrays
# @return averaged optical flow as numpy array
def calculateMeanOpticalFlow(self, list_optical_flows):
# create zero optical flow where other flows will be added and averaged
optical_flow_mean = np.zeros_like(list_optical_flows[0])
# average all optical flows in list
num_optical_flows = len(list_optical_flows)
for optical_flow in list_optical_flows:
optical_flow_mean += optical_flow / float(num_optical_flows)
return optical_flow_mean
## Calculate an OpenCV map that can be used to remap an image according
# to the optical flow vector field using OpenCV remap function
# @param optical_flow optical flow as numpy array
    # @return distortion_map OpenCV remap maps [map_x, map_y] as numpy arrays
def convertOpticalFlowToDistortionMap(self, optical_flow):
# get x and y resolution of optical flow (and so also of image)
shape_optical_flow = optical_flow.shape[:-1]
# create empty distortion maps for x and y separately because
# opencv remap needs this
distortion_map_x = np.zeros(shape_optical_flow, np.float32) # only x and y
distortion_map_y = np.zeros(shape_optical_flow, np.float32) # only x and y
# fill the distortion maps
for x in range(shape_optical_flow[1]):
distortion_map_x[:,x] = optical_flow[:,x,0] + x
for y in range(shape_optical_flow[0]):
distortion_map_y[y] = optical_flow[y,:,1] + y
distortion_map = [distortion_map_x, distortion_map_y]
return distortion_map
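        # Remap sketch (an assumption about the caller, which is outside this
        # file): the two maps returned here plug straight into cv2.remap, e.g.
        #   map_x, map_y = distortion_map
        #   corrected = cv2.remap(image, map_x, map_y, cv2.INTER_CUBIC)
        # so each output pixel is pulled from the source position the optical
        # flow points to.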
## Create a colorful representation of the optical flow, where intensity
# denotes vector length and huw denotes vector direction
def writeOpticalFlowImage(self, index, optical_flow):
filename = "flow_" + str(index) + ".png"
output_path = os.path.join(self.optical_flow_output_directory, filename)
# create hsv image
shape_optical_flow = optical_flow.shape[:-1]
shape_hsv = [shape_optical_flow[0], shape_optical_flow[1], 3]
hsv = np.zeros(shape_hsv, np.float32)
# set saturation to 255
hsv[:,:,1] = 255
# create colorful illustration of optical flow
mag, ang = cv2.cartToPolar(optical_flow[:,:,0], optical_flow[:,:,1])
hsv[:,:,0] = ang*180/np.pi/2
hsv[:,:,2] = cv2.normalize(mag,None,0,255,cv2.NORM_MINMAX)
bgr = cv2.cvtColor(hsv,cv2.COLOR_HSV2BGR)
cv2.imwrite(output_path, bgr) | gpl-3.0 |
hamishwillee/ardupilot | Tools/autotest/param_metadata/htmlemit.py | 13 | 3279 | #!/usr/bin/env python
"""
Emit docs in a form acceptable to the old Ardupilot wordpress docs site
"""
from param import known_param_fields, known_units
from emit import Emit
import cgi
class HtmlEmit(Emit):
def __init__(self):
Emit.__init__(self)
html_fname = 'Parameters.html'
self.f = open(html_fname, mode='w')
self.preamble = """<!-- Dynamically generated list of documented parameters
This page was generated using Tools/autotest/param_metadata/param_parse.py
DO NOT EDIT
-->
<h3 style="text-align: center">Complete Parameter List</h3>
<hr />
<p>This is a complete list of the parameters which can be set via the MAVLink protocol in the EEPROM of your APM to control vehicle behaviour. This list is automatically generated from the latest ardupilot source code, and so may contain parameters which are not yet in the stable released versions of the code.</p>
<!-- add auto-generated table of contents with "Table of Contents Plus" plugin -->
[toc exclude="Complete Parameter List"]
"""
self.t = ''
def escape(self, s):
s = s.replace(' ', '-')
s = s.replace(':', '-')
s = s.replace('(', '')
s = s.replace(')', '')
return s
def close(self):
self.f.write(self.preamble)
self.f.write(self.t)
self.f.close()
def start_libraries(self):
pass
def emit(self, g):
tag = '%s Parameters' % g.name
t = '\n\n<h1>%s</h1>\n' % tag
for param in g.params:
if not hasattr(param, 'DisplayName') or not hasattr(param, 'Description'):
continue
d = param.__dict__
tag = '%s (%s)' % (param.DisplayName, param.name)
t += '\n\n<h2>%s</h2>' % tag
if d.get('User', None) == 'Advanced':
t += '<em>Note: This parameter is for advanced users</em><br>'
t += "\n\n<p>%s</p>\n" % cgi.escape(param.Description)
t += "<ul>\n"
for field in param.__dict__.keys():
if field not in ['name', 'DisplayName', 'Description', 'User'] and field in known_param_fields:
if field == 'Values' and Emit.prog_values_field.match(param.__dict__[field]):
values = (param.__dict__[field]).split(',')
t += "<table><th>Value</th><th>Meaning</th>\n"
for value in values:
v = value.split(':')
if len(v) != 2:
raise ValueError("Bad value (%s)" % v)
t += "<tr><td>%s</td><td>%s</td></tr>\n" % (v[0], v[1])
t += "</table>\n"
elif field == 'Units':
abreviated_units = param.__dict__[field]
if abreviated_units != '':
units = known_units[abreviated_units] # use the known_units dictionary to convert the abreviated unit into a full textual one
t += "<li>%s: %s</li>\n" % (field, cgi.escape(units))
else:
t += "<li>%s: %s</li>\n" % (field, cgi.escape(param.__dict__[field]))
t += "</ul>\n"
self.t += t
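# Driving sketch (an assumption -- the real driver is param_parse.py, which is
# not included here): the emitter is typically used as
#   emitter = HtmlEmit()
#   emitter.emit(group)      # once per parameter group
#   emitter.close()          # writes Parameters.html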
| gpl-3.0 |
bud4/samba | third_party/dnspython/examples/zonediff.py | 79 | 10711 | #!/usr/bin/env python
#
# Small library and commandline tool to do logical diffs of zonefiles
# ./zonediff -h gives you help output
#
# Requires dnspython to do all the heavy lifting
#
# (c)2009 Dennis Kaarsemaker <[email protected]>
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""See diff_zones.__doc__ for more information"""
__all__ = ['diff_zones', 'format_changes_plain', 'format_changes_html']
try:
import dns.zone
except ImportError:
import sys
sys.stderr.write("Please install dnspython")
sys.exit(1)
def diff_zones(zone1, zone2, ignore_ttl=False, ignore_soa=False):
"""diff_zones(zone1, zone2, ignore_ttl=False, ignore_soa=False) -> changes
Compares two dns.zone.Zone objects and returns a list of all changes
in the format (name, oldnode, newnode).
If ignore_ttl is true, a node will not be added to this list if the
only change is its TTL.
If ignore_soa is true, a node will not be added to this list if the
only changes is a change in a SOA Rdata set.
The returned nodes do include all Rdata sets, including unchanged ones.
"""
changes = []
for name in zone1:
name = str(name)
n1 = zone1.get_node(name)
n2 = zone2.get_node(name)
if not n2:
changes.append((str(name), n1, n2))
elif _nodes_differ(n1, n2, ignore_ttl, ignore_soa):
changes.append((str(name), n1, n2))
for name in zone2:
n1 = zone1.get_node(name)
if not n1:
n2 = zone2.get_node(name)
changes.append((str(name), n1, n2))
return changes
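# Example use (a sketch; the zonefile names here are hypothetical):
#   old_zone = dns.zone.from_file('db.example.old', origin='.', check_origin=False)
#   new_zone = dns.zone.from_file('db.example.new', origin='.', check_origin=False)
#   for name, oldnode, newnode in diff_zones(old_zone, new_zone, ignore_ttl=True):
#       print name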
def _nodes_differ(n1, n2, ignore_ttl, ignore_soa):
if ignore_soa or not ignore_ttl:
# Compare datasets directly
for r in n1.rdatasets:
if ignore_soa and r.rdtype == dns.rdatatype.SOA:
continue
if r not in n2.rdatasets:
return True
if not ignore_ttl:
return r.ttl != n2.find_rdataset(r.rdclass, r.rdtype).ttl
for r in n2.rdatasets:
if ignore_soa and r.rdtype == dns.rdatatype.SOA:
continue
if r not in n1.rdatasets:
return True
else:
return n1 != n2
def format_changes_plain(oldf, newf, changes, ignore_ttl=False):
"""format_changes(oldfile, newfile, changes, ignore_ttl=False) -> str
Given 2 filenames and a list of changes from diff_zones, produce diff-like
output. If ignore_ttl is True, TTL-only changes are not displayed"""
ret = "--- %s\n+++ %s\n" % (oldf, newf)
for name, old, new in changes:
ret += "@ %s\n" % name
if not old:
for r in new.rdatasets:
ret += "+ %s\n" % str(r).replace('\n','\n+ ')
elif not new:
for r in old.rdatasets:
ret += "- %s\n" % str(r).replace('\n','\n+ ')
else:
for r in old.rdatasets:
if r not in new.rdatasets or (r.ttl != new.find_rdataset(r.rdclass, r.rdtype).ttl and not ignore_ttl):
ret += "- %s\n" % str(r).replace('\n','\n+ ')
for r in new.rdatasets:
if r not in old.rdatasets or (r.ttl != old.find_rdataset(r.rdclass, r.rdtype).ttl and not ignore_ttl):
ret += "+ %s\n" % str(r).replace('\n','\n+ ')
return ret
def format_changes_html(oldf, newf, changes, ignore_ttl=False):
"""format_changes(oldfile, newfile, changes, ignore_ttl=False) -> str
Given 2 filenames and a list of changes from diff_zones, produce nice html
output. If ignore_ttl is True, TTL-only changes are not displayed"""
ret = '''<table class="zonediff">
<thead>
<tr>
<th> </th>
<th class="old">%s</th>
<th class="new">%s</th>
</tr>
</thead>
<tbody>\n''' % (oldf, newf)
for name, old, new in changes:
ret += ' <tr class="rdata">\n <td class="rdname">%s</td>\n' % name
if not old:
for r in new.rdatasets:
ret += ' <td class="old"> </td>\n <td class="new">%s</td>\n' % str(r).replace('\n','<br />')
elif not new:
for r in old.rdatasets:
ret += ' <td class="old">%s</td>\n <td class="new"> </td>\n' % str(r).replace('\n','<br />')
else:
ret += ' <td class="old">'
for r in old.rdatasets:
if r not in new.rdatasets or (r.ttl != new.find_rdataset(r.rdclass, r.rdtype).ttl and not ignore_ttl):
ret += str(r).replace('\n','<br />')
ret += '</td>\n'
ret += ' <td class="new">'
for r in new.rdatasets:
if r not in old.rdatasets or (r.ttl != old.find_rdataset(r.rdclass, r.rdtype).ttl and not ignore_ttl):
ret += str(r).replace('\n','<br />')
ret += '</td>\n'
ret += ' </tr>\n'
return ret + ' </tbody>\n</table>'
# Make this module usable as a script too.
if __name__ == '__main__':
import optparse
import subprocess
import sys
import traceback
usage = """%prog zonefile1 zonefile2 - Show differences between zones in a diff-like format
%prog [--git|--bzr|--rcs] zonefile rev1 [rev2] - Show differences between two revisions of a zonefile
The differences shown will be logical differences, not textual differences.
"""
p = optparse.OptionParser(usage=usage)
p.add_option('-s', '--ignore-soa', action="store_true", default=False, dest="ignore_soa",
help="Ignore SOA-only changes to records")
p.add_option('-t', '--ignore-ttl', action="store_true", default=False, dest="ignore_ttl",
help="Ignore TTL-only changes to Rdata")
p.add_option('-T', '--traceback', action="store_true", default=False, dest="tracebacks",
help="Show python tracebacks when errors occur")
p.add_option('-H', '--html', action="store_true", default=False, dest="html",
help="Print HTML output")
p.add_option('-g', '--git', action="store_true", default=False, dest="use_git",
help="Use git revisions instead of real files")
p.add_option('-b', '--bzr', action="store_true", default=False, dest="use_bzr",
help="Use bzr revisions instead of real files")
p.add_option('-r', '--rcs', action="store_true", default=False, dest="use_rcs",
help="Use rcs revisions instead of real files")
opts, args = p.parse_args()
opts.use_vc = opts.use_git or opts.use_bzr or opts.use_rcs
def _open(what, err):
if isinstance(what, basestring):
# Open as normal file
try:
return open(what, 'rb')
except:
sys.stderr.write(err + "\n")
if opts.tracebacks:
traceback.print_exc()
else:
# Must be a list, open subprocess
try:
proc = subprocess.Popen(what, stdout=subprocess.PIPE)
proc.wait()
if proc.returncode == 0:
return proc.stdout
sys.stderr.write(err + "\n")
except:
sys.stderr.write(err + "\n")
if opts.tracebacks:
traceback.print_exc()
if not opts.use_vc and len(args) != 2:
p.print_help()
sys.exit(64)
if opts.use_vc and len(args) not in (2,3):
p.print_help()
sys.exit(64)
    # Open file descriptors
if not opts.use_vc:
oldn, newn = args
else:
if len(args) == 3:
filename, oldr, newr = args
oldn = "%s:%s" % (oldr, filename)
newn = "%s:%s" % (newr, filename)
else:
filename, oldr = args
newr = None
oldn = "%s:%s" % (oldr, filename)
newn = filename
old, new = None, None
oldz, newz = None, None
if opts.use_bzr:
old = _open(["bzr", "cat", "-r" + oldr, filename],
"Unable to retrieve revision %s of %s" % (oldr, filename))
if newr != None:
new = _open(["bzr", "cat", "-r" + newr, filename],
"Unable to retrieve revision %s of %s" % (newr, filename))
elif opts.use_git:
old = _open(["git", "show", oldn],
"Unable to retrieve revision %s of %s" % (oldr, filename))
if newr != None:
new = _open(["git", "show", newn],
"Unable to retrieve revision %s of %s" % (newr, filename))
elif opts.use_rcs:
old = _open(["co", "-q", "-p", "-r" + oldr, filename],
"Unable to retrieve revision %s of %s" % (oldr, filename))
if newr != None:
new = _open(["co", "-q", "-p", "-r" + newr, filename],
"Unable to retrieve revision %s of %s" % (newr, filename))
if not opts.use_vc:
old = _open(oldn, "Unable to open %s" % oldn)
if not opts.use_vc or newr == None:
new = _open(newn, "Unable to open %s" % newn)
if not old or not new:
sys.exit(65)
# Parse the zones
try:
oldz = dns.zone.from_file(old, origin = '.', check_origin=False)
except dns.exception.DNSException:
sys.stderr.write("Incorrect zonefile: %s\n", old)
if opts.tracebacks:
traceback.print_exc()
try:
newz = dns.zone.from_file(new, origin = '.', check_origin=False)
except dns.exception.DNSException:
sys.stderr.write("Incorrect zonefile: %s\n" % new)
if opts.tracebacks:
traceback.print_exc()
if not oldz or not newz:
sys.exit(65)
changes = diff_zones(oldz, newz, opts.ignore_ttl, opts.ignore_soa)
changes.sort()
if not changes:
sys.exit(0)
if opts.html:
print format_changes_html(oldn, newn, changes, opts.ignore_ttl)
else:
print format_changes_plain(oldn, newn, changes, opts.ignore_ttl)
sys.exit(1)
| gpl-3.0 |
PaulStoffregen/Arduino-1.6.5r4-Teensyduino | arduino-core/src/processing/app/i18n/python/requests/compat.py | 289 | 2433 | # -*- coding: utf-8 -*-
"""
pythoncompat
"""
from .packages import charade as chardet
import sys
# -------
# Pythons
# -------
# Syntax sugar.
_ver = sys.version_info
#: Python 2.x?
is_py2 = (_ver[0] == 2)
#: Python 3.x?
is_py3 = (_ver[0] == 3)
#: Python 3.0.x
is_py30 = (is_py3 and _ver[1] == 0)
#: Python 3.1.x
is_py31 = (is_py3 and _ver[1] == 1)
#: Python 3.2.x
is_py32 = (is_py3 and _ver[1] == 2)
#: Python 3.3.x
is_py33 = (is_py3 and _ver[1] == 3)
#: Python 3.4.x
is_py34 = (is_py3 and _ver[1] == 4)
#: Python 2.7.x
is_py27 = (is_py2 and _ver[1] == 7)
#: Python 2.6.x
is_py26 = (is_py2 and _ver[1] == 6)
#: Python 2.5.x
is_py25 = (is_py2 and _ver[1] == 5)
#: Python 2.4.x
is_py24 = (is_py2 and _ver[1] == 4) # I'm assuming this is not by choice.
# ---------
# Platforms
# ---------
# Syntax sugar.
_ver = sys.version.lower()
is_pypy = ('pypy' in _ver)
is_jython = ('jython' in _ver)
is_ironpython = ('iron' in _ver)
# Assume CPython, if nothing else.
is_cpython = not any((is_pypy, is_jython, is_ironpython))
# Windows-based system.
is_windows = 'win32' in str(sys.platform).lower()
# Standard Linux 2+ system.
is_linux = ('linux' in str(sys.platform).lower())
is_osx = ('darwin' in str(sys.platform).lower())
is_hpux = ('hpux' in str(sys.platform).lower()) # Complete guess.
is_solaris = ('solar==' in str(sys.platform).lower()) # Complete guess.
try:
import simplejson as json
except ImportError:
import json
# ---------
# Specifics
# ---------
if is_py2:
from urllib import quote, unquote, quote_plus, unquote_plus, urlencode
from urlparse import urlparse, urlunparse, urljoin, urlsplit, urldefrag
from urllib2 import parse_http_list
import cookielib
from Cookie import Morsel
from StringIO import StringIO
from .packages.urllib3.packages.ordered_dict import OrderedDict
builtin_str = str
bytes = str
str = unicode
basestring = basestring
numeric_types = (int, long, float)
elif is_py3:
from urllib.parse import urlparse, urlunparse, urljoin, urlsplit, urlencode, quote, unquote, quote_plus, unquote_plus, urldefrag
from urllib.request import parse_http_list
from http import cookiejar as cookielib
from http.cookies import Morsel
from io import StringIO
from collections import OrderedDict
builtin_str = str
str = str
bytes = bytes
basestring = (str, bytes)
numeric_types = (int, float)
| lgpl-2.1 |
msumit/qds-sdk-py | tests/test_app.py | 6 | 5993 | from __future__ import print_function
import sys
import os
if sys.version_info > (2, 7, 0):
import unittest
else:
import unittest2 as unittest
from mock import Mock
sys.path.append(os.path.join(os.path.dirname(__file__), '../bin'))
import qds
from qds_sdk.connection import Connection
from test_base import print_command
from test_base import QdsCliTestCase
class TestAppList(QdsCliTestCase):
def test_minimal(self):
sys.argv = ['qds.py', 'app', 'list']
print_command()
Connection._api_call = Mock(return_value={})
qds.main()
Connection._api_call.assert_called_with("GET", "apps", params=None)
class TestAppShow(QdsCliTestCase):
def test_minimal(self):
sys.argv = ['qds.py', 'app', 'show', '123']
print_command()
Connection._api_call = Mock(return_value={})
qds.main()
Connection._api_call.assert_called_with("GET", "apps/123", params=None)
def test_fail_with_no_id(self):
sys.argv = ['qds.py', 'app', 'show']
print_command()
with self.assertRaises(SystemExit):
qds.main()
def test_fail_with_bad_id(self):
sys.argv = ['qds.py', 'app', 'show', 'notanumber']
print_command()
with self.assertRaises(SystemExit):
qds.main()
class TestAppCreate(QdsCliTestCase):
def test_fail_with_no_argument(self):
sys.argv = ['qds.py', 'app', 'create']
print_command()
with self.assertRaises(SystemExit):
qds.main()
def test_minimal(self):
sys.argv = ['qds.py', 'app', 'create', '--name', 'appname']
print_command()
Connection._api_call = Mock(return_value={})
qds.main()
Connection._api_call.assert_called_with("POST", "apps",
{'name': 'appname',
'kind': 'spark',
'config': {}})
def test_with_kind(self):
sys.argv = ['qds.py', 'app', 'create', '--name', 'appname',
'--kind', 'spark']
print_command()
Connection._api_call = Mock(return_value={})
qds.main()
Connection._api_call.assert_called_with("POST", "apps",
{'name': 'appname',
'kind': 'spark',
'config': {}})
def test_fail_with_wrong_kind(self):
sys.argv = ['qds.py', 'app', 'create', '--name', 'appname',
'--kind', 'tez'] # tez apps are not supported yet.
print_command()
with self.assertRaises(SystemExit):
qds.main()
def test_with_config(self):
sys.argv = ['qds.py', 'app', 'create', '--name', 'appname', '--config',
'zeppelin.spark.concurrentSQL=true']
print_command()
Connection._api_call = Mock(return_value={})
qds.main()
Connection._api_call.assert_called_with(
"POST", "apps", {'name': 'appname', 'kind': 'spark',
'config': {
'zeppelin.spark.concurrentSQL': 'true'}})
def test_with_configs(self):
sys.argv = ['qds.py', 'app', 'create', '--name', 'appname', '--config',
'spark.executor.memory=2g',
'zeppelin.spark.concurrentSQL=true']
print_command()
Connection._api_call = Mock(return_value={})
qds.main()
Connection._api_call.assert_called_with(
"POST", "apps", {'name': 'appname', 'kind': 'spark',
'config': {
'spark.executor.memory': '2g',
'zeppelin.spark.concurrentSQL': 'true'}})
def test_fail_with_bad_config_1(self):
sys.argv = ['qds.py', 'app', 'create', '--name', 'appname', '--config',
'no-equal-sign']
print_command()
with self.assertRaises(SystemExit):
qds.main()
def test_fail_with_bad_config_2(self):
sys.argv = ['qds.py', 'app', 'create', '--name', 'appname', '--config',
'multiple=equal=sign']
print_command()
with self.assertRaises(SystemExit):
qds.main()
def test_fail_with_good_and_bad_config(self):
sys.argv = ['qds.py', 'app', 'create', '--name', 'appname', '--config',
'this=good', 'no-equal-sign']
print_command()
with self.assertRaises(SystemExit):
qds.main()
class TestAppStop(QdsCliTestCase):
def test_minimal(self):
sys.argv = ['qds.py', 'app', 'stop', '123']
print_command()
Connection._api_call = Mock(return_value={})
qds.main()
Connection._api_call.assert_called_with("PUT", "apps/123/stop", None)
def test_fail_with_no_id(self):
sys.argv = ['qds.py', 'app', 'stop']
print_command()
with self.assertRaises(SystemExit):
qds.main()
def test_fail_with_bad_id(self):
sys.argv = ['qds.py', 'app', 'stop', 'notanumber']
print_command()
with self.assertRaises(SystemExit):
qds.main()
class TestAppDelete(QdsCliTestCase):
def test_minimal(self):
sys.argv = ['qds.py', 'app', 'delete', '123']
print_command()
Connection._api_call = Mock(return_value={})
qds.main()
Connection._api_call.assert_called_with("DELETE", "apps/123", None)
def test_fail_with_no_id(self):
sys.argv = ['qds.py', 'app', 'delete']
print_command()
with self.assertRaises(SystemExit):
qds.main()
def test_fail_with_bad_id(self):
sys.argv = ['qds.py', 'app', 'delete', 'notanumber']
print_command()
with self.assertRaises(SystemExit):
qds.main()
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
Michael-Tu/tools | markdown_to_json/blog-content-json-generator.py | 1 | 7563 | import argparse
import json
import re
common_unicode_conversion = {
"\u2018": "'",
"\u2019": "'"
}
# regex for different styles
style = {
"link": "\[([^\(\)\[\]\!]+)\]\(([^\(\)\[\]\!]+)\)",
"image": "!\[([^\(\)\[\]\!]*)\]\(([^\(\)\[\]\!]+)\)",
"bold": "(?:\*{2}([^\s].*?[^\s]|[^\s])\*{2})|(?:_{2}([^\s].*?[^\s]|[^\s])_{2})",
"italic": "(?:\*([^\s].*?[^\s]|[^\s])\*)|(?:_([^\s].*?[^\s]|[^\s])_)",
"code": "`.+?`",
"orderedList": "\s*^\s*\d+\.\s",
"iframe": "<iframe.+>(.*)</iframe>",
"class": "<iframe.+class=['\"](.*?)['\"].*>.*"
}
for k in style:
style[k] = re.compile(style[k])
def is_comment(line):
line = line.strip()
return '<!--' in line or '-->' in line
def content_to_keep(line):
if type(line) is not str:
return False
line = line.strip()
return len(line) > 0 and not is_comment(line)
def replace_unicode(line):
for k in common_unicode_conversion:
line = line.replace(k, common_unicode_conversion[k])
return line
def divider(line):
if len(line.strip().replace("-","")) != 0:
raise Exception("Invalid divider format: {0}".format(line))
return "<hr>"
def bold(content, prefix_free=False):
if not prefix_free:
content = content.strip()[2:-2].strip()
return "<b>{0}</b>".format(content.strip())
def italicize(content):
return "<i>{0}</i>".format(content.strip()[1:-1].strip())
def code(content, prefix_free=False, pre_enabled=False, code_language=""):
if not prefix_free:
content = content.strip()[1:-1]
if pre_enabled:
format_str = "<pre><code class='{0}'>{1}</code></pre>"
else:
format_str = "<code class='{0}'>{1}</code>"
return format_str.format(code_language, content)
def link(content):
# assume it's not an image
content = content.strip()
try:
data = style['link'].findall(content)
# WARNING: only the first link is returned
text = data[0][0]
url = data[0][1]
return "<a href='{0}' target='blank'>{1}</a>".format(url, text)
except Exception as e:
raise Exception("Invalid link format: {0}".format(content))
def headings(line):
res = {
"class": "lead",
"content": ""
}
line = line.strip()
try:
i = 0
while i < len(line) and line[i] == "#":
i += 1
line = line[i:]
res['content'] = bold(line, prefix_free=True)
return res
except Exception as e:
raise Exception("Invalid headings format: {0}".format(line))
def image(line):
res = {
"type": "image",
"url": ""
}
line = line.strip()
try:
data = style['image'].findall(line)
# WARNING: only the first image is returned
if len(data[0][0].strip()) > 0:
res['caption'] = data[0][0]
res['url'] = data[0][1]
return res
except Exception as e:
raise Exception("Invalid image format: {0}".format(line))
def block(line):
line = line.strip()
if line[0] != '>':
raise Exception("Invalid blockquote format: {0}".format(line))
line = line[1:].strip()
return '<blockquote>{0}</blockquote>'.format(line)
def ordered_list(values):
return {
"type": "ordered-list",
"content": values
}
def unordered_list(values):
return {
"type": "unordered-list",
"content": values
}
style_function = {
"link": link,
"image": image,
"bold": bold,
"italic": italicize,
"code": code,
"ordered_list": ordered_list,
"unordered_list": unordered_list
}
def htmlfy(line):
for key in ["bold", "italic", "link", "code"]:
match = style[key].search(line)
while match is not None:
line = line[:match.start()] + style_function[key](match.group()) + line[match.end():]
match = style[key].search(line)
return line
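# Illustrative example (comment only, example.com is a placeholder URL): htmlfy
# turns inline markdown into HTML, so
#   "See **this** [guide](http://example.com) and `code`"
# becomes roughly
#   "See <b>this</b> <a href='http://example.com' target='blank'>guide</a> and <code class=''>code</code>"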
def iframe(d):
d = d.strip()
container = "<div class='row'><div class='col-sm-8 col-sm-offset-2 text-center'>{0}</div></div>"
container2 = "<div class='embed-video-container embed-responsive embed-responsive-16by9' style='margin-bottom:0px'>{0}</div>"
match = style['iframe'].match(d)
word = htmlfy(match.groups()[0])
content = d.replace(word, "")
match2 = style['class'].match(content)
if match2 is not None and 'embed-responsive-item' in match2.groups()[0]:
content = container2.format(content)
content = container.format(content + word)
return content
def process(d):
"""Process general information, except multiple line code/list"""
# image
if len(d) >= 5 and d[:2] == '![':
return image(d)
# heading
elif d[0] == "#":
return headings(htmlfy(d))
# block
elif d[0] == ">":
return block(d)
elif d[:3] == "---":
return divider(d)
else:
return htmlfy(d)
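# Quick reference (comment only): process() maps a markdown line onto the JSON
# building blocks defined above, e.g.
#   "![cap](img.png)" -> {"type": "image", "url": "img.png", "caption": "cap"}
#   "# Title"         -> {"class": "lead", "content": "<b>Title</b>"}
#   "> quoted"        -> "<blockquote>quoted</blockquote>"
#   "---"             -> "<hr>"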
def creat_json(file):
# read in the file
with open(file, "r") as f:
data = f.read().strip().split("\n")
# filter out empty lines and comments
data = list(filter(content_to_keep, data))
# replace common unicode with english characters
data = list(map(replace_unicode, data))
# deal with pre-formated code first
code_multiple_line_detected = False
code_language = ""
code_block_so_far = []
new_data = []
for d in data:
if len(d.strip()) >= 3 and d.strip()[:3] == '```':
if code_multiple_line_detected:
code_multiple_line_detected = False
new_data.append(code("\n".join(code_block_so_far), prefix_free=True, pre_enabled=True, code_language=code_language))
code_block_so_far = []
else:
code_multiple_line_detected = True
code_language = d.strip()[3:].strip()
continue
elif code_multiple_line_detected:
code_block_so_far.append(d)
else:
new_data.append(d)
if code_multiple_line_detected:
new_data.append(code("\n".join(code_block_so_far), prefix_free=True))
data = new_data
# strip leading and trailing empty spaces for all non-empty text
data = list(map(lambda x: x.strip(), data))
res = list()
# process data
ordered_list_detected = False
ordered_list_so_far = []
unordered_list_detected = False
unordered_list_so_far = []
for d in data:
if "<pre>" in d and "</code>" in d:
res.append(d)
continue
if len(d) >= 2 and (d[:2] == '- ' or d[:2] == '* '):
if ordered_list_detected:
ordered_list_detected = False
res.append(ordered_list(ordered_list_so_far))
ordered_list_so_far = []
unordered_list_detected = True
unordered_list_so_far.append(process(d[2:].strip()))
elif style['iframe'].search(d) is not None:
res.append(iframe(d))
elif style['orderedList'].search(d) is not None:
if unordered_list_detected:
unordered_list_detected = False
res.append(unordered_list(unordered_list_so_far))
unordered_list_so_far = []
ordered_list_detected = True
match = style['orderedList'].search(d)
ordered_list_so_far.append(process(d[match.end():].strip()))
else:
if ordered_list_detected:
ordered_list_detected = False
res.append(ordered_list(ordered_list_so_far))
ordered_list_so_far = []
elif unordered_list_detected:
unordered_list_detected = False
res.append(unordered_list(unordered_list_so_far))
unordered_list_so_far = []
res.append(process(d))
if ordered_list_detected:
ordered_list_detected = False
res.append(ordered_list(ordered_list_so_far))
elif unordered_list_detected:
unordered_list_detected = False
res.append(unordered_list(unordered_list_so_far))
# save file
saveName = "{0}.json".format(file.split(".")[0])
with open(saveName, "w") as f:
json.dump({"blog-content": res}, f, indent=2)
return saveName
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="a parser that converts a text file to a json file according to stac blog content guideline")
parser.add_argument("source", type=str, help="<name>.txt")
args = parser.parse_args()
try:
print("json file saved as {0}".format(creat_json(args.source)))
print("SUCCESS")
except Exception as e:
print("FAILED\n")
print(e)
| mit |
ygol/odoo | addons/account/wizard/account_financial_report.py | 345 | 5505 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class accounting_report(osv.osv_memory):
_name = "accounting.report"
_inherit = "account.common.report"
_description = "Accounting Report"
_columns = {
'enable_filter': fields.boolean('Enable Comparison'),
'account_report_id': fields.many2one('account.financial.report', 'Account Reports', required=True),
'label_filter': fields.char('Column Label', help="This label will be displayed on report to show the balance computed for the given comparison filter."),
'fiscalyear_id_cmp': fields.many2one('account.fiscalyear', 'Fiscal Year', help='Keep empty for all open fiscal year'),
'filter_cmp': fields.selection([('filter_no', 'No Filters'), ('filter_date', 'Date'), ('filter_period', 'Periods')], "Filter by", required=True),
'period_from_cmp': fields.many2one('account.period', 'Start Period'),
'period_to_cmp': fields.many2one('account.period', 'End Period'),
'date_from_cmp': fields.date("Start Date"),
'date_to_cmp': fields.date("End Date"),
'debit_credit': fields.boolean('Display Debit/Credit Columns', help="This option allows you to get more details about the way your balances are computed. Because it is space consuming, we do not allow to use it while doing a comparison."),
}
def _get_account_report(self, cr, uid, context=None):
# TODO deprecate this it doesnt work in web
menu_obj = self.pool.get('ir.ui.menu')
report_obj = self.pool.get('account.financial.report')
report_ids = []
if context.get('active_id'):
menu = menu_obj.browse(cr, uid, context.get('active_id')).name
report_ids = report_obj.search(cr, uid, [('name','ilike',menu)])
return report_ids and report_ids[0] or False
_defaults = {
'filter_cmp': 'filter_no',
'target_move': 'posted',
'account_report_id': _get_account_report,
}
def _build_comparison_context(self, cr, uid, ids, data, context=None):
if context is None:
context = {}
result = {}
result['fiscalyear'] = 'fiscalyear_id_cmp' in data['form'] and data['form']['fiscalyear_id_cmp'] or False
result['journal_ids'] = 'journal_ids' in data['form'] and data['form']['journal_ids'] or False
result['chart_account_id'] = 'chart_account_id' in data['form'] and data['form']['chart_account_id'] or False
result['state'] = 'target_move' in data['form'] and data['form']['target_move'] or ''
if data['form']['filter_cmp'] == 'filter_date':
result['date_from'] = data['form']['date_from_cmp']
result['date_to'] = data['form']['date_to_cmp']
elif data['form']['filter_cmp'] == 'filter_period':
if not data['form']['period_from_cmp'] or not data['form']['period_to_cmp']:
raise osv.except_osv(_('Error!'),_('Select a starting and an ending period'))
result['period_from'] = data['form']['period_from_cmp']
result['period_to'] = data['form']['period_to_cmp']
return result
def check_report(self, cr, uid, ids, context=None):
if context is None:
context = {}
res = super(accounting_report, self).check_report(cr, uid, ids, context=context)
data = {}
data['form'] = self.read(cr, uid, ids, ['account_report_id', 'date_from_cmp', 'date_to_cmp', 'fiscalyear_id_cmp', 'journal_ids', 'period_from_cmp', 'period_to_cmp', 'filter_cmp', 'chart_account_id', 'target_move'], context=context)[0]
for field in ['fiscalyear_id_cmp', 'chart_account_id', 'period_from_cmp', 'period_to_cmp', 'account_report_id']:
if isinstance(data['form'][field], tuple):
data['form'][field] = data['form'][field][0]
comparison_context = self._build_comparison_context(cr, uid, ids, data, context=context)
res['data']['form']['comparison_context'] = comparison_context
return res
def _print_report(self, cr, uid, ids, data, context=None):
data['form'].update(self.read(cr, uid, ids, ['date_from_cmp', 'debit_credit', 'date_to_cmp', 'fiscalyear_id_cmp', 'period_from_cmp', 'period_to_cmp', 'filter_cmp', 'account_report_id', 'enable_filter', 'label_filter','target_move'], context=context)[0])
return self.pool['report'].get_action(cr, uid, [], 'account.report_financial', data=data, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
richardw347/roboclaw_python | src/roboclaw_python/roboclaw.py | 1 | 34451 | import random
import serial
import struct
import time
class Roboclaw:
'Roboclaw Interface Class'
def __init__(self, comport, rate, timeout=0.01, retries=3):
self.comport = comport
self.rate = rate
self.timeout = timeout
self._trystimeout = retries
self._crc = 0
# Command Enums
class Cmd():
M1FORWARD = 0
M1BACKWARD = 1
SETMINMB = 2
SETMAXMB = 3
M2FORWARD = 4
M2BACKWARD = 5
M17BIT = 6
M27BIT = 7
MIXEDFORWARD = 8
MIXEDBACKWARD = 9
MIXEDRIGHT = 10
MIXEDLEFT = 11
MIXEDFB = 12
MIXEDLR = 13
GETM1ENC = 16
GETM2ENC = 17
GETM1SPEED = 18
GETM2SPEED = 19
RESETENC = 20
GETVERSION = 21
SETM1ENCCOUNT = 22
SETM2ENCCOUNT = 23
GETMBATT = 24
GETLBATT = 25
SETMINLB = 26
SETMAXLB = 27
SETM1PID = 28
SETM2PID = 29
GETM1ISPEED = 30
GETM2ISPEED = 31
M1DUTY = 32
M2DUTY = 33
MIXEDDUTY = 34
M1SPEED = 35
M2SPEED = 36
MIXEDSPEED = 37
M1SPEEDACCEL = 38
M2SPEEDACCEL = 39
MIXEDSPEEDACCEL = 40
M1SPEEDDIST = 41
M2SPEEDDIST = 42
MIXEDSPEEDDIST = 43
M1SPEEDACCELDIST = 44
M2SPEEDACCELDIST = 45
MIXEDSPEEDACCELDIST = 46
GETBUFFERS = 47
GETPWMS = 48
GETCURRENTS = 49
MIXEDSPEED2ACCEL = 50
MIXEDSPEED2ACCELDIST = 51
M1DUTYACCEL = 52
M2DUTYACCEL = 53
MIXEDDUTYACCEL = 54
READM1PID = 55
READM2PID = 56
SETMAINVOLTAGES = 57
SETLOGICVOLTAGES = 58
GETMINMAXMAINVOLTAGES = 59
GETMINMAXLOGICVOLTAGES = 60
SETM1POSPID = 61
SETM2POSPID = 62
READM1POSPID = 63
READM2POSPID = 64
M1SPEEDACCELDECCELPOS = 65
M2SPEEDACCELDECCELPOS = 66
MIXEDSPEEDACCELDECCELPOS = 67
SETM1DEFAULTACCEL = 68
SETM2DEFAULTACCEL = 69
SETPINFUNCTIONS = 74
GETPINFUNCTIONS = 75
SETDEADBAND = 76
GETDEADBAND = 77
RESTOREDEFAULTS = 80
GETTEMP = 82
GETTEMP2 = 83
GETERROR = 90
GETENCODERMODE = 91
SETM1ENCODERMODE = 92
SETM2ENCODERMODE = 93
WRITENVM = 94
READNVM = 95
SETCONFIG = 98
GETCONFIG = 99
SETM1MAXCURRENT = 133
SETM2MAXCURRENT = 134
GETM1MAXCURRENT = 135
GETM2MAXCURRENT = 136
SETPWMMODE = 148
GETPWMMODE = 149
FLAGBOOTLOADER = 255
# Private Functions
def crc_clear(self):
self._crc = 0
return
def crc_update(self, data):
self._crc = self._crc ^ (data << 8)
for bit in range(0, 8):
if (self._crc & 0x8000) == 0x8000:
self._crc = ((self._crc << 1) ^ 0x1021)
else:
self._crc = self._crc << 1
return
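    # Note (added comment, not in the original source): crc_update implements the
    # CRC-16/CCITT step used by the Roboclaw packet-serial protocol -- each byte
    # is folded into the running checksum with polynomial 0x1021, and the final
    # 16-bit value is what _writechecksum()/_readchecksumword() compare against.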
def _sendcommand(self, address, command):
self.crc_clear()
self.crc_update(address)
self._port.write(chr(address))
self.crc_update(command)
self._port.write(chr(command))
return
def _readchecksumword(self):
data = self._port.read(2)
if len(data) == 2:
crc = (ord(data[0]) << 8) | ord(data[1])
return (1, crc)
return (0, 0)
def _readbyte(self):
data = self._port.read(1)
if len(data):
val = ord(data)
self.crc_update(val)
return (1, val)
return (0, 0)
def _readword(self):
val1 = self._readbyte()
if val1[0]:
val2 = self._readbyte()
if val2[0]:
return (1, val1[1] << 8 | val2[1])
return (0, 0)
def _readlong(self):
val1 = self._readbyte()
if val1[0]:
val2 = self._readbyte()
if val2[0]:
val3 = self._readbyte()
if val3[0]:
val4 = self._readbyte()
if val4[0]:
return (1, val1[1] << 24 | val2[1] << 16 | val3[1] << 8 | val4[1])
return (0, 0)
def _readslong(self):
val = self._readlong()
if val[0]:
if val[1] & 0x80000000:
return (val[0], val[1] - 0x100000000)
return (val[0], val[1])
return (0, 0)
def _writebyte(self, val):
self.crc_update(val & 0xFF)
self._port.write(chr(val & 0xFF))
def _writesbyte(self, val):
self._writebyte(val)
def _writeword(self, val):
self._writebyte((val >> 8) & 0xFF)
self._writebyte(val & 0xFF)
def _writesword(self, val):
self._writeword(val)
def _writelong(self, val):
self._writebyte((val >> 24) & 0xFF)
self._writebyte((val >> 16) & 0xFF)
self._writebyte((val >> 8) & 0xFF)
self._writebyte(val & 0xFF)
def _writeslong(self, val):
self._writelong(val)
def _read1(self, address, cmd):
trys = self._trystimeout
while 1:
self._port.flushInput()
self._sendcommand(address, cmd)
val1 = self._readbyte()
if val1[0]:
crc = self._readchecksumword()
if crc[0]:
if self._crc & 0xFFFF != crc[1] & 0xFFFF:
return (0, 0)
return (1, val1[1])
trys -= 1
if trys == 0:
break
return (0, 0)
def _read2(self, address, cmd):
trys = self._trystimeout
while 1:
self._port.flushInput()
self._sendcommand(address, cmd)
val1 = self._readword()
if val1[0]:
crc = self._readchecksumword()
if crc[0]:
if self._crc & 0xFFFF != crc[1] & 0xFFFF:
return (0, 0)
return (1, val1[1])
trys -= 1
if trys == 0:
break
return (0, 0)
def _read4(self, address, cmd):
trys = self._trystimeout
while 1:
self._port.flushInput()
self._sendcommand(address, cmd)
val1 = self._readlong()
if val1[0]:
crc = self._readchecksumword()
if crc[0]:
if self._crc & 0xFFFF != crc[1] & 0xFFFF:
return (0, 0)
return (1, val1[1])
trys -= 1
if trys == 0:
break
return (0, 0)
def _read4_1(self, address, cmd):
trys = self._trystimeout
while 1:
self._port.flushInput()
self._sendcommand(address, cmd)
val1 = self._readslong()
if val1[0]:
val2 = self._readbyte()
if val2[0]:
crc = self._readchecksumword()
if crc[0]:
if self._crc & 0xFFFF != crc[1] & 0xFFFF:
return (0, 0)
return (1, val1[1], val2[1])
trys -= 1
if trys == 0:
break
return (0, 0)
def _read_n(self, address, cmd, args):
trys = self._trystimeout
while 1:
self._port.flushInput()
trys -= 1
if trys == 0:
break
failed = False
self._sendcommand(address, cmd)
data = [1, ]
for i in range(0, args):
val = self._readlong()
if val[0] == 0:
failed = True
break
data.append(val[1])
if failed:
continue
crc = self._readchecksumword()
if crc[0]:
if self._crc & 0xFFFF == crc[1] & 0xFFFF:
return (data);
return (0, 0, 0, 0, 0)
def _writechecksum(self):
self._writeword(self._crc & 0xFFFF)
val = self._readbyte()
if (len(val) > 0):
if val[0]:
return True
return False
def _write0(self, address, cmd):
trys = self._trystimeout
while trys:
self._sendcommand(address, cmd)
if self._writechecksum():
return True
trys = trys - 1
return False
def _write1(self, address, cmd, val):
trys = self._trystimeout
while trys:
self._sendcommand(address, cmd)
self._writebyte(val)
if self._writechecksum():
return True
trys = trys - 1
return False
def _write11(self, address, cmd, val1, val2):
trys = self._trystimeout
while trys:
self._sendcommand(address, cmd)
self._writebyte(val1)
self._writebyte(val2)
if self._writechecksum():
return True
trys = trys - 1
return False
def _write111(self, address, cmd, val1, val2, val3):
trys = self._trystimeout
while trys:
self._sendcommand(address, cmd)
self._writebyte(val1)
self._writebyte(val2)
self._writebyte(val3)
if self._writechecksum():
return True
trys = trys - 1
return False
def _write2(self, address, cmd, val):
trys = self._trystimeout
while trys:
self._sendcommand(address, cmd)
self._writeword(val)
if self._writechecksum():
return True
trys = trys - 1
return False
def _writeS2(self, address, cmd, val):
trys = self._trystimeout
while trys:
self._sendcommand(address, cmd)
self._writesword(val)
if self._writechecksum():
return True
trys = trys - 1
return False
def _write22(self, address, cmd, val1, val2):
trys = self._trystimeout
while trys:
self._sendcommand(address, cmd)
self._writeword(val1)
self._writeword(val2)
if self._writechecksum():
return True
trys = trys - 1
return False
def _writeS22(self, address, cmd, val1, val2):
trys = self._trystimeout
while trys:
self._sendcommand(address, cmd)
self._writesword(val1)
self._writeword(val2)
if self._writechecksum():
return True
trys = trys - 1
return False
def _writeS2S2(self, address, cmd, val1, val2):
trys = self._trystimeout
while trys:
self._sendcommand(address, cmd)
self._writesword(val1)
self._writesword(val2)
if self._writechecksum():
return True
trys = trys - 1
return False
def _writeS24(self, address, cmd, val1, val2):
trys = self._trystimeout
while trys:
self._sendcommand(address, cmd)
self._writesword(val1)
self._writelong(val2)
if self._writechecksum():
return True
trys = trys - 1
return False
def _writeS24S24(self, address, cmd, val1, val2, val3, val4):
trys = self._trystimeout
while trys:
self._sendcommand(address, cmd)
self._writesword(val1)
self._writelong(val2)
self._writesword(val3)
self._writelong(val4)
if self._writechecksum():
return True
trys = trys - 1
return False
def _write4(self, address, cmd, val):
trys = self._trystimeout
while trys:
self._sendcommand(address, cmd)
self._writelong(val)
if self._writechecksum():
return True
trys = trys - 1
return False
def _writeS4(self, address, cmd, val):
trys = self._trystimeout
while trys:
self._sendcommand(address, cmd)
self._writeslong(val)
if self._writechecksum():
return True
trys = trys - 1
return False
def _write44(self, address, cmd, val1, val2):
trys = self._trystimeout
while trys:
self._sendcommand(address, cmd)
self._writelong(val1)
self._writelong(val2)
if self._writechecksum():
return True
trys = trys - 1
return False
def _write4S4(self, address, cmd, val1, val2):
trys = self._trystimeout
while trys:
self._sendcommand(address, cmd)
self._writelong(val1)
self._writeslong(val2)
if self._writechecksum():
return True
trys = trys - 1
return False
def _writeS4S4(self, address, cmd, val1, val2):
trys = self._trystimeout
while trys:
self._sendcommand(address, cmd)
self._writeslong(val1)
self._writeslong(val2)
if self._writechecksum():
return True
trys = trys - 1
return False
def _write441(self, address, cmd, val1, val2, val3):
trys = self._trystimeout
while trys:
self._sendcommand(address, cmd)
self._writelong(val1)
self._writelong(val2)
self._writebyte(val3)
if self._writechecksum():
return True
trys = trys - 1
return False
def _writeS441(self, address, cmd, val1, val2, val3):
trys = self._trystimeout
while trys:
self._sendcommand(address, cmd)
self._writeslong(val1)
self._writelong(val2)
self._writebyte(val3)
if self._writechecksum():
return True
trys = trys - 1
return False
def _write4S4S4(self, address, cmd, val1, val2, val3):
trys = self._trystimeout
while trys:
self._sendcommand(address, cmd)
self._writelong(val1)
self._writeslong(val2)
self._writeslong(val3)
if self._writechecksum():
return True
trys = trys - 1
return False
def _write4S441(self, address, cmd, val1, val2, val3, val4):
trys = self._trystimeout
while trys:
self._sendcommand(address, cmd)
self._writelong(val1)
self._writeslong(val2)
self._writelong(val3)
self._writebyte(val4)
if self._writechecksum():
return True
trys = trys - 1
return False
def _write4444(self, address, cmd, val1, val2, val3, val4):
trys = self._trystimeout
while trys:
self._sendcommand(address, cmd)
self._writelong(val1)
self._writelong(val2)
self._writelong(val3)
self._writelong(val4)
if self._writechecksum():
return True
trys = trys - 1
return False
def _write4S44S4(self, address, cmd, val1, val2, val3, val4):
trys = self._trystimeout
while trys:
self._sendcommand(address, cmd)
self._writelong(val1)
self._writeslong(val2)
self._writelong(val3)
self._writeslong(val4)
if self._writechecksum():
return True
trys = trys - 1
return False
def _write44441(self, address, cmd, val1, val2, val3, val4, val5):
trys = self._trystimeout
while trys:
self._sendcommand(address, cmd)
self._writelong(val1)
self._writelong(val2)
self._writelong(val3)
self._writelong(val4)
self._writebyte(val5)
if self._writechecksum():
return True
trys = trys - 1
return False
def _writeS44S441(self, address, cmd, val1, val2, val3, val4, val5):
trys = self._trystimeout
while trys:
self._sendcommand(address, cmd)
self._writeslong(val1)
self._writelong(val2)
self._writeslong(val3)
self._writelong(val4)
self._writebyte(val5)
if self._writechecksum():
return True
trys = trys - 1
return False
def _write4S44S441(self, address, cmd, val1, val2, val3, val4, val5, val6):
trys = self._trystimeout
while trys:
self._sendcommand(address, cmd)
self._writelong(val1)
self._writeslong(val2)
self._writelong(val3)
self._writeslong(val4)
self._writelong(val5)
self._writebyte(val6)
if self._writechecksum():
return True
trys = trys - 1
return False
def _write4S444S441(self, address, cmd, val1, val2, val3, val4, val5, val6, val7):
trys = self._trystimeout
while trys:
            self._sendcommand(address, cmd)
self._writelong(val1)
self._writeslong(val2)
self._writelong(val3)
self._writelong(val4)
self._writeslong(val5)
self._writelong(val6)
self._writebyte(val7)
if self._writechecksum():
return True
trys = trys - 1
return False
def _write4444444(self, address, cmd, val1, val2, val3, val4, val5, val6, val7):
trys = self._trystimeout
while trys:
self._sendcommand(address, cmd)
self._writelong(val1)
self._writelong(val2)
self._writelong(val3)
self._writelong(val4)
self._writelong(val5)
self._writelong(val6)
self._writelong(val7)
if self._writechecksum():
return True
trys = trys - 1
return False
def _write444444441(self, address, cmd, val1, val2, val3, val4, val5, val6, val7, val8, val9):
trys = self._trystimeout
while trys:
self._sendcommand(address, cmd)
self._writelong(val1)
self._writelong(val2)
self._writelong(val3)
self._writelong(val4)
self._writelong(val5)
self._writelong(val6)
self._writelong(val7)
self._writelong(val8)
self._writebyte(val9)
if self._writechecksum():
return True
trys = trys - 1
return False
# User accessible functions
def SendRandomData(self, cnt):
for i in range(0, cnt):
byte = random.getrandbits(8)
self._port.write(chr(byte))
return
def ForwardM1(self, address, val):
return self._write1(address, self.Cmd.M1FORWARD, val)
def BackwardM1(self, address, val):
return self._write1(address, self.Cmd.M1BACKWARD, val)
def SetMinVoltageMainBattery(self, address, val):
return self._write1(address, self.Cmd.SETMINMB, val)
def SetMaxVoltageMainBattery(self, address, val):
return self._write1(address, self.Cmd.SETMAXMB, val)
def ForwardM2(self, address, val):
return self._write1(address, self.Cmd.M2FORWARD, val)
def BackwardM2(self, address, val):
return self._write1(address, self.Cmd.M2BACKWARD, val)
def ForwardBackwardM1(self, address, val):
return self._write1(address, self.Cmd.M17BIT, val)
def ForwardBackwardM2(self, address, val):
return self._write1(address, self.Cmd.M27BIT, val)
def ForwardMixed(self, address, val):
return self._write1(address, self.Cmd.MIXEDFORWARD, val)
def BackwardMixed(self, address, val):
return self._write1(address, self.Cmd.MIXEDBACKWARD, val)
def TurnRightMixed(self, address, val):
return self._write1(address, self.Cmd.MIXEDRIGHT, val)
def TurnLeftMixed(self, address, val):
return self._write1(address, self.Cmd.MIXEDLEFT, val)
def ForwardBackwardMixed(self, address, val):
return self._write1(address, self.Cmd.MIXEDFB, val)
def LeftRightMixed(self, address, val):
return self._write1(address, self.Cmd.MIXEDLR, val)
def ReadEncM1(self, address):
return self._read4_1(address, self.Cmd.GETM1ENC)
def ReadEncM2(self, address):
return self._read4_1(address, self.Cmd.GETM2ENC)
def ReadSpeedM1(self, address):
return self._read4_1(address, self.Cmd.GETM1SPEED)
def ReadSpeedM2(self, address):
return self._read4_1(address, self.Cmd.GETM2SPEED)
def ResetEncoders(self, address):
return self._write0(address, self.Cmd.RESETENC)
def ReadVersion(self, address):
trys = self._trystimeout
while 1:
self._port.flushInput()
self._sendcommand(address, self.Cmd.GETVERSION)
str = ""
passed = True
for i in range(0, 48):
data = self._port.read(1)
if len(data):
val = ord(data)
self.crc_update(val)
if (val == 0):
break
str += data[0]
else:
passed = False
break
if passed:
crc = self._readchecksumword()
if crc[0]:
if self._crc & 0xFFFF == crc[1] & 0xFFFF:
return (1, str)
else:
time.sleep(0.01)
trys -= 1
if trys == 0:
break
return (0, 0)
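    # Usage sketch (an assumption -- the port name, baud rate and 0x80 address are
    # illustrative, and the serial port is opened outside the code shown here):
    #   rc = Roboclaw("/dev/ttyACM0", 115200)
    #   ok, version = rc.ReadVersion(0x80)
    #   if ok:
    #       print version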
def SetEncM1(self, address, cnt):
return self._write4(address, self.Cmd.SETM1ENCCOUNT, cnt)
def SetEncM2(self, address, cnt):
return self._write4(address, self.Cmd.SETM2ENCCOUNT, cnt)
def ReadMainBatteryVoltage(self, address):
return self._read2(address, self.Cmd.GETMBATT)
def ReadLogicBatteryVoltage(self, address, ):
return self._read2(address, self.Cmd.GETLBATT)
def SetMinVoltageLogicBattery(self, address, val):
return self._write1(address, self.Cmd.SETMINLB, val)
def SetMaxVoltageLogicBattery(self, address, val):
return self._write1(address, self.Cmd.SETMAXLB, val)
def SetM1VelocityPID(self, address, p, i, d, qpps):
return self._write4444(address, self.Cmd.SETM1PID, long(d * 65536), long(p * 65536), long(i * 65536), qpps)
def SetM2VelocityPID(self, address, p, i, d, qpps):
return self._write4444(address, self.Cmd.SETM2PID, long(d * 65536), long(p * 65536), long(i * 65536), qpps)
def ReadISpeedM1(self, address):
return self._read4_1(address, self.Cmd.GETM1ISPEED)
def ReadISpeedM2(self, address):
return self._read4_1(address, self.Cmd.GETM2ISPEED)
def DutyM1(self, address, val):
        return self._writeS2(address, self.Cmd.M1DUTY, val)
def DutyM2(self, address, val):
        return self._writeS2(address, self.Cmd.M2DUTY, val)
def DutyM1M2(self, address, m1, m2):
return self._writeS2S2(address, self.Cmd.MIXEDDUTY, m1, m2)
def SpeedM1(self, address, val):
return self._writeS4(address, self.Cmd.M1SPEED, val)
def SpeedM2(self, address, val):
return self._writeS4(address, self.Cmd.M2SPEED, val)
def SpeedM1M2(self, address, m1, m2):
return self._writeS4S4(address, self.Cmd.MIXEDSPEED, m1, m2)
def SpeedAccelM1(self, address, accel, speed):
return self._write4S4(address, self.Cmd.M1SPEEDACCEL, accel, speed)
def SpeedAccelM2(self, address, accel, speed):
return self._write4S4(address, self.Cmd.M2SPEEDACCEL, accel, speed)
def SpeedAccelM1M2(self, address, accel, speed1, speed2):
return self._write4S4S4(address, self.Cmd.MIXEDSPEEDACCEL, accel, speed1, speed2)
def SpeedDistanceM1(self, address, speed, distance, buffer):
return self._writeS441(address, self.Cmd.M1SPEEDDIST, speed, distance, buffer)
def SpeedDistanceM2(self, address, speed, distance, buffer):
return self._writeS441(address, self.Cmd.M2SPEEDDIST, speed, distance, buffer)
def SpeedDistanceM1M2(self, address, speed1, distance1, speed2, distance2, buffer):
return self._writeS44S441(address, self.Cmd.MIXEDSPEEDDIST, speed1, distance1, speed2, distance2, buffer)
def SpeedAccelDistanceM1(self, address, accel, speed, distance, buffer):
return self._write4S441(address, self.Cmd.M1SPEEDACCELDIST, accel, speed, distance, buffer)
def SpeedAccelDistanceM2(self, address, accel, speed, distance, buffer):
return self._write4S441(address, self.Cmd.M2SPEEDACCELDIST, accel, speed, distance, buffer)
def SpeedAccelDistanceM1M2(self, address, accel, speed1, distance1, speed2, distance2, buffer):
return self._write4S44S441(address, self.Cmd.MIXEDSPEEDACCELDIST, accel, speed1, distance1, speed2, distance2,
buffer)
def ReadBuffers(self, address):
val = self._read2(address, self.Cmd.GETBUFFERS)
if val[0]:
return (1, val[1] >> 8, val[1] & 0xFF)
return (0, 0, 0)
def ReadPWMs(self, address):
val = self._read4(address, self.Cmd.GETPWMS)
if val[0]:
pwm1 = val[1] >> 16
pwm2 = val[1] & 0xFFFF
if pwm1 & 0x8000:
pwm1 -= 0x10000
if pwm2 & 0x8000:
pwm2 -= 0x10000
return (1, pwm1, pwm2)
return (0, 0, 0)
def ReadCurrents(self, address):
val = self._read4(address, self.Cmd.GETCURRENTS)
if val[0]:
cur1 = val[1] >> 16
cur2 = val[1] & 0xFFFF
if cur1 & 0x8000:
cur1 -= 0x10000
if cur2 & 0x8000:
cur2 -= 0x10000
return (1, cur1, cur2)
return (0, 0, 0)
def SpeedAccelM1M2_2(self, address, accel1, speed1, accel2, speed2):
        return self._write4S44S4(address, self.Cmd.MIXEDSPEED2ACCEL, accel1, speed1, accel2, speed2)
def SpeedAccelDistanceM1M2_2(self, address, accel1, speed1, distance1, accel2, speed2, distance2, buffer):
return self._write4S444S441(address, self.Cmd.MIXEDSPEED2ACCELDIST, accel1, speed1, distance1, accel2, speed2,
distance2, buffer)
def DutyAccelM1(self, address, accel, duty):
return self._writeS24(address, self.Cmd.M1DUTYACCEL, duty, accel)
def DutyAccelM2(self, address, accel, duty):
return self._writeS24(address, self.Cmd.M2DUTYACCEL, duty, accel)
def DutyAccelM1M2(self, address, accel1, duty1, accel2, duty2):
        return self._writeS24S24(address, self.Cmd.MIXEDDUTYACCEL, duty1, accel1, duty2, accel2)
def ReadM1VelocityPID(self, address):
data = self._read_n(address, self.Cmd.READM1PID, 4)
if data[0]:
data[1] /= 65536.0
data[2] /= 65536.0
data[3] /= 65536.0
return data
return (0, 0, 0, 0, 0)
def ReadM2VelocityPID(self, address):
data = self._read_n(address, self.Cmd.READM2PID, 4)
if data[0]:
data[1] /= 65536.0
data[2] /= 65536.0
data[3] /= 65536.0
return data
return (0, 0, 0, 0, 0)
def SetMainVoltages(self, address, min, max):
return self._write22(address, self.Cmd.SETMAINVOLTAGES, min, max)
def SetLogicVoltages(self, address, min, max):
return self._write22(address, self.Cmd.SETLOGICVOLTAGES, min, max)
def ReadMinMaxMainVoltages(self, address):
val = self._read4(address, self.Cmd.GETMINMAXMAINVOLTAGES)
if val[0]:
min = val[1] >> 16
max = val[1] & 0xFFFF
return (1, min, max)
return (0, 0, 0)
def ReadMinMaxLogicVoltages(self, address):
val = self._read4(address, self.Cmd.GETMINMAXLOGICVOLTAGES)
if val[0]:
min = val[1] >> 16
max = val[1] & 0xFFFF
return (1, min, max)
return (0, 0, 0)
def SetM1PositionPID(self, address, kp, ki, kd, kimax, deadzone, min, max):
return self._write4444444(address, self.Cmd.SETM1POSPID, long(kd * 1024), long(kp * 1024), long(ki * 1024),
kimax, deadzone, min, max)
def SetM2PositionPID(self, address, kp, ki, kd, kimax, deadzone, min, max):
return self._write4444444(address, self.Cmd.SETM2POSPID, long(kd * 1024), long(kp * 1024), long(ki * 1024),
kimax, deadzone, min, max)
def ReadM1PositionPID(self, address):
data = self._read_n(address, self.Cmd.READM1POSPID, 7)
if data[0]:
data[1] /= 1024.0
data[2] /= 1024.0
data[3] /= 1024.0
return data
return (0, 0, 0, 0, 0, 0, 0, 0)
def ReadM2PositionPID(self, address):
data = self._read_n(address, self.Cmd.READM2POSPID, 7)
if data[0]:
data[1] /= 1024.0
data[2] /= 1024.0
data[3] /= 1024.0
return data
return (0, 0, 0, 0, 0, 0, 0, 0)
def SpeedAccelDeccelPositionM1(self, address, accel, speed, deccel, position, buffer):
return self._write44441(address, self.Cmd.M1SPEEDACCELDECCELPOS, accel, speed, deccel, position, buffer)
def SpeedAccelDeccelPositionM2(self, address, accel, speed, deccel, position, buffer):
return self._write44441(address, self.Cmd.M2SPEEDACCELDECCELPOS, accel, speed, deccel, position, buffer)
def SpeedAccelDeccelPositionM1M2(self, address, accel1, speed1, deccel1, position1, accel2, speed2, deccel2,
position2, buffer):
return self._write444444441(address, self.Cmd.MIXEDSPEEDACCELDECCELPOS, accel1, speed1, deccel1, position1,
accel2, speed2, deccel2, position2, buffer)
def SetM1DefaultAccel(self, address, accel):
return self._write4(address, self.Cmd.SETM1DEFAULTACCEL, accel)
def SetM2DefaultAccel(self, address, accel):
return self._write4(address, self.Cmd.SETM2DEFAULTACCEL, accel)
def SetPinFunctions(self, address, S3mode, S4mode, S5mode):
return self._write111(address, self.Cmd.SETPINFUNCTIONS, S3mode, S4mode, S5mode)
def ReadPinFunctions(self, address):
trys = self._trystimeout
while 1:
self._sendcommand(address, self.Cmd.GETPINFUNCTIONS)
val1 = self._readbyte()
if val1[0]:
val2 = self._readbyte()
                if val2[0]:
                    val3 = self._readbyte()
                    if val3[0]:
crc = self._readchecksumword()
if crc[0]:
if self._crc & 0xFFFF != crc[1] & 0xFFFF:
return (0, 0)
return (1, val1[1], val2[1], val3[1])
trys -= 1
if trys == 0:
break
return (0, 0)
def SetDeadBand(self, address, min, max):
return self._write11(address, self.Cmd.SETDEADBAND, min, max)
def GetDeadBand(self, address):
val = self._read2(address, self.Cmd.GETDEADBAND)
if val[0]:
return (1, val[1] >> 8, val[1] & 0xFF)
return (0, 0, 0)
# Warning(TTL Serial): Baudrate will change if not already set to 38400. Communications will be lost
def RestoreDefaults(self, address):
return self._write0(address, self.Cmd.RESTOREDEFAULTS)
def ReadTemp(self, address):
return self._read2(address, self.Cmd.GETTEMP)
def ReadTemp2(self, address):
return self._read2(address, self.Cmd.GETTEMP2)
def ReadError(self, address):
return self._read2(address, self.Cmd.GETERROR)
def ReadEncoderModes(self, address):
val = self._read2(address, self.Cmd.GETENCODERMODE)
if val[0]:
return (1, val[1] >> 8, val[1] & 0xFF)
return (0, 0, 0)
def SetM1EncoderMode(self, address, mode):
return self._write1(address, self.Cmd.SETM1ENCODERMODE, mode)
def SetM2EncoderMode(self, address, mode):
return self._write1(address, self.Cmd.SETM2ENCODERMODE, mode)
    # Saves the active settings to NVM; the 32-bit constant passed below is the key value this command requires.
def WriteNVM(self, address):
return self._write4(address, self.Cmd.WRITENVM, 0xE22EAB7A)
# restores settings from NVM
    # Warning(TTL Serial): If the baudrate or the control mode changes, communications will be lost
def ReadNVM(self, address):
return self._write0(address, self.Cmd.READNVM)
    # Warning(TTL Serial): If the control mode is changed from packet serial mode when setting the config, communications will be lost!
    # Warning(TTL Serial): If the baudrate of packet serial mode is changed, communications will be lost!
def SetConfig(self, address, config):
return self._write2(address, self.Cmd.SETCONFIG, config)
def GetConfig(self, address):
return self._read2(address, self.Cmd.GETCONFIG)
def SetM1MaxCurrent(self, address, max):
return self._write44(address, self.Cmd.SETM1MAXCURRENT, max, 0)
def SetM2MaxCurrent(self, address, max):
return self._write44(address, self.Cmd.SETM2MAXCURRENT, max, 0)
def ReadM1MaxCurrent(self, address):
data = self._read_n(address, self.Cmd.GETM1MAXCURRENT, 2)
if data[0]:
return (1, data[1])
return (0, 0)
def ReadM2MaxCurrent(self, address):
data = self._read_n(address, self.Cmd.GETM2MAXCURRENT, 2)
if data[0]:
return (1, data[1])
return (0, 0)
def SetPWMMode(self, address, mode):
return self._write1(address, self.Cmd.SETPWMMODE, mode)
def ReadPWMMode(self, address):
return self._read1(address, self.Cmd.GETPWMMODE)
def Open(self):
try:
self._port = serial.Serial(port=self.comport, baudrate=self.rate, timeout=1, interCharTimeout=self.timeout)
except:
return 0
return 1
| bsd-2-clause |
mammadori/pyglet | contrib/layout/layout/Plex/Scanners.py | 32 | 11656 | #=======================================================================
#
# Python Lexical Analyser
#
#
# Scanning an input stream
#
#=======================================================================
import Errors
from Regexps import BOL, EOL, EOF
class Scanner:
"""
A Scanner is used to read tokens from a stream of characters
using the token set specified by a Plex.Lexicon.
Constructor:
Scanner(lexicon, stream, name = '')
See the docstring of the __init__ method for details.
Methods:
See the docstrings of the individual methods for more
information.
read() --> (value, text)
Reads the next lexical token from the stream.
position() --> (name, line, col)
Returns the position of the last token read using the
read() method.
begin(state_name)
Causes scanner to change state.
produce(value [, text])
Causes return of a token value to the caller of the
Scanner.
"""
lexicon = None # Lexicon
stream = None # file-like object
name = ''
buffer = ''
buf_start_pos = 0 # position in input of start of buffer
next_pos = 0 # position in input of next char to read
cur_pos = 0 # position in input of current char
cur_line = 1 # line number of current char
cur_line_start = 0 # position in input of start of current line
start_pos = 0 # position in input of start of token
start_line = 0 # line number of start of token
start_col = 0 # position in line of start of token
text = None # text of last token read
initial_state = None # Node
state_name = '' # Name of initial state
queue = None # list of tokens to be returned
trace = 0
def __init__(self, lexicon, stream, name = ''):
"""
Scanner(lexicon, stream, name = '')
|lexicon| is a Plex.Lexicon instance specifying the lexical tokens
to be recognised.
|stream| can be a file object or anything which implements a
compatible read() method.
|name| is optional, and may be the name of the file being
scanned or any other identifying string.
"""
self.lexicon = lexicon
self.stream = stream
self.name = name
self.queue = []
self.initial_state = None
self.begin('')
self.next_pos = 0
self.cur_pos = 0
self.cur_line_start = 0
self.cur_char = BOL
self.input_state = 1
def read(self):
"""
Read the next lexical token from the stream and return a
tuple (value, text), where |value| is the value associated with
the token as specified by the Lexicon, and |text| is the actual
string read from the stream. Returns (None, '') on end of file.
"""
queue = self.queue
while not queue:
self.text, action = self.scan_a_token()
if action is None:
self.produce(None)
self.eof()
else:
value = action.perform(self, self.text)
if value is not None:
self.produce(value)
result = queue[0]
del queue[0]
return result
def scan_a_token(self):
"""
Read the next input sequence recognised by the machine
and return (text, action). Returns ('', None) on end of
file.
"""
self.start_pos = self.cur_pos
self.start_line = self.cur_line
self.start_col = self.cur_pos - self.cur_line_start
# if self.trace:
# action = self.run_machine()
# else:
# action = self.run_machine_inlined()
action = self.run_machine_inlined()
if action:
if self.trace:
print "Scanner: read: Performing", action, "%d:%d" % (
self.start_pos, self.cur_pos)
base = self.buf_start_pos
text = self.buffer[self.start_pos - base : self.cur_pos - base]
return (text, action)
else:
if self.cur_pos == self.start_pos:
if self.cur_char == EOL:
self.next_char()
if not self.cur_char or self.cur_char == EOF:
return ('', None)
raise Errors.UnrecognizedInput(self, self.state_name)
def run_machine(self):
"""
Run the machine until no more transitions are possible.
"""
self.state = self.initial_state
self.backup_state = None
while self.transition():
pass
return self.back_up()
def run_machine_inlined(self):
"""
Inlined version of run_machine for speed.
"""
state = self.initial_state
cur_pos = self.cur_pos
cur_line = self.cur_line
cur_line_start = self.cur_line_start
cur_char = self.cur_char
input_state = self.input_state
next_pos = self.next_pos
buffer = self.buffer
buf_start_pos = self.buf_start_pos
buf_len = len(buffer)
backup_state = None
trace = self.trace
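    # input_state tracks where we are around line boundaries and end of input:
    #   1 - delivering ordinary characters
    #   2 - EOL was just delivered for a newline; the literal '\n' comes next
    #   3 - the literal '\n' was delivered; BOL comes next and the line counter advances
    #   4 - EOL was just delivered at end of input; EOF comes next
    #   5 - EOF was delivered; only '' is returned from here on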
while 1:
if trace: #TRACE#
print "State %d, %d/%d:%s -->" % ( #TRACE#
state['number'], input_state, cur_pos, repr(cur_char)), #TRACE#
# Begin inlined self.save_for_backup()
#action = state.action #@slow
action = state['action'] #@fast
if action:
backup_state = (
action, cur_pos, cur_line, cur_line_start, cur_char, input_state, next_pos)
# End inlined self.save_for_backup()
c = cur_char
#new_state = state.new_state(c) #@slow
new_state = state.get(c, -1) #@fast
if new_state == -1: #@fast
new_state = c and state.get('else') #@fast
if new_state:
if trace: #TRACE#
print "State %d" % new_state['number'] #TRACE#
state = new_state
# Begin inlined: self.next_char()
if input_state == 1:
cur_pos = next_pos
# Begin inlined: c = self.read_char()
buf_index = next_pos - buf_start_pos
if buf_index < buf_len:
c = buffer[buf_index]
next_pos = next_pos + 1
else:
discard = self.start_pos - buf_start_pos
data = self.stream.read(0x1000)
buffer = self.buffer[discard:] + data
self.buffer = buffer
buf_start_pos = buf_start_pos + discard
self.buf_start_pos = buf_start_pos
buf_len = len(buffer)
buf_index = buf_index - discard
if data:
c = buffer[buf_index]
next_pos = next_pos + 1
else:
c = ''
# End inlined: c = self.read_char()
if c == '\n':
cur_char = EOL
input_state = 2
elif not c:
cur_char = EOL
input_state = 4
else:
cur_char = c
elif input_state == 2:
cur_char = '\n'
input_state = 3
elif input_state == 3:
cur_line = cur_line + 1
cur_line_start = cur_pos = next_pos
cur_char = BOL
input_state = 1
elif input_state == 4:
cur_char = EOF
input_state = 5
else: # input_state = 5
cur_char = ''
# End inlined self.next_char()
else: # not new_state
if trace: #TRACE#
print "blocked" #TRACE#
# Begin inlined: action = self.back_up()
if backup_state:
(action, cur_pos, cur_line, cur_line_start,
cur_char, input_state, next_pos) = backup_state
else:
action = None
break # while 1
# End inlined: action = self.back_up()
self.cur_pos = cur_pos
self.cur_line = cur_line
self.cur_line_start = cur_line_start
self.cur_char = cur_char
self.input_state = input_state
self.next_pos = next_pos
if trace: #TRACE#
if action: #TRACE#
print "Doing", action #TRACE#
return action
# def transition(self):
# self.save_for_backup()
# c = self.cur_char
# new_state = self.state.new_state(c)
# if new_state:
# if self.trace:
# print "Scanner: read: State %d: %s --> State %d" % (
# self.state.number, repr(c), new_state.number)
# self.state = new_state
# self.next_char()
# return 1
# else:
# if self.trace:
# print "Scanner: read: State %d: %s --> blocked" % (
# self.state.number, repr(c))
# return 0
# def save_for_backup(self):
# action = self.state.get_action()
# if action:
# if self.trace:
# print "Scanner: read: Saving backup point at", self.cur_pos
# self.backup_state = (
# action, self.cur_pos, self.cur_line, self.cur_line_start,
# self.cur_char, self.input_state, self.next_pos)
# def back_up(self):
# backup_state = self.backup_state
# if backup_state:
# (action, self.cur_pos, self.cur_line, self.cur_line_start,
# self.cur_char, self.input_state, self.next_pos) = backup_state
# if self.trace:
# print "Scanner: read: Backing up to", self.cur_pos
# return action
# else:
# return None
def next_char(self):
input_state = self.input_state
if self.trace:
print "Scanner: next:", " "*20, "[%d] %d" % (input_state, self.cur_pos),
if input_state == 1:
self.cur_pos = self.next_pos
c = self.read_char()
if c == '\n':
self.cur_char = EOL
self.input_state = 2
elif not c:
self.cur_char = EOL
self.input_state = 4
else:
self.cur_char = c
elif input_state == 2:
self.cur_char = '\n'
self.input_state = 3
elif input_state == 3:
self.cur_line = self.cur_line + 1
self.cur_line_start = self.cur_pos = self.next_pos
self.cur_char = BOL
self.input_state = 1
elif input_state == 4:
self.cur_char = EOF
self.input_state = 5
else: # input_state = 5
self.cur_char = ''
if self.trace:
print "--> [%d] %d %s" % (input_state, self.cur_pos, repr(self.cur_char))
# def read_char(self):
# """
# Get the next input character, filling the buffer if necessary.
# Returns '' at end of file.
# """
# next_pos = self.next_pos
# buf_index = next_pos - self.buf_start_pos
# if buf_index == len(self.buffer):
# discard = self.start_pos - self.buf_start_pos
# data = self.stream.read(0x1000)
# self.buffer = self.buffer[discard:] + data
# self.buf_start_pos = self.buf_start_pos + discard
# buf_index = buf_index - discard
# if not data:
# return ''
# c = self.buffer[buf_index]
# self.next_pos = next_pos + 1
# return c
def position(self):
"""
Return a tuple (name, line, col) representing the location of
the last token read using the read() method. |name| is the
name that was provided to the Scanner constructor; |line|
is the line number in the stream (1-based); |col| is the
position within the line of the first character of the token
(0-based).
"""
return (self.name, self.start_line, self.start_col)
def begin(self, state_name):
"""Set the current state of the scanner to the named state."""
self.initial_state = (
self.lexicon.get_initial_state(state_name))
self.state_name = state_name
def produce(self, value, text = None):
"""
Called from an action procedure, causes |value| to be returned
as the token value from read(). If |text| is supplied, it is
returned in place of the scanned text.
produce() can be called more than once during a single call to an action
procedure, in which case the tokens are queued up and returned one
at a time by subsequent calls to read(), until the queue is empty,
whereupon scanning resumes.
"""
if text is None:
text = self.text
self.queue.append((value, text))
def eof(self):
"""
Override this method if you want something to be done at
end of file.
"""
# For backward compatibility: 'yield' is a reserved word, so the alias must be
# installed with setattr rather than an ordinary method definition.
setattr(Scanner, "yield", Scanner.produce)
| bsd-3-clause |
mmmavis/lightbeam-bedrock-website | vendor-local/packages/chardet/chardet/big5freq.py | 323 | 82588 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Big5 frequency table
# by Taiwan's Mandarin Promotion Council
# <http://www.edu.tw:81/mandr/>
#
# 128 --> 0.42261
# 256 --> 0.57851
# 512 --> 0.74851
# 1024 --> 0.89384
# 2048 --> 0.97583
#
# Ideal Distribution Ratio = 0.74851/(1-0.74851) = 2.98
# Random Distribution Ratio = 512/(5401-512) = 0.105
#
# Typical Distribution Ratio is about 25% of the Ideal one, still much higher than the RDR
BIG5_TYPICAL_DISTRIBUTION_RATIO = 0.75
#Char to FreqOrder table
BIG5_TABLE_SIZE = 5376
Big5CharToFreqOrder = ( \
1,1801,1506, 255,1431, 198, 9, 82, 6,5008, 177, 202,3681,1256,2821, 110, # 16
3814, 33,3274, 261, 76, 44,2114, 16,2946,2187,1176, 659,3971, 26,3451,2653, # 32
1198,3972,3350,4202, 410,2215, 302, 590, 361,1964, 8, 204, 58,4510,5009,1932, # 48
63,5010,5011, 317,1614, 75, 222, 159,4203,2417,1480,5012,3555,3091, 224,2822, # 64
3682, 3, 10,3973,1471, 29,2787,1135,2866,1940, 873, 130,3275,1123, 312,5013, # 80
4511,2052, 507, 252, 682,5014, 142,1915, 124, 206,2947, 34,3556,3204, 64, 604, # 96
5015,2501,1977,1978, 155,1991, 645, 641,1606,5016,3452, 337, 72, 406,5017, 80, # 112
630, 238,3205,1509, 263, 939,1092,2654, 756,1440,1094,3453, 449, 69,2987, 591, # 128
179,2096, 471, 115,2035,1844, 60, 50,2988, 134, 806,1869, 734,2036,3454, 180, # 144
995,1607, 156, 537,2907, 688,5018, 319,1305, 779,2145, 514,2379, 298,4512, 359, # 160
2502, 90,2716,1338, 663, 11, 906,1099,2553, 20,2441, 182, 532,1716,5019, 732, # 176
1376,4204,1311,1420,3206, 25,2317,1056, 113, 399, 382,1950, 242,3455,2474, 529, # 192
3276, 475,1447,3683,5020, 117, 21, 656, 810,1297,2300,2334,3557,5021, 126,4205, # 208
706, 456, 150, 613,4513, 71,1118,2037,4206, 145,3092, 85, 835, 486,2115,1246, # 224
1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,5022,2128,2359, 347,3815, 221, # 240
3558,3135,5023,1956,1153,4207, 83, 296,1199,3093, 192, 624, 93,5024, 822,1898, # 256
2823,3136, 795,2065, 991,1554,1542,1592, 27, 43,2867, 859, 139,1456, 860,4514, # 272
437, 712,3974, 164,2397,3137, 695, 211,3037,2097, 195,3975,1608,3559,3560,3684, # 288
3976, 234, 811,2989,2098,3977,2233,1441,3561,1615,2380, 668,2077,1638, 305, 228, # 304
1664,4515, 467, 415,5025, 262,2099,1593, 239, 108, 300, 200,1033, 512,1247,2078, # 320
5026,5027,2176,3207,3685,2682, 593, 845,1062,3277, 88,1723,2038,3978,1951, 212, # 336
266, 152, 149, 468,1899,4208,4516, 77, 187,5028,3038, 37, 5,2990,5029,3979, # 352
5030,5031, 39,2524,4517,2908,3208,2079, 55, 148, 74,4518, 545, 483,1474,1029, # 368
1665, 217,1870,1531,3138,1104,2655,4209, 24, 172,3562, 900,3980,3563,3564,4519, # 384
32,1408,2824,1312, 329, 487,2360,2251,2717, 784,2683, 4,3039,3351,1427,1789, # 400
188, 109, 499,5032,3686,1717,1790, 888,1217,3040,4520,5033,3565,5034,3352,1520, # 416
3687,3981, 196,1034, 775,5035,5036, 929,1816, 249, 439, 38,5037,1063,5038, 794, # 432
3982,1435,2301, 46, 178,3278,2066,5039,2381,5040, 214,1709,4521, 804, 35, 707, # 448
324,3688,1601,2554, 140, 459,4210,5041,5042,1365, 839, 272, 978,2262,2580,3456, # 464
2129,1363,3689,1423, 697, 100,3094, 48, 70,1231, 495,3139,2196,5043,1294,5044, # 480
2080, 462, 586,1042,3279, 853, 256, 988, 185,2382,3457,1698, 434,1084,5045,3458, # 496
314,2625,2788,4522,2335,2336, 569,2285, 637,1817,2525, 757,1162,1879,1616,3459, # 512
287,1577,2116, 768,4523,1671,2868,3566,2526,1321,3816, 909,2418,5046,4211, 933, # 528
3817,4212,2053,2361,1222,4524, 765,2419,1322, 786,4525,5047,1920,1462,1677,2909, # 544
1699,5048,4526,1424,2442,3140,3690,2600,3353,1775,1941,3460,3983,4213, 309,1369, # 560
1130,2825, 364,2234,1653,1299,3984,3567,3985,3986,2656, 525,1085,3041, 902,2001, # 576
1475, 964,4527, 421,1845,1415,1057,2286, 940,1364,3141, 376,4528,4529,1381, 7, # 592
2527, 983,2383, 336,1710,2684,1846, 321,3461, 559,1131,3042,2752,1809,1132,1313, # 608
265,1481,1858,5049, 352,1203,2826,3280, 167,1089, 420,2827, 776, 792,1724,3568, # 624
4214,2443,3281,5050,4215,5051, 446, 229, 333,2753, 901,3818,1200,1557,4530,2657, # 640
1921, 395,2754,2685,3819,4216,1836, 125, 916,3209,2626,4531,5052,5053,3820,5054, # 656
5055,5056,4532,3142,3691,1133,2555,1757,3462,1510,2318,1409,3569,5057,2146, 438, # 672
2601,2910,2384,3354,1068, 958,3043, 461, 311,2869,2686,4217,1916,3210,4218,1979, # 688
383, 750,2755,2627,4219, 274, 539, 385,1278,1442,5058,1154,1965, 384, 561, 210, # 704
98,1295,2556,3570,5059,1711,2420,1482,3463,3987,2911,1257, 129,5060,3821, 642, # 720
523,2789,2790,2658,5061, 141,2235,1333, 68, 176, 441, 876, 907,4220, 603,2602, # 736
710, 171,3464, 404, 549, 18,3143,2398,1410,3692,1666,5062,3571,4533,2912,4534, # 752
5063,2991, 368,5064, 146, 366, 99, 871,3693,1543, 748, 807,1586,1185, 22,2263, # 768
379,3822,3211,5065,3212, 505,1942,2628,1992,1382,2319,5066, 380,2362, 218, 702, # 784
1818,1248,3465,3044,3572,3355,3282,5067,2992,3694, 930,3283,3823,5068, 59,5069, # 800
585, 601,4221, 497,3466,1112,1314,4535,1802,5070,1223,1472,2177,5071, 749,1837, # 816
690,1900,3824,1773,3988,1476, 429,1043,1791,2236,2117, 917,4222, 447,1086,1629, # 832
5072, 556,5073,5074,2021,1654, 844,1090, 105, 550, 966,1758,2828,1008,1783, 686, # 848
1095,5075,2287, 793,1602,5076,3573,2603,4536,4223,2948,2302,4537,3825, 980,2503, # 864
544, 353, 527,4538, 908,2687,2913,5077, 381,2629,1943,1348,5078,1341,1252, 560, # 880
3095,5079,3467,2870,5080,2054, 973, 886,2081, 143,4539,5081,5082, 157,3989, 496, # 896
4224, 57, 840, 540,2039,4540,4541,3468,2118,1445, 970,2264,1748,1966,2082,4225, # 912
3144,1234,1776,3284,2829,3695, 773,1206,2130,1066,2040,1326,3990,1738,1725,4226, # 928
279,3145, 51,1544,2604, 423,1578,2131,2067, 173,4542,1880,5083,5084,1583, 264, # 944
610,3696,4543,2444, 280, 154,5085,5086,5087,1739, 338,1282,3096, 693,2871,1411, # 960
1074,3826,2445,5088,4544,5089,5090,1240, 952,2399,5091,2914,1538,2688, 685,1483, # 976
4227,2475,1436, 953,4228,2055,4545, 671,2400, 79,4229,2446,3285, 608, 567,2689, # 992
3469,4230,4231,1691, 393,1261,1792,2401,5092,4546,5093,5094,5095,5096,1383,1672, # 1008
3827,3213,1464, 522,1119, 661,1150, 216, 675,4547,3991,1432,3574, 609,4548,2690, # 1024
2402,5097,5098,5099,4232,3045, 0,5100,2476, 315, 231,2447, 301,3356,4549,2385, # 1040
5101, 233,4233,3697,1819,4550,4551,5102, 96,1777,1315,2083,5103, 257,5104,1810, # 1056
3698,2718,1139,1820,4234,2022,1124,2164,2791,1778,2659,5105,3097, 363,1655,3214, # 1072
5106,2993,5107,5108,5109,3992,1567,3993, 718, 103,3215, 849,1443, 341,3357,2949, # 1088
1484,5110,1712, 127, 67, 339,4235,2403, 679,1412, 821,5111,5112, 834, 738, 351, # 1104
2994,2147, 846, 235,1497,1881, 418,1993,3828,2719, 186,1100,2148,2756,3575,1545, # 1120
1355,2950,2872,1377, 583,3994,4236,2581,2995,5113,1298,3699,1078,2557,3700,2363, # 1136
78,3829,3830, 267,1289,2100,2002,1594,4237, 348, 369,1274,2197,2178,1838,4552, # 1152
1821,2830,3701,2757,2288,2003,4553,2951,2758, 144,3358, 882,4554,3995,2759,3470, # 1168
4555,2915,5114,4238,1726, 320,5115,3996,3046, 788,2996,5116,2831,1774,1327,2873, # 1184
3997,2832,5117,1306,4556,2004,1700,3831,3576,2364,2660, 787,2023, 506, 824,3702, # 1200
534, 323,4557,1044,3359,2024,1901, 946,3471,5118,1779,1500,1678,5119,1882,4558, # 1216
165, 243,4559,3703,2528, 123, 683,4239, 764,4560, 36,3998,1793, 589,2916, 816, # 1232
626,1667,3047,2237,1639,1555,1622,3832,3999,5120,4000,2874,1370,1228,1933, 891, # 1248
2084,2917, 304,4240,5121, 292,2997,2720,3577, 691,2101,4241,1115,4561, 118, 662, # 1264
5122, 611,1156, 854,2386,1316,2875, 2, 386, 515,2918,5123,5124,3286, 868,2238, # 1280
1486, 855,2661, 785,2216,3048,5125,1040,3216,3578,5126,3146, 448,5127,1525,5128, # 1296
2165,4562,5129,3833,5130,4242,2833,3579,3147, 503, 818,4001,3148,1568, 814, 676, # 1312
1444, 306,1749,5131,3834,1416,1030, 197,1428, 805,2834,1501,4563,5132,5133,5134, # 1328
1994,5135,4564,5136,5137,2198, 13,2792,3704,2998,3149,1229,1917,5138,3835,2132, # 1344
5139,4243,4565,2404,3580,5140,2217,1511,1727,1120,5141,5142, 646,3836,2448, 307, # 1360
5143,5144,1595,3217,5145,5146,5147,3705,1113,1356,4002,1465,2529,2530,5148, 519, # 1376
5149, 128,2133, 92,2289,1980,5150,4003,1512, 342,3150,2199,5151,2793,2218,1981, # 1392
3360,4244, 290,1656,1317, 789, 827,2365,5152,3837,4566, 562, 581,4004,5153, 401, # 1408
4567,2252, 94,4568,5154,1399,2794,5155,1463,2025,4569,3218,1944,5156, 828,1105, # 1424
4245,1262,1394,5157,4246, 605,4570,5158,1784,2876,5159,2835, 819,2102, 578,2200, # 1440
2952,5160,1502, 436,3287,4247,3288,2836,4005,2919,3472,3473,5161,2721,2320,5162, # 1456
5163,2337,2068, 23,4571, 193, 826,3838,2103, 699,1630,4248,3098, 390,1794,1064, # 1472
3581,5164,1579,3099,3100,1400,5165,4249,1839,1640,2877,5166,4572,4573, 137,4250, # 1488
598,3101,1967, 780, 104, 974,2953,5167, 278, 899, 253, 402, 572, 504, 493,1339, # 1504
5168,4006,1275,4574,2582,2558,5169,3706,3049,3102,2253, 565,1334,2722, 863, 41, # 1520
5170,5171,4575,5172,1657,2338, 19, 463,2760,4251, 606,5173,2999,3289,1087,2085, # 1536
1323,2662,3000,5174,1631,1623,1750,4252,2691,5175,2878, 791,2723,2663,2339, 232, # 1552
2421,5176,3001,1498,5177,2664,2630, 755,1366,3707,3290,3151,2026,1609, 119,1918, # 1568
3474, 862,1026,4253,5178,4007,3839,4576,4008,4577,2265,1952,2477,5179,1125, 817, # 1584
4254,4255,4009,1513,1766,2041,1487,4256,3050,3291,2837,3840,3152,5180,5181,1507, # 1600
5182,2692, 733, 40,1632,1106,2879, 345,4257, 841,2531, 230,4578,3002,1847,3292, # 1616
3475,5183,1263, 986,3476,5184, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562, # 1632
4010,4011,2954, 967,2761,2665,1349, 592,2134,1692,3361,3003,1995,4258,1679,4012, # 1648
1902,2188,5185, 739,3708,2724,1296,1290,5186,4259,2201,2202,1922,1563,2605,2559, # 1664
1871,2762,3004,5187, 435,5188, 343,1108, 596, 17,1751,4579,2239,3477,3709,5189, # 1680
4580, 294,3582,2955,1693, 477, 979, 281,2042,3583, 643,2043,3710,2631,2795,2266, # 1696
1031,2340,2135,2303,3584,4581, 367,1249,2560,5190,3585,5191,4582,1283,3362,2005, # 1712
240,1762,3363,4583,4584, 836,1069,3153, 474,5192,2149,2532, 268,3586,5193,3219, # 1728
1521,1284,5194,1658,1546,4260,5195,3587,3588,5196,4261,3364,2693,1685,4262, 961, # 1744
1673,2632, 190,2006,2203,3841,4585,4586,5197, 570,2504,3711,1490,5198,4587,2633, # 1760
3293,1957,4588, 584,1514, 396,1045,1945,5199,4589,1968,2449,5200,5201,4590,4013, # 1776
619,5202,3154,3294, 215,2007,2796,2561,3220,4591,3221,4592, 763,4263,3842,4593, # 1792
5203,5204,1958,1767,2956,3365,3712,1174, 452,1477,4594,3366,3155,5205,2838,1253, # 1808
2387,2189,1091,2290,4264, 492,5206, 638,1169,1825,2136,1752,4014, 648, 926,1021, # 1824
1324,4595, 520,4596, 997, 847,1007, 892,4597,3843,2267,1872,3713,2405,1785,4598, # 1840
1953,2957,3103,3222,1728,4265,2044,3714,4599,2008,1701,3156,1551, 30,2268,4266, # 1856
5207,2027,4600,3589,5208, 501,5209,4267, 594,3478,2166,1822,3590,3479,3591,3223, # 1872
829,2839,4268,5210,1680,3157,1225,4269,5211,3295,4601,4270,3158,2341,5212,4602, # 1888
4271,5213,4015,4016,5214,1848,2388,2606,3367,5215,4603, 374,4017, 652,4272,4273, # 1904
375,1140, 798,5216,5217,5218,2366,4604,2269, 546,1659, 138,3051,2450,4605,5219, # 1920
2254, 612,1849, 910, 796,3844,1740,1371, 825,3845,3846,5220,2920,2562,5221, 692, # 1936
444,3052,2634, 801,4606,4274,5222,1491, 244,1053,3053,4275,4276, 340,5223,4018, # 1952
1041,3005, 293,1168, 87,1357,5224,1539, 959,5225,2240, 721, 694,4277,3847, 219, # 1968
1478, 644,1417,3368,2666,1413,1401,1335,1389,4019,5226,5227,3006,2367,3159,1826, # 1984
730,1515, 184,2840, 66,4607,5228,1660,2958, 246,3369, 378,1457, 226,3480, 975, # 2000
4020,2959,1264,3592, 674, 696,5229, 163,5230,1141,2422,2167, 713,3593,3370,4608, # 2016
4021,5231,5232,1186, 15,5233,1079,1070,5234,1522,3224,3594, 276,1050,2725, 758, # 2032
1126, 653,2960,3296,5235,2342, 889,3595,4022,3104,3007, 903,1250,4609,4023,3481, # 2048
3596,1342,1681,1718, 766,3297, 286, 89,2961,3715,5236,1713,5237,2607,3371,3008, # 2064
5238,2962,2219,3225,2880,5239,4610,2505,2533, 181, 387,1075,4024, 731,2190,3372, # 2080
5240,3298, 310, 313,3482,2304, 770,4278, 54,3054, 189,4611,3105,3848,4025,5241, # 2096
1230,1617,1850, 355,3597,4279,4612,3373, 111,4280,3716,1350,3160,3483,3055,4281, # 2112
2150,3299,3598,5242,2797,4026,4027,3009, 722,2009,5243,1071, 247,1207,2343,2478, # 2128
1378,4613,2010, 864,1437,1214,4614, 373,3849,1142,2220, 667,4615, 442,2763,2563, # 2144
3850,4028,1969,4282,3300,1840, 837, 170,1107, 934,1336,1883,5244,5245,2119,4283, # 2160
2841, 743,1569,5246,4616,4284, 582,2389,1418,3484,5247,1803,5248, 357,1395,1729, # 2176
3717,3301,2423,1564,2241,5249,3106,3851,1633,4617,1114,2086,4285,1532,5250, 482, # 2192
2451,4618,5251,5252,1492, 833,1466,5253,2726,3599,1641,2842,5254,1526,1272,3718, # 2208
4286,1686,1795, 416,2564,1903,1954,1804,5255,3852,2798,3853,1159,2321,5256,2881, # 2224
4619,1610,1584,3056,2424,2764, 443,3302,1163,3161,5257,5258,4029,5259,4287,2506, # 2240
3057,4620,4030,3162,2104,1647,3600,2011,1873,4288,5260,4289, 431,3485,5261, 250, # 2256
97, 81,4290,5262,1648,1851,1558, 160, 848,5263, 866, 740,1694,5264,2204,2843, # 2272
3226,4291,4621,3719,1687, 950,2479, 426, 469,3227,3720,3721,4031,5265,5266,1188, # 2288
424,1996, 861,3601,4292,3854,2205,2694, 168,1235,3602,4293,5267,2087,1674,4622, # 2304
3374,3303, 220,2565,1009,5268,3855, 670,3010, 332,1208, 717,5269,5270,3603,2452, # 2320
4032,3375,5271, 513,5272,1209,2882,3376,3163,4623,1080,5273,5274,5275,5276,2534, # 2336
3722,3604, 815,1587,4033,4034,5277,3605,3486,3856,1254,4624,1328,3058,1390,4035, # 2352
1741,4036,3857,4037,5278, 236,3858,2453,3304,5279,5280,3723,3859,1273,3860,4625, # 2368
5281, 308,5282,4626, 245,4627,1852,2480,1307,2583, 430, 715,2137,2454,5283, 270, # 2384
199,2883,4038,5284,3606,2727,1753, 761,1754, 725,1661,1841,4628,3487,3724,5285, # 2400
5286, 587, 14,3305, 227,2608, 326, 480,2270, 943,2765,3607, 291, 650,1884,5287, # 2416
1702,1226, 102,1547, 62,3488, 904,4629,3489,1164,4294,5288,5289,1224,1548,2766, # 2432
391, 498,1493,5290,1386,1419,5291,2056,1177,4630, 813, 880,1081,2368, 566,1145, # 2448
4631,2291,1001,1035,2566,2609,2242, 394,1286,5292,5293,2069,5294, 86,1494,1730, # 2464
4039, 491,1588, 745, 897,2963, 843,3377,4040,2767,2884,3306,1768, 998,2221,2070, # 2480
397,1827,1195,1970,3725,3011,3378, 284,5295,3861,2507,2138,2120,1904,5296,4041, # 2496
2151,4042,4295,1036,3490,1905, 114,2567,4296, 209,1527,5297,5298,2964,2844,2635, # 2512
2390,2728,3164, 812,2568,5299,3307,5300,1559, 737,1885,3726,1210, 885, 28,2695, # 2528
3608,3862,5301,4297,1004,1780,4632,5302, 346,1982,2222,2696,4633,3863,1742, 797, # 2544
1642,4043,1934,1072,1384,2152, 896,4044,3308,3727,3228,2885,3609,5303,2569,1959, # 2560
4634,2455,1786,5304,5305,5306,4045,4298,1005,1308,3728,4299,2729,4635,4636,1528, # 2576
2610, 161,1178,4300,1983, 987,4637,1101,4301, 631,4046,1157,3229,2425,1343,1241, # 2592
1016,2243,2570, 372, 877,2344,2508,1160, 555,1935, 911,4047,5307, 466,1170, 169, # 2608
1051,2921,2697,3729,2481,3012,1182,2012,2571,1251,2636,5308, 992,2345,3491,1540, # 2624
2730,1201,2071,2406,1997,2482,5309,4638, 528,1923,2191,1503,1874,1570,2369,3379, # 2640
3309,5310, 557,1073,5311,1828,3492,2088,2271,3165,3059,3107, 767,3108,2799,4639, # 2656
1006,4302,4640,2346,1267,2179,3730,3230, 778,4048,3231,2731,1597,2667,5312,4641, # 2672
5313,3493,5314,5315,5316,3310,2698,1433,3311, 131, 95,1504,4049, 723,4303,3166, # 2688
1842,3610,2768,2192,4050,2028,2105,3731,5317,3013,4051,1218,5318,3380,3232,4052, # 2704
4304,2584, 248,1634,3864, 912,5319,2845,3732,3060,3865, 654, 53,5320,3014,5321, # 2720
1688,4642, 777,3494,1032,4053,1425,5322, 191, 820,2121,2846, 971,4643, 931,3233, # 2736
135, 664, 783,3866,1998, 772,2922,1936,4054,3867,4644,2923,3234, 282,2732, 640, # 2752
1372,3495,1127, 922, 325,3381,5323,5324, 711,2045,5325,5326,4055,2223,2800,1937, # 2768
4056,3382,2224,2255,3868,2305,5327,4645,3869,1258,3312,4057,3235,2139,2965,4058, # 2784
4059,5328,2225, 258,3236,4646, 101,1227,5329,3313,1755,5330,1391,3314,5331,2924, # 2800
2057, 893,5332,5333,5334,1402,4305,2347,5335,5336,3237,3611,5337,5338, 878,1325, # 2816
1781,2801,4647, 259,1385,2585, 744,1183,2272,4648,5339,4060,2509,5340, 684,1024, # 2832
4306,5341, 472,3612,3496,1165,3315,4061,4062, 322,2153, 881, 455,1695,1152,1340, # 2848
660, 554,2154,4649,1058,4650,4307, 830,1065,3383,4063,4651,1924,5342,1703,1919, # 2864
5343, 932,2273, 122,5344,4652, 947, 677,5345,3870,2637, 297,1906,1925,2274,4653, # 2880
2322,3316,5346,5347,4308,5348,4309, 84,4310, 112, 989,5349, 547,1059,4064, 701, # 2896
3613,1019,5350,4311,5351,3497, 942, 639, 457,2306,2456, 993,2966, 407, 851, 494, # 2912
4654,3384, 927,5352,1237,5353,2426,3385, 573,4312, 680, 921,2925,1279,1875, 285, # 2928
790,1448,1984, 719,2168,5354,5355,4655,4065,4066,1649,5356,1541, 563,5357,1077, # 2944
5358,3386,3061,3498, 511,3015,4067,4068,3733,4069,1268,2572,3387,3238,4656,4657, # 2960
5359, 535,1048,1276,1189,2926,2029,3167,1438,1373,2847,2967,1134,2013,5360,4313, # 2976
1238,2586,3109,1259,5361, 700,5362,2968,3168,3734,4314,5363,4315,1146,1876,1907, # 2992
4658,2611,4070, 781,2427, 132,1589, 203, 147, 273,2802,2407, 898,1787,2155,4071, # 3008
4072,5364,3871,2803,5365,5366,4659,4660,5367,3239,5368,1635,3872, 965,5369,1805, # 3024
2699,1516,3614,1121,1082,1329,3317,4073,1449,3873, 65,1128,2848,2927,2769,1590, # 3040
3874,5370,5371, 12,2668, 45, 976,2587,3169,4661, 517,2535,1013,1037,3240,5372, # 3056
3875,2849,5373,3876,5374,3499,5375,2612, 614,1999,2323,3877,3110,2733,2638,5376, # 3072
2588,4316, 599,1269,5377,1811,3735,5378,2700,3111, 759,1060, 489,1806,3388,3318, # 3088
1358,5379,5380,2391,1387,1215,2639,2256, 490,5381,5382,4317,1759,2392,2348,5383, # 3104
4662,3878,1908,4074,2640,1807,3241,4663,3500,3319,2770,2349, 874,5384,5385,3501, # 3120
3736,1859, 91,2928,3737,3062,3879,4664,5386,3170,4075,2669,5387,3502,1202,1403, # 3136
3880,2969,2536,1517,2510,4665,3503,2511,5388,4666,5389,2701,1886,1495,1731,4076, # 3152
2370,4667,5390,2030,5391,5392,4077,2702,1216, 237,2589,4318,2324,4078,3881,4668, # 3168
4669,2703,3615,3504, 445,4670,5393,5394,5395,5396,2771, 61,4079,3738,1823,4080, # 3184
5397, 687,2046, 935, 925, 405,2670, 703,1096,1860,2734,4671,4081,1877,1367,2704, # 3200
3389, 918,2106,1782,2483, 334,3320,1611,1093,4672, 564,3171,3505,3739,3390, 945, # 3216
2641,2058,4673,5398,1926, 872,4319,5399,3506,2705,3112, 349,4320,3740,4082,4674, # 3232
3882,4321,3741,2156,4083,4675,4676,4322,4677,2408,2047, 782,4084, 400, 251,4323, # 3248
1624,5400,5401, 277,3742, 299,1265, 476,1191,3883,2122,4324,4325,1109, 205,5402, # 3264
2590,1000,2157,3616,1861,5403,5404,5405,4678,5406,4679,2573, 107,2484,2158,4085, # 3280
3507,3172,5407,1533, 541,1301, 158, 753,4326,2886,3617,5408,1696, 370,1088,4327, # 3296
4680,3618, 579, 327, 440, 162,2244, 269,1938,1374,3508, 968,3063, 56,1396,3113, # 3312
2107,3321,3391,5409,1927,2159,4681,3016,5410,3619,5411,5412,3743,4682,2485,5413, # 3328
2804,5414,1650,4683,5415,2613,5416,5417,4086,2671,3392,1149,3393,4087,3884,4088, # 3344
5418,1076, 49,5419, 951,3242,3322,3323, 450,2850, 920,5420,1812,2805,2371,4328, # 3360
1909,1138,2372,3885,3509,5421,3243,4684,1910,1147,1518,2428,4685,3886,5422,4686, # 3376
2393,2614, 260,1796,3244,5423,5424,3887,3324, 708,5425,3620,1704,5426,3621,1351, # 3392
1618,3394,3017,1887, 944,4329,3395,4330,3064,3396,4331,5427,3744, 422, 413,1714, # 3408
3325, 500,2059,2350,4332,2486,5428,1344,1911, 954,5429,1668,5430,5431,4089,2409, # 3424
4333,3622,3888,4334,5432,2307,1318,2512,3114, 133,3115,2887,4687, 629, 31,2851, # 3440
2706,3889,4688, 850, 949,4689,4090,2970,1732,2089,4335,1496,1853,5433,4091, 620, # 3456
3245, 981,1242,3745,3397,1619,3746,1643,3326,2140,2457,1971,1719,3510,2169,5434, # 3472
3246,5435,5436,3398,1829,5437,1277,4690,1565,2048,5438,1636,3623,3116,5439, 869, # 3488
2852, 655,3890,3891,3117,4092,3018,3892,1310,3624,4691,5440,5441,5442,1733, 558, # 3504
4692,3747, 335,1549,3065,1756,4336,3748,1946,3511,1830,1291,1192, 470,2735,2108, # 3520
2806, 913,1054,4093,5443,1027,5444,3066,4094,4693, 982,2672,3399,3173,3512,3247, # 3536
3248,1947,2807,5445, 571,4694,5446,1831,5447,3625,2591,1523,2429,5448,2090, 984, # 3552
4695,3749,1960,5449,3750, 852, 923,2808,3513,3751, 969,1519, 999,2049,2325,1705, # 3568
5450,3118, 615,1662, 151, 597,4095,2410,2326,1049, 275,4696,3752,4337, 568,3753, # 3584
3626,2487,4338,3754,5451,2430,2275, 409,3249,5452,1566,2888,3514,1002, 769,2853, # 3600
194,2091,3174,3755,2226,3327,4339, 628,1505,5453,5454,1763,2180,3019,4096, 521, # 3616
1161,2592,1788,2206,2411,4697,4097,1625,4340,4341, 412, 42,3119, 464,5455,2642, # 3632
4698,3400,1760,1571,2889,3515,2537,1219,2207,3893,2643,2141,2373,4699,4700,3328, # 3648
1651,3401,3627,5456,5457,3628,2488,3516,5458,3756,5459,5460,2276,2092, 460,5461, # 3664
4701,5462,3020, 962, 588,3629, 289,3250,2644,1116, 52,5463,3067,1797,5464,5465, # 3680
5466,1467,5467,1598,1143,3757,4342,1985,1734,1067,4702,1280,3402, 465,4703,1572, # 3696
510,5468,1928,2245,1813,1644,3630,5469,4704,3758,5470,5471,2673,1573,1534,5472, # 3712
5473, 536,1808,1761,3517,3894,3175,2645,5474,5475,5476,4705,3518,2929,1912,2809, # 3728
5477,3329,1122, 377,3251,5478, 360,5479,5480,4343,1529, 551,5481,2060,3759,1769, # 3744
2431,5482,2930,4344,3330,3120,2327,2109,2031,4706,1404, 136,1468,1479, 672,1171, # 3760
3252,2308, 271,3176,5483,2772,5484,2050, 678,2736, 865,1948,4707,5485,2014,4098, # 3776
2971,5486,2737,2227,1397,3068,3760,4708,4709,1735,2931,3403,3631,5487,3895, 509, # 3792
2854,2458,2890,3896,5488,5489,3177,3178,4710,4345,2538,4711,2309,1166,1010, 552, # 3808
681,1888,5490,5491,2972,2973,4099,1287,1596,1862,3179, 358, 453, 736, 175, 478, # 3824
1117, 905,1167,1097,5492,1854,1530,5493,1706,5494,2181,3519,2292,3761,3520,3632, # 3840
4346,2093,4347,5495,3404,1193,2489,4348,1458,2193,2208,1863,1889,1421,3331,2932, # 3856
3069,2182,3521, 595,2123,5496,4100,5497,5498,4349,1707,2646, 223,3762,1359, 751, # 3872
3121, 183,3522,5499,2810,3021, 419,2374, 633, 704,3897,2394, 241,5500,5501,5502, # 3888
838,3022,3763,2277,2773,2459,3898,1939,2051,4101,1309,3122,2246,1181,5503,1136, # 3904
2209,3899,2375,1446,4350,2310,4712,5504,5505,4351,1055,2615, 484,3764,5506,4102, # 3920
625,4352,2278,3405,1499,4353,4103,5507,4104,4354,3253,2279,2280,3523,5508,5509, # 3936
2774, 808,2616,3765,3406,4105,4355,3123,2539, 526,3407,3900,4356, 955,5510,1620, # 3952
4357,2647,2432,5511,1429,3766,1669,1832, 994, 928,5512,3633,1260,5513,5514,5515, # 3968
1949,2293, 741,2933,1626,4358,2738,2460, 867,1184, 362,3408,1392,5516,5517,4106, # 3984
4359,1770,1736,3254,2934,4713,4714,1929,2707,1459,1158,5518,3070,3409,2891,1292, # 4000
1930,2513,2855,3767,1986,1187,2072,2015,2617,4360,5519,2574,2514,2170,3768,2490, # 4016
3332,5520,3769,4715,5521,5522, 666,1003,3023,1022,3634,4361,5523,4716,1814,2257, # 4032
574,3901,1603, 295,1535, 705,3902,4362, 283, 858, 417,5524,5525,3255,4717,4718, # 4048
3071,1220,1890,1046,2281,2461,4107,1393,1599, 689,2575, 388,4363,5526,2491, 802, # 4064
5527,2811,3903,2061,1405,2258,5528,4719,3904,2110,1052,1345,3256,1585,5529, 809, # 4080
5530,5531,5532, 575,2739,3524, 956,1552,1469,1144,2328,5533,2329,1560,2462,3635, # 4096
3257,4108, 616,2210,4364,3180,2183,2294,5534,1833,5535,3525,4720,5536,1319,3770, # 4112
3771,1211,3636,1023,3258,1293,2812,5537,5538,5539,3905, 607,2311,3906, 762,2892, # 4128
1439,4365,1360,4721,1485,3072,5540,4722,1038,4366,1450,2062,2648,4367,1379,4723, # 4144
2593,5541,5542,4368,1352,1414,2330,2935,1172,5543,5544,3907,3908,4724,1798,1451, # 4160
5545,5546,5547,5548,2936,4109,4110,2492,2351, 411,4111,4112,3637,3333,3124,4725, # 4176
1561,2674,1452,4113,1375,5549,5550, 47,2974, 316,5551,1406,1591,2937,3181,5552, # 4192
1025,2142,3125,3182, 354,2740, 884,2228,4369,2412, 508,3772, 726,3638, 996,2433, # 4208
3639, 729,5553, 392,2194,1453,4114,4726,3773,5554,5555,2463,3640,2618,1675,2813, # 4224
919,2352,2975,2353,1270,4727,4115, 73,5556,5557, 647,5558,3259,2856,2259,1550, # 4240
1346,3024,5559,1332, 883,3526,5560,5561,5562,5563,3334,2775,5564,1212, 831,1347, # 4256
4370,4728,2331,3909,1864,3073, 720,3910,4729,4730,3911,5565,4371,5566,5567,4731, # 4272
5568,5569,1799,4732,3774,2619,4733,3641,1645,2376,4734,5570,2938, 669,2211,2675, # 4288
2434,5571,2893,5572,5573,1028,3260,5574,4372,2413,5575,2260,1353,5576,5577,4735, # 4304
3183, 518,5578,4116,5579,4373,1961,5580,2143,4374,5581,5582,3025,2354,2355,3912, # 4320
516,1834,1454,4117,2708,4375,4736,2229,2620,1972,1129,3642,5583,2776,5584,2976, # 4336
1422, 577,1470,3026,1524,3410,5585,5586, 432,4376,3074,3527,5587,2594,1455,2515, # 4352
2230,1973,1175,5588,1020,2741,4118,3528,4737,5589,2742,5590,1743,1361,3075,3529, # 4368
2649,4119,4377,4738,2295, 895, 924,4378,2171, 331,2247,3076, 166,1627,3077,1098, # 4384
5591,1232,2894,2231,3411,4739, 657, 403,1196,2377, 542,3775,3412,1600,4379,3530, # 4400
5592,4740,2777,3261, 576, 530,1362,4741,4742,2540,2676,3776,4120,5593, 842,3913, # 4416
5594,2814,2032,1014,4121, 213,2709,3413, 665, 621,4380,5595,3777,2939,2435,5596, # 4432
2436,3335,3643,3414,4743,4381,2541,4382,4744,3644,1682,4383,3531,1380,5597, 724, # 4448
2282, 600,1670,5598,1337,1233,4745,3126,2248,5599,1621,4746,5600, 651,4384,5601, # 4464
1612,4385,2621,5602,2857,5603,2743,2312,3078,5604, 716,2464,3079, 174,1255,2710, # 4480
4122,3645, 548,1320,1398, 728,4123,1574,5605,1891,1197,3080,4124,5606,3081,3082, # 4496
3778,3646,3779, 747,5607, 635,4386,4747,5608,5609,5610,4387,5611,5612,4748,5613, # 4512
3415,4749,2437, 451,5614,3780,2542,2073,4388,2744,4389,4125,5615,1764,4750,5616, # 4528
4390, 350,4751,2283,2395,2493,5617,4391,4126,2249,1434,4127, 488,4752, 458,4392, # 4544
4128,3781, 771,1330,2396,3914,2576,3184,2160,2414,1553,2677,3185,4393,5618,2494, # 4560
2895,2622,1720,2711,4394,3416,4753,5619,2543,4395,5620,3262,4396,2778,5621,2016, # 4576
2745,5622,1155,1017,3782,3915,5623,3336,2313, 201,1865,4397,1430,5624,4129,5625, # 4592
5626,5627,5628,5629,4398,1604,5630, 414,1866, 371,2595,4754,4755,3532,2017,3127, # 4608
4756,1708, 960,4399, 887, 389,2172,1536,1663,1721,5631,2232,4130,2356,2940,1580, # 4624
5632,5633,1744,4757,2544,4758,4759,5634,4760,5635,2074,5636,4761,3647,3417,2896, # 4640
4400,5637,4401,2650,3418,2815, 673,2712,2465, 709,3533,4131,3648,4402,5638,1148, # 4656
502, 634,5639,5640,1204,4762,3649,1575,4763,2623,3783,5641,3784,3128, 948,3263, # 4672
121,1745,3916,1110,5642,4403,3083,2516,3027,4132,3785,1151,1771,3917,1488,4133, # 4688
1987,5643,2438,3534,5644,5645,2094,5646,4404,3918,1213,1407,2816, 531,2746,2545, # 4704
3264,1011,1537,4764,2779,4405,3129,1061,5647,3786,3787,1867,2897,5648,2018, 120, # 4720
4406,4407,2063,3650,3265,2314,3919,2678,3419,1955,4765,4134,5649,3535,1047,2713, # 4736
1266,5650,1368,4766,2858, 649,3420,3920,2546,2747,1102,2859,2679,5651,5652,2000, # 4752
5653,1111,3651,2977,5654,2495,3921,3652,2817,1855,3421,3788,5655,5656,3422,2415, # 4768
2898,3337,3266,3653,5657,2577,5658,3654,2818,4135,1460, 856,5659,3655,5660,2899, # 4784
2978,5661,2900,3922,5662,4408, 632,2517, 875,3923,1697,3924,2296,5663,5664,4767, # 4800
3028,1239, 580,4768,4409,5665, 914, 936,2075,1190,4136,1039,2124,5666,5667,5668, # 4816
5669,3423,1473,5670,1354,4410,3925,4769,2173,3084,4137, 915,3338,4411,4412,3339, # 4832
1605,1835,5671,2748, 398,3656,4413,3926,4138, 328,1913,2860,4139,3927,1331,4414, # 4848
3029, 937,4415,5672,3657,4140,4141,3424,2161,4770,3425, 524, 742, 538,3085,1012, # 4864
5673,5674,3928,2466,5675, 658,1103, 225,3929,5676,5677,4771,5678,4772,5679,3267, # 4880
1243,5680,4142, 963,2250,4773,5681,2714,3658,3186,5682,5683,2596,2332,5684,4774, # 4896
5685,5686,5687,3536, 957,3426,2547,2033,1931,2941,2467, 870,2019,3659,1746,2780, # 4912
2781,2439,2468,5688,3930,5689,3789,3130,3790,3537,3427,3791,5690,1179,3086,5691, # 4928
3187,2378,4416,3792,2548,3188,3131,2749,4143,5692,3428,1556,2549,2297, 977,2901, # 4944
2034,4144,1205,3429,5693,1765,3430,3189,2125,1271, 714,1689,4775,3538,5694,2333, # 4960
3931, 533,4417,3660,2184, 617,5695,2469,3340,3539,2315,5696,5697,3190,5698,5699, # 4976
3932,1988, 618, 427,2651,3540,3431,5700,5701,1244,1690,5702,2819,4418,4776,5703, # 4992
3541,4777,5704,2284,1576, 473,3661,4419,3432, 972,5705,3662,5706,3087,5707,5708, # 5008
4778,4779,5709,3793,4145,4146,5710, 153,4780, 356,5711,1892,2902,4420,2144, 408, # 5024
803,2357,5712,3933,5713,4421,1646,2578,2518,4781,4782,3934,5714,3935,4422,5715, # 5040
2416,3433, 752,5716,5717,1962,3341,2979,5718, 746,3030,2470,4783,4423,3794, 698, # 5056
4784,1893,4424,3663,2550,4785,3664,3936,5719,3191,3434,5720,1824,1302,4147,2715, # 5072
3937,1974,4425,5721,4426,3192, 823,1303,1288,1236,2861,3542,4148,3435, 774,3938, # 5088
5722,1581,4786,1304,2862,3939,4787,5723,2440,2162,1083,3268,4427,4149,4428, 344, # 5104
1173, 288,2316, 454,1683,5724,5725,1461,4788,4150,2597,5726,5727,4789, 985, 894, # 5120
5728,3436,3193,5729,1914,2942,3795,1989,5730,2111,1975,5731,4151,5732,2579,1194, # 5136
425,5733,4790,3194,1245,3796,4429,5734,5735,2863,5736, 636,4791,1856,3940, 760, # 5152
1800,5737,4430,2212,1508,4792,4152,1894,1684,2298,5738,5739,4793,4431,4432,2213, # 5168
479,5740,5741, 832,5742,4153,2496,5743,2980,2497,3797, 990,3132, 627,1815,2652, # 5184
4433,1582,4434,2126,2112,3543,4794,5744, 799,4435,3195,5745,4795,2113,1737,3031, # 5200
1018, 543, 754,4436,3342,1676,4796,4797,4154,4798,1489,5746,3544,5747,2624,2903, # 5216
4155,5748,5749,2981,5750,5751,5752,5753,3196,4799,4800,2185,1722,5754,3269,3270, # 5232
1843,3665,1715, 481, 365,1976,1857,5755,5756,1963,2498,4801,5757,2127,3666,3271, # 5248
433,1895,2064,2076,5758, 602,2750,5759,5760,5761,5762,5763,3032,1628,3437,5764, # 5264
3197,4802,4156,2904,4803,2519,5765,2551,2782,5766,5767,5768,3343,4804,2905,5769, # 5280
4805,5770,2864,4806,4807,1221,2982,4157,2520,5771,5772,5773,1868,1990,5774,5775, # 5296
5776,1896,5777,5778,4808,1897,4158, 318,5779,2095,4159,4437,5780,5781, 485,5782, # 5312
938,3941, 553,2680, 116,5783,3942,3667,5784,3545,2681,2783,3438,3344,2820,5785, # 5328
3668,2943,4160,1747,2944,2983,5786,5787, 207,5788,4809,5789,4810,2521,5790,3033, # 5344
890,3669,3943,5791,1878,3798,3439,5792,2186,2358,3440,1652,5793,5794,5795, 941, # 5360
2299, 208,3546,4161,2020, 330,4438,3944,2906,2499,3799,4439,4811,5796,5797,5798, # 5376 #last 512
#Everything below is of no interest for detection purpose
2522,1613,4812,5799,3345,3945,2523,5800,4162,5801,1637,4163,2471,4813,3946,5802, # 5392
2500,3034,3800,5803,5804,2195,4814,5805,2163,5806,5807,5808,5809,5810,5811,5812, # 5408
5813,5814,5815,5816,5817,5818,5819,5820,5821,5822,5823,5824,5825,5826,5827,5828, # 5424
5829,5830,5831,5832,5833,5834,5835,5836,5837,5838,5839,5840,5841,5842,5843,5844, # 5440
5845,5846,5847,5848,5849,5850,5851,5852,5853,5854,5855,5856,5857,5858,5859,5860, # 5456
5861,5862,5863,5864,5865,5866,5867,5868,5869,5870,5871,5872,5873,5874,5875,5876, # 5472
5877,5878,5879,5880,5881,5882,5883,5884,5885,5886,5887,5888,5889,5890,5891,5892, # 5488
5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904,5905,5906,5907,5908, # 5504
5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920,5921,5922,5923,5924, # 5520
5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,5936,5937,5938,5939,5940, # 5536
5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,5952,5953,5954,5955,5956, # 5552
5957,5958,5959,5960,5961,5962,5963,5964,5965,5966,5967,5968,5969,5970,5971,5972, # 5568
5973,5974,5975,5976,5977,5978,5979,5980,5981,5982,5983,5984,5985,5986,5987,5988, # 5584
5989,5990,5991,5992,5993,5994,5995,5996,5997,5998,5999,6000,6001,6002,6003,6004, # 5600
6005,6006,6007,6008,6009,6010,6011,6012,6013,6014,6015,6016,6017,6018,6019,6020, # 5616
6021,6022,6023,6024,6025,6026,6027,6028,6029,6030,6031,6032,6033,6034,6035,6036, # 5632
6037,6038,6039,6040,6041,6042,6043,6044,6045,6046,6047,6048,6049,6050,6051,6052, # 5648
6053,6054,6055,6056,6057,6058,6059,6060,6061,6062,6063,6064,6065,6066,6067,6068, # 5664
6069,6070,6071,6072,6073,6074,6075,6076,6077,6078,6079,6080,6081,6082,6083,6084, # 5680
6085,6086,6087,6088,6089,6090,6091,6092,6093,6094,6095,6096,6097,6098,6099,6100, # 5696
6101,6102,6103,6104,6105,6106,6107,6108,6109,6110,6111,6112,6113,6114,6115,6116, # 5712
6117,6118,6119,6120,6121,6122,6123,6124,6125,6126,6127,6128,6129,6130,6131,6132, # 5728
6133,6134,6135,6136,6137,6138,6139,6140,6141,6142,6143,6144,6145,6146,6147,6148, # 5744
6149,6150,6151,6152,6153,6154,6155,6156,6157,6158,6159,6160,6161,6162,6163,6164, # 5760
6165,6166,6167,6168,6169,6170,6171,6172,6173,6174,6175,6176,6177,6178,6179,6180, # 5776
6181,6182,6183,6184,6185,6186,6187,6188,6189,6190,6191,6192,6193,6194,6195,6196, # 5792
6197,6198,6199,6200,6201,6202,6203,6204,6205,6206,6207,6208,6209,6210,6211,6212, # 5808
6213,6214,6215,6216,6217,6218,6219,6220,6221,6222,6223,3670,6224,6225,6226,6227, # 5824
6228,6229,6230,6231,6232,6233,6234,6235,6236,6237,6238,6239,6240,6241,6242,6243, # 5840
6244,6245,6246,6247,6248,6249,6250,6251,6252,6253,6254,6255,6256,6257,6258,6259, # 5856
6260,6261,6262,6263,6264,6265,6266,6267,6268,6269,6270,6271,6272,6273,6274,6275, # 5872
6276,6277,6278,6279,6280,6281,6282,6283,6284,6285,4815,6286,6287,6288,6289,6290, # 5888
6291,6292,4816,6293,6294,6295,6296,6297,6298,6299,6300,6301,6302,6303,6304,6305, # 5904
6306,6307,6308,6309,6310,6311,4817,4818,6312,6313,6314,6315,6316,6317,6318,4819, # 5920
6319,6320,6321,6322,6323,6324,6325,6326,6327,6328,6329,6330,6331,6332,6333,6334, # 5936
6335,6336,6337,4820,6338,6339,6340,6341,6342,6343,6344,6345,6346,6347,6348,6349, # 5952
6350,6351,6352,6353,6354,6355,6356,6357,6358,6359,6360,6361,6362,6363,6364,6365, # 5968
6366,6367,6368,6369,6370,6371,6372,6373,6374,6375,6376,6377,6378,6379,6380,6381, # 5984
6382,6383,6384,6385,6386,6387,6388,6389,6390,6391,6392,6393,6394,6395,6396,6397, # 6000
6398,6399,6400,6401,6402,6403,6404,6405,6406,6407,6408,6409,6410,3441,6411,6412, # 6016
6413,6414,6415,6416,6417,6418,6419,6420,6421,6422,6423,6424,6425,4440,6426,6427, # 6032
6428,6429,6430,6431,6432,6433,6434,6435,6436,6437,6438,6439,6440,6441,6442,6443, # 6048
6444,6445,6446,6447,6448,6449,6450,6451,6452,6453,6454,4821,6455,6456,6457,6458, # 6064
6459,6460,6461,6462,6463,6464,6465,6466,6467,6468,6469,6470,6471,6472,6473,6474, # 6080
6475,6476,6477,3947,3948,6478,6479,6480,6481,3272,4441,6482,6483,6484,6485,4442, # 6096
6486,6487,6488,6489,6490,6491,6492,6493,6494,6495,6496,4822,6497,6498,6499,6500, # 6112
6501,6502,6503,6504,6505,6506,6507,6508,6509,6510,6511,6512,6513,6514,6515,6516, # 6128
6517,6518,6519,6520,6521,6522,6523,6524,6525,6526,6527,6528,6529,6530,6531,6532, # 6144
6533,6534,6535,6536,6537,6538,6539,6540,6541,6542,6543,6544,6545,6546,6547,6548, # 6160
6549,6550,6551,6552,6553,6554,6555,6556,2784,6557,4823,6558,6559,6560,6561,6562, # 6176
6563,6564,6565,6566,6567,6568,6569,3949,6570,6571,6572,4824,6573,6574,6575,6576, # 6192
6577,6578,6579,6580,6581,6582,6583,4825,6584,6585,6586,3950,2785,6587,6588,6589, # 6208
6590,6591,6592,6593,6594,6595,6596,6597,6598,6599,6600,6601,6602,6603,6604,6605, # 6224
6606,6607,6608,6609,6610,6611,6612,4826,6613,6614,6615,4827,6616,6617,6618,6619, # 6240
6620,6621,6622,6623,6624,6625,4164,6626,6627,6628,6629,6630,6631,6632,6633,6634, # 6256
3547,6635,4828,6636,6637,6638,6639,6640,6641,6642,3951,2984,6643,6644,6645,6646, # 6272
6647,6648,6649,4165,6650,4829,6651,6652,4830,6653,6654,6655,6656,6657,6658,6659, # 6288
6660,6661,6662,4831,6663,6664,6665,6666,6667,6668,6669,6670,6671,4166,6672,4832, # 6304
3952,6673,6674,6675,6676,4833,6677,6678,6679,4167,6680,6681,6682,3198,6683,6684, # 6320
6685,6686,6687,6688,6689,6690,6691,6692,6693,6694,6695,6696,6697,4834,6698,6699, # 6336
6700,6701,6702,6703,6704,6705,6706,6707,6708,6709,6710,6711,6712,6713,6714,6715, # 6352
6716,6717,6718,6719,6720,6721,6722,6723,6724,6725,6726,6727,6728,6729,6730,6731, # 6368
6732,6733,6734,4443,6735,6736,6737,6738,6739,6740,6741,6742,6743,6744,6745,4444, # 6384
6746,6747,6748,6749,6750,6751,6752,6753,6754,6755,6756,6757,6758,6759,6760,6761, # 6400
6762,6763,6764,6765,6766,6767,6768,6769,6770,6771,6772,6773,6774,6775,6776,6777, # 6416
6778,6779,6780,6781,4168,6782,6783,3442,6784,6785,6786,6787,6788,6789,6790,6791, # 6432
4169,6792,6793,6794,6795,6796,6797,6798,6799,6800,6801,6802,6803,6804,6805,6806, # 6448
6807,6808,6809,6810,6811,4835,6812,6813,6814,4445,6815,6816,4446,6817,6818,6819, # 6464
6820,6821,6822,6823,6824,6825,6826,6827,6828,6829,6830,6831,6832,6833,6834,6835, # 6480
3548,6836,6837,6838,6839,6840,6841,6842,6843,6844,6845,6846,4836,6847,6848,6849, # 6496
6850,6851,6852,6853,6854,3953,6855,6856,6857,6858,6859,6860,6861,6862,6863,6864, # 6512
6865,6866,6867,6868,6869,6870,6871,6872,6873,6874,6875,6876,6877,3199,6878,6879, # 6528
6880,6881,6882,4447,6883,6884,6885,6886,6887,6888,6889,6890,6891,6892,6893,6894, # 6544
6895,6896,6897,6898,6899,6900,6901,6902,6903,6904,4170,6905,6906,6907,6908,6909, # 6560
6910,6911,6912,6913,6914,6915,6916,6917,6918,6919,6920,6921,6922,6923,6924,6925, # 6576
6926,6927,4837,6928,6929,6930,6931,6932,6933,6934,6935,6936,3346,6937,6938,4838, # 6592
6939,6940,6941,4448,6942,6943,6944,6945,6946,4449,6947,6948,6949,6950,6951,6952, # 6608
6953,6954,6955,6956,6957,6958,6959,6960,6961,6962,6963,6964,6965,6966,6967,6968, # 6624
6969,6970,6971,6972,6973,6974,6975,6976,6977,6978,6979,6980,6981,6982,6983,6984, # 6640
6985,6986,6987,6988,6989,6990,6991,6992,6993,6994,3671,6995,6996,6997,6998,4839, # 6656
6999,7000,7001,7002,3549,7003,7004,7005,7006,7007,7008,7009,7010,7011,7012,7013, # 6672
7014,7015,7016,7017,7018,7019,7020,7021,7022,7023,7024,7025,7026,7027,7028,7029, # 6688
7030,4840,7031,7032,7033,7034,7035,7036,7037,7038,4841,7039,7040,7041,7042,7043, # 6704
7044,7045,7046,7047,7048,7049,7050,7051,7052,7053,7054,7055,7056,7057,7058,7059, # 6720
7060,7061,7062,7063,7064,7065,7066,7067,7068,7069,7070,2985,7071,7072,7073,7074, # 6736
7075,7076,7077,7078,7079,7080,4842,7081,7082,7083,7084,7085,7086,7087,7088,7089, # 6752
7090,7091,7092,7093,7094,7095,7096,7097,7098,7099,7100,7101,7102,7103,7104,7105, # 6768
7106,7107,7108,7109,7110,7111,7112,7113,7114,7115,7116,7117,7118,4450,7119,7120, # 6784
7121,7122,7123,7124,7125,7126,7127,7128,7129,7130,7131,7132,7133,7134,7135,7136, # 6800
7137,7138,7139,7140,7141,7142,7143,4843,7144,7145,7146,7147,7148,7149,7150,7151, # 6816
7152,7153,7154,7155,7156,7157,7158,7159,7160,7161,7162,7163,7164,7165,7166,7167, # 6832
7168,7169,7170,7171,7172,7173,7174,7175,7176,7177,7178,7179,7180,7181,7182,7183, # 6848
7184,7185,7186,7187,7188,4171,4172,7189,7190,7191,7192,7193,7194,7195,7196,7197, # 6864
7198,7199,7200,7201,7202,7203,7204,7205,7206,7207,7208,7209,7210,7211,7212,7213, # 6880
7214,7215,7216,7217,7218,7219,7220,7221,7222,7223,7224,7225,7226,7227,7228,7229, # 6896
7230,7231,7232,7233,7234,7235,7236,7237,7238,7239,7240,7241,7242,7243,7244,7245, # 6912
7246,7247,7248,7249,7250,7251,7252,7253,7254,7255,7256,7257,7258,7259,7260,7261, # 6928
7262,7263,7264,7265,7266,7267,7268,7269,7270,7271,7272,7273,7274,7275,7276,7277, # 6944
7278,7279,7280,7281,7282,7283,7284,7285,7286,7287,7288,7289,7290,7291,7292,7293, # 6960
7294,7295,7296,4844,7297,7298,7299,7300,7301,7302,7303,7304,7305,7306,7307,7308, # 6976
7309,7310,7311,7312,7313,7314,7315,7316,4451,7317,7318,7319,7320,7321,7322,7323, # 6992
7324,7325,7326,7327,7328,7329,7330,7331,7332,7333,7334,7335,7336,7337,7338,7339, # 7008
7340,7341,7342,7343,7344,7345,7346,7347,7348,7349,7350,7351,7352,7353,4173,7354, # 7024
7355,4845,7356,7357,7358,7359,7360,7361,7362,7363,7364,7365,7366,7367,7368,7369, # 7040
7370,7371,7372,7373,7374,7375,7376,7377,7378,7379,7380,7381,7382,7383,7384,7385, # 7056
7386,7387,7388,4846,7389,7390,7391,7392,7393,7394,7395,7396,7397,7398,7399,7400, # 7072
7401,7402,7403,7404,7405,3672,7406,7407,7408,7409,7410,7411,7412,7413,7414,7415, # 7088
7416,7417,7418,7419,7420,7421,7422,7423,7424,7425,7426,7427,7428,7429,7430,7431, # 7104
7432,7433,7434,7435,7436,7437,7438,7439,7440,7441,7442,7443,7444,7445,7446,7447, # 7120
7448,7449,7450,7451,7452,7453,4452,7454,3200,7455,7456,7457,7458,7459,7460,7461, # 7136
7462,7463,7464,7465,7466,7467,7468,7469,7470,7471,7472,7473,7474,4847,7475,7476, # 7152
7477,3133,7478,7479,7480,7481,7482,7483,7484,7485,7486,7487,7488,7489,7490,7491, # 7168
7492,7493,7494,7495,7496,7497,7498,7499,7500,7501,7502,3347,7503,7504,7505,7506, # 7184
7507,7508,7509,7510,7511,7512,7513,7514,7515,7516,7517,7518,7519,7520,7521,4848, # 7200
7522,7523,7524,7525,7526,7527,7528,7529,7530,7531,7532,7533,7534,7535,7536,7537, # 7216
7538,7539,7540,7541,7542,7543,7544,7545,7546,7547,7548,7549,3801,4849,7550,7551, # 7232
7552,7553,7554,7555,7556,7557,7558,7559,7560,7561,7562,7563,7564,7565,7566,7567, # 7248
7568,7569,3035,7570,7571,7572,7573,7574,7575,7576,7577,7578,7579,7580,7581,7582, # 7264
7583,7584,7585,7586,7587,7588,7589,7590,7591,7592,7593,7594,7595,7596,7597,7598, # 7280
7599,7600,7601,7602,7603,7604,7605,7606,7607,7608,7609,7610,7611,7612,7613,7614, # 7296
7615,7616,4850,7617,7618,3802,7619,7620,7621,7622,7623,7624,7625,7626,7627,7628, # 7312
7629,7630,7631,7632,4851,7633,7634,7635,7636,7637,7638,7639,7640,7641,7642,7643, # 7328
7644,7645,7646,7647,7648,7649,7650,7651,7652,7653,7654,7655,7656,7657,7658,7659, # 7344
7660,7661,7662,7663,7664,7665,7666,7667,7668,7669,7670,4453,7671,7672,7673,7674, # 7360
7675,7676,7677,7678,7679,7680,7681,7682,7683,7684,7685,7686,7687,7688,7689,7690, # 7376
7691,7692,7693,7694,7695,7696,7697,3443,7698,7699,7700,7701,7702,4454,7703,7704, # 7392
7705,7706,7707,7708,7709,7710,7711,7712,7713,2472,7714,7715,7716,7717,7718,7719, # 7408
7720,7721,7722,7723,7724,7725,7726,7727,7728,7729,7730,7731,3954,7732,7733,7734, # 7424
7735,7736,7737,7738,7739,7740,7741,7742,7743,7744,7745,7746,7747,7748,7749,7750, # 7440
3134,7751,7752,4852,7753,7754,7755,4853,7756,7757,7758,7759,7760,4174,7761,7762, # 7456
7763,7764,7765,7766,7767,7768,7769,7770,7771,7772,7773,7774,7775,7776,7777,7778, # 7472
7779,7780,7781,7782,7783,7784,7785,7786,7787,7788,7789,7790,7791,7792,7793,7794, # 7488
7795,7796,7797,7798,7799,7800,7801,7802,7803,7804,7805,4854,7806,7807,7808,7809, # 7504
7810,7811,7812,7813,7814,7815,7816,7817,7818,7819,7820,7821,7822,7823,7824,7825, # 7520
4855,7826,7827,7828,7829,7830,7831,7832,7833,7834,7835,7836,7837,7838,7839,7840, # 7536
7841,7842,7843,7844,7845,7846,7847,3955,7848,7849,7850,7851,7852,7853,7854,7855, # 7552
7856,7857,7858,7859,7860,3444,7861,7862,7863,7864,7865,7866,7867,7868,7869,7870, # 7568
7871,7872,7873,7874,7875,7876,7877,7878,7879,7880,7881,7882,7883,7884,7885,7886, # 7584
7887,7888,7889,7890,7891,4175,7892,7893,7894,7895,7896,4856,4857,7897,7898,7899, # 7600
7900,2598,7901,7902,7903,7904,7905,7906,7907,7908,4455,7909,7910,7911,7912,7913, # 7616
7914,3201,7915,7916,7917,7918,7919,7920,7921,4858,7922,7923,7924,7925,7926,7927, # 7632
7928,7929,7930,7931,7932,7933,7934,7935,7936,7937,7938,7939,7940,7941,7942,7943, # 7648
7944,7945,7946,7947,7948,7949,7950,7951,7952,7953,7954,7955,7956,7957,7958,7959, # 7664
7960,7961,7962,7963,7964,7965,7966,7967,7968,7969,7970,7971,7972,7973,7974,7975, # 7680
7976,7977,7978,7979,7980,7981,4859,7982,7983,7984,7985,7986,7987,7988,7989,7990, # 7696
7991,7992,7993,7994,7995,7996,4860,7997,7998,7999,8000,8001,8002,8003,8004,8005, # 7712
8006,8007,8008,8009,8010,8011,8012,8013,8014,8015,8016,4176,8017,8018,8019,8020, # 7728
8021,8022,8023,4861,8024,8025,8026,8027,8028,8029,8030,8031,8032,8033,8034,8035, # 7744
8036,4862,4456,8037,8038,8039,8040,4863,8041,8042,8043,8044,8045,8046,8047,8048, # 7760
8049,8050,8051,8052,8053,8054,8055,8056,8057,8058,8059,8060,8061,8062,8063,8064, # 7776
8065,8066,8067,8068,8069,8070,8071,8072,8073,8074,8075,8076,8077,8078,8079,8080, # 7792
8081,8082,8083,8084,8085,8086,8087,8088,8089,8090,8091,8092,8093,8094,8095,8096, # 7808
8097,8098,8099,4864,4177,8100,8101,8102,8103,8104,8105,8106,8107,8108,8109,8110, # 7824
8111,8112,8113,8114,8115,8116,8117,8118,8119,8120,4178,8121,8122,8123,8124,8125, # 7840
8126,8127,8128,8129,8130,8131,8132,8133,8134,8135,8136,8137,8138,8139,8140,8141, # 7856
8142,8143,8144,8145,4865,4866,8146,8147,8148,8149,8150,8151,8152,8153,8154,8155, # 7872
8156,8157,8158,8159,8160,8161,8162,8163,8164,8165,4179,8166,8167,8168,8169,8170, # 7888
8171,8172,8173,8174,8175,8176,8177,8178,8179,8180,8181,4457,8182,8183,8184,8185, # 7904
8186,8187,8188,8189,8190,8191,8192,8193,8194,8195,8196,8197,8198,8199,8200,8201, # 7920
8202,8203,8204,8205,8206,8207,8208,8209,8210,8211,8212,8213,8214,8215,8216,8217, # 7936
8218,8219,8220,8221,8222,8223,8224,8225,8226,8227,8228,8229,8230,8231,8232,8233, # 7952
8234,8235,8236,8237,8238,8239,8240,8241,8242,8243,8244,8245,8246,8247,8248,8249, # 7968
8250,8251,8252,8253,8254,8255,8256,3445,8257,8258,8259,8260,8261,8262,4458,8263, # 7984
8264,8265,8266,8267,8268,8269,8270,8271,8272,4459,8273,8274,8275,8276,3550,8277, # 8000
8278,8279,8280,8281,8282,8283,8284,8285,8286,8287,8288,8289,4460,8290,8291,8292, # 8016
8293,8294,8295,8296,8297,8298,8299,8300,8301,8302,8303,8304,8305,8306,8307,4867, # 8032
8308,8309,8310,8311,8312,3551,8313,8314,8315,8316,8317,8318,8319,8320,8321,8322, # 8048
8323,8324,8325,8326,4868,8327,8328,8329,8330,8331,8332,8333,8334,8335,8336,8337, # 8064
8338,8339,8340,8341,8342,8343,8344,8345,8346,8347,8348,8349,8350,8351,8352,8353, # 8080
8354,8355,8356,8357,8358,8359,8360,8361,8362,8363,4869,4461,8364,8365,8366,8367, # 8096
8368,8369,8370,4870,8371,8372,8373,8374,8375,8376,8377,8378,8379,8380,8381,8382, # 8112
8383,8384,8385,8386,8387,8388,8389,8390,8391,8392,8393,8394,8395,8396,8397,8398, # 8128
8399,8400,8401,8402,8403,8404,8405,8406,8407,8408,8409,8410,4871,8411,8412,8413, # 8144
8414,8415,8416,8417,8418,8419,8420,8421,8422,4462,8423,8424,8425,8426,8427,8428, # 8160
8429,8430,8431,8432,8433,2986,8434,8435,8436,8437,8438,8439,8440,8441,8442,8443, # 8176
8444,8445,8446,8447,8448,8449,8450,8451,8452,8453,8454,8455,8456,8457,8458,8459, # 8192
8460,8461,8462,8463,8464,8465,8466,8467,8468,8469,8470,8471,8472,8473,8474,8475, # 8208
8476,8477,8478,4180,8479,8480,8481,8482,8483,8484,8485,8486,8487,8488,8489,8490, # 8224
8491,8492,8493,8494,8495,8496,8497,8498,8499,8500,8501,8502,8503,8504,8505,8506, # 8240
8507,8508,8509,8510,8511,8512,8513,8514,8515,8516,8517,8518,8519,8520,8521,8522, # 8256
8523,8524,8525,8526,8527,8528,8529,8530,8531,8532,8533,8534,8535,8536,8537,8538, # 8272
8539,8540,8541,8542,8543,8544,8545,8546,8547,8548,8549,8550,8551,8552,8553,8554, # 8288
8555,8556,8557,8558,8559,8560,8561,8562,8563,8564,4872,8565,8566,8567,8568,8569, # 8304
8570,8571,8572,8573,4873,8574,8575,8576,8577,8578,8579,8580,8581,8582,8583,8584, # 8320
8585,8586,8587,8588,8589,8590,8591,8592,8593,8594,8595,8596,8597,8598,8599,8600, # 8336
8601,8602,8603,8604,8605,3803,8606,8607,8608,8609,8610,8611,8612,8613,4874,3804, # 8352
8614,8615,8616,8617,8618,8619,8620,8621,3956,8622,8623,8624,8625,8626,8627,8628, # 8368
8629,8630,8631,8632,8633,8634,8635,8636,8637,8638,2865,8639,8640,8641,8642,8643, # 8384
8644,8645,8646,8647,8648,8649,8650,8651,8652,8653,8654,8655,8656,4463,8657,8658, # 8400
8659,4875,4876,8660,8661,8662,8663,8664,8665,8666,8667,8668,8669,8670,8671,8672, # 8416
8673,8674,8675,8676,8677,8678,8679,8680,8681,4464,8682,8683,8684,8685,8686,8687, # 8432
8688,8689,8690,8691,8692,8693,8694,8695,8696,8697,8698,8699,8700,8701,8702,8703, # 8448
8704,8705,8706,8707,8708,8709,2261,8710,8711,8712,8713,8714,8715,8716,8717,8718, # 8464
8719,8720,8721,8722,8723,8724,8725,8726,8727,8728,8729,8730,8731,8732,8733,4181, # 8480
8734,8735,8736,8737,8738,8739,8740,8741,8742,8743,8744,8745,8746,8747,8748,8749, # 8496
8750,8751,8752,8753,8754,8755,8756,8757,8758,8759,8760,8761,8762,8763,4877,8764, # 8512
8765,8766,8767,8768,8769,8770,8771,8772,8773,8774,8775,8776,8777,8778,8779,8780, # 8528
8781,8782,8783,8784,8785,8786,8787,8788,4878,8789,4879,8790,8791,8792,4880,8793, # 8544
8794,8795,8796,8797,8798,8799,8800,8801,4881,8802,8803,8804,8805,8806,8807,8808, # 8560
8809,8810,8811,8812,8813,8814,8815,3957,8816,8817,8818,8819,8820,8821,8822,8823, # 8576
8824,8825,8826,8827,8828,8829,8830,8831,8832,8833,8834,8835,8836,8837,8838,8839, # 8592
8840,8841,8842,8843,8844,8845,8846,8847,4882,8848,8849,8850,8851,8852,8853,8854, # 8608
8855,8856,8857,8858,8859,8860,8861,8862,8863,8864,8865,8866,8867,8868,8869,8870, # 8624
8871,8872,8873,8874,8875,8876,8877,8878,8879,8880,8881,8882,8883,8884,3202,8885, # 8640
8886,8887,8888,8889,8890,8891,8892,8893,8894,8895,8896,8897,8898,8899,8900,8901, # 8656
8902,8903,8904,8905,8906,8907,8908,8909,8910,8911,8912,8913,8914,8915,8916,8917, # 8672
8918,8919,8920,8921,8922,8923,8924,4465,8925,8926,8927,8928,8929,8930,8931,8932, # 8688
4883,8933,8934,8935,8936,8937,8938,8939,8940,8941,8942,8943,2214,8944,8945,8946, # 8704
8947,8948,8949,8950,8951,8952,8953,8954,8955,8956,8957,8958,8959,8960,8961,8962, # 8720
8963,8964,8965,4884,8966,8967,8968,8969,8970,8971,8972,8973,8974,8975,8976,8977, # 8736
8978,8979,8980,8981,8982,8983,8984,8985,8986,8987,8988,8989,8990,8991,8992,4885, # 8752
8993,8994,8995,8996,8997,8998,8999,9000,9001,9002,9003,9004,9005,9006,9007,9008, # 8768
9009,9010,9011,9012,9013,9014,9015,9016,9017,9018,9019,9020,9021,4182,9022,9023, # 8784
9024,9025,9026,9027,9028,9029,9030,9031,9032,9033,9034,9035,9036,9037,9038,9039, # 8800
9040,9041,9042,9043,9044,9045,9046,9047,9048,9049,9050,9051,9052,9053,9054,9055, # 8816
9056,9057,9058,9059,9060,9061,9062,9063,4886,9064,9065,9066,9067,9068,9069,4887, # 8832
9070,9071,9072,9073,9074,9075,9076,9077,9078,9079,9080,9081,9082,9083,9084,9085, # 8848
9086,9087,9088,9089,9090,9091,9092,9093,9094,9095,9096,9097,9098,9099,9100,9101, # 8864
9102,9103,9104,9105,9106,9107,9108,9109,9110,9111,9112,9113,9114,9115,9116,9117, # 8880
9118,9119,9120,9121,9122,9123,9124,9125,9126,9127,9128,9129,9130,9131,9132,9133, # 8896
9134,9135,9136,9137,9138,9139,9140,9141,3958,9142,9143,9144,9145,9146,9147,9148, # 8912
9149,9150,9151,4888,9152,9153,9154,9155,9156,9157,9158,9159,9160,9161,9162,9163, # 8928
9164,9165,9166,9167,9168,9169,9170,9171,9172,9173,9174,9175,4889,9176,9177,9178, # 8944
9179,9180,9181,9182,9183,9184,9185,9186,9187,9188,9189,9190,9191,9192,9193,9194, # 8960
9195,9196,9197,9198,9199,9200,9201,9202,9203,4890,9204,9205,9206,9207,9208,9209, # 8976
9210,9211,9212,9213,9214,9215,9216,9217,9218,9219,9220,9221,9222,4466,9223,9224, # 8992
9225,9226,9227,9228,9229,9230,9231,9232,9233,9234,9235,9236,9237,9238,9239,9240, # 9008
9241,9242,9243,9244,9245,4891,9246,9247,9248,9249,9250,9251,9252,9253,9254,9255, # 9024
9256,9257,4892,9258,9259,9260,9261,4893,4894,9262,9263,9264,9265,9266,9267,9268, # 9040
9269,9270,9271,9272,9273,4467,9274,9275,9276,9277,9278,9279,9280,9281,9282,9283, # 9056
9284,9285,3673,9286,9287,9288,9289,9290,9291,9292,9293,9294,9295,9296,9297,9298, # 9072
9299,9300,9301,9302,9303,9304,9305,9306,9307,9308,9309,9310,9311,9312,9313,9314, # 9088
9315,9316,9317,9318,9319,9320,9321,9322,4895,9323,9324,9325,9326,9327,9328,9329, # 9104
9330,9331,9332,9333,9334,9335,9336,9337,9338,9339,9340,9341,9342,9343,9344,9345, # 9120
9346,9347,4468,9348,9349,9350,9351,9352,9353,9354,9355,9356,9357,9358,9359,9360, # 9136
9361,9362,9363,9364,9365,9366,9367,9368,9369,9370,9371,9372,9373,4896,9374,4469, # 9152
9375,9376,9377,9378,9379,4897,9380,9381,9382,9383,9384,9385,9386,9387,9388,9389, # 9168
9390,9391,9392,9393,9394,9395,9396,9397,9398,9399,9400,9401,9402,9403,9404,9405, # 9184
9406,4470,9407,2751,9408,9409,3674,3552,9410,9411,9412,9413,9414,9415,9416,9417, # 9200
9418,9419,9420,9421,4898,9422,9423,9424,9425,9426,9427,9428,9429,3959,9430,9431, # 9216
9432,9433,9434,9435,9436,4471,9437,9438,9439,9440,9441,9442,9443,9444,9445,9446, # 9232
9447,9448,9449,9450,3348,9451,9452,9453,9454,9455,9456,9457,9458,9459,9460,9461, # 9248
9462,9463,9464,9465,9466,9467,9468,9469,9470,9471,9472,4899,9473,9474,9475,9476, # 9264
9477,4900,9478,9479,9480,9481,9482,9483,9484,9485,9486,9487,9488,3349,9489,9490, # 9280
9491,9492,9493,9494,9495,9496,9497,9498,9499,9500,9501,9502,9503,9504,9505,9506, # 9296
9507,9508,9509,9510,9511,9512,9513,9514,9515,9516,9517,9518,9519,9520,4901,9521, # 9312
9522,9523,9524,9525,9526,4902,9527,9528,9529,9530,9531,9532,9533,9534,9535,9536, # 9328
9537,9538,9539,9540,9541,9542,9543,9544,9545,9546,9547,9548,9549,9550,9551,9552, # 9344
9553,9554,9555,9556,9557,9558,9559,9560,9561,9562,9563,9564,9565,9566,9567,9568, # 9360
9569,9570,9571,9572,9573,9574,9575,9576,9577,9578,9579,9580,9581,9582,9583,9584, # 9376
3805,9585,9586,9587,9588,9589,9590,9591,9592,9593,9594,9595,9596,9597,9598,9599, # 9392
9600,9601,9602,4903,9603,9604,9605,9606,9607,4904,9608,9609,9610,9611,9612,9613, # 9408
9614,4905,9615,9616,9617,9618,9619,9620,9621,9622,9623,9624,9625,9626,9627,9628, # 9424
9629,9630,9631,9632,4906,9633,9634,9635,9636,9637,9638,9639,9640,9641,9642,9643, # 9440
4907,9644,9645,9646,9647,9648,9649,9650,9651,9652,9653,9654,9655,9656,9657,9658, # 9456
9659,9660,9661,9662,9663,9664,9665,9666,9667,9668,9669,9670,9671,9672,4183,9673, # 9472
9674,9675,9676,9677,4908,9678,9679,9680,9681,4909,9682,9683,9684,9685,9686,9687, # 9488
9688,9689,9690,4910,9691,9692,9693,3675,9694,9695,9696,2945,9697,9698,9699,9700, # 9504
9701,9702,9703,9704,9705,4911,9706,9707,9708,9709,9710,9711,9712,9713,9714,9715, # 9520
9716,9717,9718,9719,9720,9721,9722,9723,9724,9725,9726,9727,9728,9729,9730,9731, # 9536
9732,9733,9734,9735,4912,9736,9737,9738,9739,9740,4913,9741,9742,9743,9744,9745, # 9552
9746,9747,9748,9749,9750,9751,9752,9753,9754,9755,9756,9757,9758,4914,9759,9760, # 9568
9761,9762,9763,9764,9765,9766,9767,9768,9769,9770,9771,9772,9773,9774,9775,9776, # 9584
9777,9778,9779,9780,9781,9782,4915,9783,9784,9785,9786,9787,9788,9789,9790,9791, # 9600
9792,9793,4916,9794,9795,9796,9797,9798,9799,9800,9801,9802,9803,9804,9805,9806, # 9616
9807,9808,9809,9810,9811,9812,9813,9814,9815,9816,9817,9818,9819,9820,9821,9822, # 9632
9823,9824,9825,9826,9827,9828,9829,9830,9831,9832,9833,9834,9835,9836,9837,9838, # 9648
9839,9840,9841,9842,9843,9844,9845,9846,9847,9848,9849,9850,9851,9852,9853,9854, # 9664
9855,9856,9857,9858,9859,9860,9861,9862,9863,9864,9865,9866,9867,9868,4917,9869, # 9680
9870,9871,9872,9873,9874,9875,9876,9877,9878,9879,9880,9881,9882,9883,9884,9885, # 9696
9886,9887,9888,9889,9890,9891,9892,4472,9893,9894,9895,9896,9897,3806,9898,9899, # 9712
9900,9901,9902,9903,9904,9905,9906,9907,9908,9909,9910,9911,9912,9913,9914,4918, # 9728
9915,9916,9917,4919,9918,9919,9920,9921,4184,9922,9923,9924,9925,9926,9927,9928, # 9744
9929,9930,9931,9932,9933,9934,9935,9936,9937,9938,9939,9940,9941,9942,9943,9944, # 9760
9945,9946,4920,9947,9948,9949,9950,9951,9952,9953,9954,9955,4185,9956,9957,9958, # 9776
9959,9960,9961,9962,9963,9964,9965,4921,9966,9967,9968,4473,9969,9970,9971,9972, # 9792
9973,9974,9975,9976,9977,4474,9978,9979,9980,9981,9982,9983,9984,9985,9986,9987, # 9808
9988,9989,9990,9991,9992,9993,9994,9995,9996,9997,9998,9999,10000,10001,10002,10003, # 9824
10004,10005,10006,10007,10008,10009,10010,10011,10012,10013,10014,10015,10016,10017,10018,10019, # 9840
10020,10021,4922,10022,4923,10023,10024,10025,10026,10027,10028,10029,10030,10031,10032,10033, # 9856
10034,10035,10036,10037,10038,10039,10040,10041,10042,10043,10044,10045,10046,10047,10048,4924, # 9872
10049,10050,10051,10052,10053,10054,10055,10056,10057,10058,10059,10060,10061,10062,10063,10064, # 9888
10065,10066,10067,10068,10069,10070,10071,10072,10073,10074,10075,10076,10077,10078,10079,10080, # 9904
10081,10082,10083,10084,10085,10086,10087,4475,10088,10089,10090,10091,10092,10093,10094,10095, # 9920
10096,10097,4476,10098,10099,10100,10101,10102,10103,10104,10105,10106,10107,10108,10109,10110, # 9936
10111,2174,10112,10113,10114,10115,10116,10117,10118,10119,10120,10121,10122,10123,10124,10125, # 9952
10126,10127,10128,10129,10130,10131,10132,10133,10134,10135,10136,10137,10138,10139,10140,3807, # 9968
4186,4925,10141,10142,10143,10144,10145,10146,10147,4477,4187,10148,10149,10150,10151,10152, # 9984
10153,4188,10154,10155,10156,10157,10158,10159,10160,10161,4926,10162,10163,10164,10165,10166, #10000
10167,10168,10169,10170,10171,10172,10173,10174,10175,10176,10177,10178,10179,10180,10181,10182, #10016
10183,10184,10185,10186,10187,10188,10189,10190,10191,10192,3203,10193,10194,10195,10196,10197, #10032
10198,10199,10200,4478,10201,10202,10203,10204,4479,10205,10206,10207,10208,10209,10210,10211, #10048
10212,10213,10214,10215,10216,10217,10218,10219,10220,10221,10222,10223,10224,10225,10226,10227, #10064
10228,10229,10230,10231,10232,10233,10234,4927,10235,10236,10237,10238,10239,10240,10241,10242, #10080
10243,10244,10245,10246,10247,10248,10249,10250,10251,10252,10253,10254,10255,10256,10257,10258, #10096
10259,10260,10261,10262,10263,10264,10265,10266,10267,10268,10269,10270,10271,10272,10273,4480, #10112
4928,4929,10274,10275,10276,10277,10278,10279,10280,10281,10282,10283,10284,10285,10286,10287, #10128
10288,10289,10290,10291,10292,10293,10294,10295,10296,10297,10298,10299,10300,10301,10302,10303, #10144
10304,10305,10306,10307,10308,10309,10310,10311,10312,10313,10314,10315,10316,10317,10318,10319, #10160
10320,10321,10322,10323,10324,10325,10326,10327,10328,10329,10330,10331,10332,10333,10334,4930, #10176
10335,10336,10337,10338,10339,10340,10341,10342,4931,10343,10344,10345,10346,10347,10348,10349, #10192
10350,10351,10352,10353,10354,10355,3088,10356,2786,10357,10358,10359,10360,4189,10361,10362, #10208
10363,10364,10365,10366,10367,10368,10369,10370,10371,10372,10373,10374,10375,4932,10376,10377, #10224
10378,10379,10380,10381,10382,10383,10384,10385,10386,10387,10388,10389,10390,10391,10392,4933, #10240
10393,10394,10395,4934,10396,10397,10398,10399,10400,10401,10402,10403,10404,10405,10406,10407, #10256
10408,10409,10410,10411,10412,3446,10413,10414,10415,10416,10417,10418,10419,10420,10421,10422, #10272
10423,4935,10424,10425,10426,10427,10428,10429,10430,4936,10431,10432,10433,10434,10435,10436, #10288
10437,10438,10439,10440,10441,10442,10443,4937,10444,10445,10446,10447,4481,10448,10449,10450, #10304
10451,10452,10453,10454,10455,10456,10457,10458,10459,10460,10461,10462,10463,10464,10465,10466, #10320
10467,10468,10469,10470,10471,10472,10473,10474,10475,10476,10477,10478,10479,10480,10481,10482, #10336
10483,10484,10485,10486,10487,10488,10489,10490,10491,10492,10493,10494,10495,10496,10497,10498, #10352
10499,10500,10501,10502,10503,10504,10505,4938,10506,10507,10508,10509,10510,2552,10511,10512, #10368
10513,10514,10515,10516,3447,10517,10518,10519,10520,10521,10522,10523,10524,10525,10526,10527, #10384
10528,10529,10530,10531,10532,10533,10534,10535,10536,10537,10538,10539,10540,10541,10542,10543, #10400
4482,10544,4939,10545,10546,10547,10548,10549,10550,10551,10552,10553,10554,10555,10556,10557, #10416
10558,10559,10560,10561,10562,10563,10564,10565,10566,10567,3676,4483,10568,10569,10570,10571, #10432
10572,3448,10573,10574,10575,10576,10577,10578,10579,10580,10581,10582,10583,10584,10585,10586, #10448
10587,10588,10589,10590,10591,10592,10593,10594,10595,10596,10597,10598,10599,10600,10601,10602, #10464
10603,10604,10605,10606,10607,10608,10609,10610,10611,10612,10613,10614,10615,10616,10617,10618, #10480
10619,10620,10621,10622,10623,10624,10625,10626,10627,4484,10628,10629,10630,10631,10632,4940, #10496
10633,10634,10635,10636,10637,10638,10639,10640,10641,10642,10643,10644,10645,10646,10647,10648, #10512
10649,10650,10651,10652,10653,10654,10655,10656,4941,10657,10658,10659,2599,10660,10661,10662, #10528
10663,10664,10665,10666,3089,10667,10668,10669,10670,10671,10672,10673,10674,10675,10676,10677, #10544
10678,10679,10680,4942,10681,10682,10683,10684,10685,10686,10687,10688,10689,10690,10691,10692, #10560
10693,10694,10695,10696,10697,4485,10698,10699,10700,10701,10702,10703,10704,4943,10705,3677, #10576
10706,10707,10708,10709,10710,10711,10712,4944,10713,10714,10715,10716,10717,10718,10719,10720, #10592
10721,10722,10723,10724,10725,10726,10727,10728,4945,10729,10730,10731,10732,10733,10734,10735, #10608
10736,10737,10738,10739,10740,10741,10742,10743,10744,10745,10746,10747,10748,10749,10750,10751, #10624
10752,10753,10754,10755,10756,10757,10758,10759,10760,10761,4946,10762,10763,10764,10765,10766, #10640
10767,4947,4948,10768,10769,10770,10771,10772,10773,10774,10775,10776,10777,10778,10779,10780, #10656
10781,10782,10783,10784,10785,10786,10787,10788,10789,10790,10791,10792,10793,10794,10795,10796, #10672
10797,10798,10799,10800,10801,10802,10803,10804,10805,10806,10807,10808,10809,10810,10811,10812, #10688
10813,10814,10815,10816,10817,10818,10819,10820,10821,10822,10823,10824,10825,10826,10827,10828, #10704
10829,10830,10831,10832,10833,10834,10835,10836,10837,10838,10839,10840,10841,10842,10843,10844, #10720
10845,10846,10847,10848,10849,10850,10851,10852,10853,10854,10855,10856,10857,10858,10859,10860, #10736
10861,10862,10863,10864,10865,10866,10867,10868,10869,10870,10871,10872,10873,10874,10875,10876, #10752
10877,10878,4486,10879,10880,10881,10882,10883,10884,10885,4949,10886,10887,10888,10889,10890, #10768
10891,10892,10893,10894,10895,10896,10897,10898,10899,10900,10901,10902,10903,10904,10905,10906, #10784
10907,10908,10909,10910,10911,10912,10913,10914,10915,10916,10917,10918,10919,4487,10920,10921, #10800
10922,10923,10924,10925,10926,10927,10928,10929,10930,10931,10932,4950,10933,10934,10935,10936, #10816
10937,10938,10939,10940,10941,10942,10943,10944,10945,10946,10947,10948,10949,4488,10950,10951, #10832
10952,10953,10954,10955,10956,10957,10958,10959,4190,10960,10961,10962,10963,10964,10965,10966, #10848
10967,10968,10969,10970,10971,10972,10973,10974,10975,10976,10977,10978,10979,10980,10981,10982, #10864
10983,10984,10985,10986,10987,10988,10989,10990,10991,10992,10993,10994,10995,10996,10997,10998, #10880
10999,11000,11001,11002,11003,11004,11005,11006,3960,11007,11008,11009,11010,11011,11012,11013, #10896
11014,11015,11016,11017,11018,11019,11020,11021,11022,11023,11024,11025,11026,11027,11028,11029, #10912
11030,11031,11032,4951,11033,11034,11035,11036,11037,11038,11039,11040,11041,11042,11043,11044, #10928
11045,11046,11047,4489,11048,11049,11050,11051,4952,11052,11053,11054,11055,11056,11057,11058, #10944
4953,11059,11060,11061,11062,11063,11064,11065,11066,11067,11068,11069,11070,11071,4954,11072, #10960
11073,11074,11075,11076,11077,11078,11079,11080,11081,11082,11083,11084,11085,11086,11087,11088, #10976
11089,11090,11091,11092,11093,11094,11095,11096,11097,11098,11099,11100,11101,11102,11103,11104, #10992
11105,11106,11107,11108,11109,11110,11111,11112,11113,11114,11115,3808,11116,11117,11118,11119, #11008
11120,11121,11122,11123,11124,11125,11126,11127,11128,11129,11130,11131,11132,11133,11134,4955, #11024
11135,11136,11137,11138,11139,11140,11141,11142,11143,11144,11145,11146,11147,11148,11149,11150, #11040
11151,11152,11153,11154,11155,11156,11157,11158,11159,11160,11161,4956,11162,11163,11164,11165, #11056
11166,11167,11168,11169,11170,11171,11172,11173,11174,11175,11176,11177,11178,11179,11180,4957, #11072
11181,11182,11183,11184,11185,11186,4958,11187,11188,11189,11190,11191,11192,11193,11194,11195, #11088
11196,11197,11198,11199,11200,3678,11201,11202,11203,11204,11205,11206,4191,11207,11208,11209, #11104
11210,11211,11212,11213,11214,11215,11216,11217,11218,11219,11220,11221,11222,11223,11224,11225, #11120
11226,11227,11228,11229,11230,11231,11232,11233,11234,11235,11236,11237,11238,11239,11240,11241, #11136
11242,11243,11244,11245,11246,11247,11248,11249,11250,11251,4959,11252,11253,11254,11255,11256, #11152
11257,11258,11259,11260,11261,11262,11263,11264,11265,11266,11267,11268,11269,11270,11271,11272, #11168
11273,11274,11275,11276,11277,11278,11279,11280,11281,11282,11283,11284,11285,11286,11287,11288, #11184
11289,11290,11291,11292,11293,11294,11295,11296,11297,11298,11299,11300,11301,11302,11303,11304, #11200
11305,11306,11307,11308,11309,11310,11311,11312,11313,11314,3679,11315,11316,11317,11318,4490, #11216
11319,11320,11321,11322,11323,11324,11325,11326,11327,11328,11329,11330,11331,11332,11333,11334, #11232
11335,11336,11337,11338,11339,11340,11341,11342,11343,11344,11345,11346,11347,4960,11348,11349, #11248
11350,11351,11352,11353,11354,11355,11356,11357,11358,11359,11360,11361,11362,11363,11364,11365, #11264
11366,11367,11368,11369,11370,11371,11372,11373,11374,11375,11376,11377,3961,4961,11378,11379, #11280
11380,11381,11382,11383,11384,11385,11386,11387,11388,11389,11390,11391,11392,11393,11394,11395, #11296
11396,11397,4192,11398,11399,11400,11401,11402,11403,11404,11405,11406,11407,11408,11409,11410, #11312
11411,4962,11412,11413,11414,11415,11416,11417,11418,11419,11420,11421,11422,11423,11424,11425, #11328
11426,11427,11428,11429,11430,11431,11432,11433,11434,11435,11436,11437,11438,11439,11440,11441, #11344
11442,11443,11444,11445,11446,11447,11448,11449,11450,11451,11452,11453,11454,11455,11456,11457, #11360
11458,11459,11460,11461,11462,11463,11464,11465,11466,11467,11468,11469,4963,11470,11471,4491, #11376
11472,11473,11474,11475,4964,11476,11477,11478,11479,11480,11481,11482,11483,11484,11485,11486, #11392
11487,11488,11489,11490,11491,11492,4965,11493,11494,11495,11496,11497,11498,11499,11500,11501, #11408
11502,11503,11504,11505,11506,11507,11508,11509,11510,11511,11512,11513,11514,11515,11516,11517, #11424
11518,11519,11520,11521,11522,11523,11524,11525,11526,11527,11528,11529,3962,11530,11531,11532, #11440
11533,11534,11535,11536,11537,11538,11539,11540,11541,11542,11543,11544,11545,11546,11547,11548, #11456
11549,11550,11551,11552,11553,11554,11555,11556,11557,11558,11559,11560,11561,11562,11563,11564, #11472
4193,4194,11565,11566,11567,11568,11569,11570,11571,11572,11573,11574,11575,11576,11577,11578, #11488
11579,11580,11581,11582,11583,11584,11585,11586,11587,11588,11589,11590,11591,4966,4195,11592, #11504
11593,11594,11595,11596,11597,11598,11599,11600,11601,11602,11603,11604,3090,11605,11606,11607, #11520
11608,11609,11610,4967,11611,11612,11613,11614,11615,11616,11617,11618,11619,11620,11621,11622, #11536
11623,11624,11625,11626,11627,11628,11629,11630,11631,11632,11633,11634,11635,11636,11637,11638, #11552
11639,11640,11641,11642,11643,11644,11645,11646,11647,11648,11649,11650,11651,11652,11653,11654, #11568
11655,11656,11657,11658,11659,11660,11661,11662,11663,11664,11665,11666,11667,11668,11669,11670, #11584
11671,11672,11673,11674,4968,11675,11676,11677,11678,11679,11680,11681,11682,11683,11684,11685, #11600
11686,11687,11688,11689,11690,11691,11692,11693,3809,11694,11695,11696,11697,11698,11699,11700, #11616
11701,11702,11703,11704,11705,11706,11707,11708,11709,11710,11711,11712,11713,11714,11715,11716, #11632
11717,11718,3553,11719,11720,11721,11722,11723,11724,11725,11726,11727,11728,11729,11730,4969, #11648
11731,11732,11733,11734,11735,11736,11737,11738,11739,11740,4492,11741,11742,11743,11744,11745, #11664
11746,11747,11748,11749,11750,11751,11752,4970,11753,11754,11755,11756,11757,11758,11759,11760, #11680
11761,11762,11763,11764,11765,11766,11767,11768,11769,11770,11771,11772,11773,11774,11775,11776, #11696
11777,11778,11779,11780,11781,11782,11783,11784,11785,11786,11787,11788,11789,11790,4971,11791, #11712
11792,11793,11794,11795,11796,11797,4972,11798,11799,11800,11801,11802,11803,11804,11805,11806, #11728
11807,11808,11809,11810,4973,11811,11812,11813,11814,11815,11816,11817,11818,11819,11820,11821, #11744
11822,11823,11824,11825,11826,11827,11828,11829,11830,11831,11832,11833,11834,3680,3810,11835, #11760
11836,4974,11837,11838,11839,11840,11841,11842,11843,11844,11845,11846,11847,11848,11849,11850, #11776
11851,11852,11853,11854,11855,11856,11857,11858,11859,11860,11861,11862,11863,11864,11865,11866, #11792
11867,11868,11869,11870,11871,11872,11873,11874,11875,11876,11877,11878,11879,11880,11881,11882, #11808
11883,11884,4493,11885,11886,11887,11888,11889,11890,11891,11892,11893,11894,11895,11896,11897, #11824
11898,11899,11900,11901,11902,11903,11904,11905,11906,11907,11908,11909,11910,11911,11912,11913, #11840
11914,11915,4975,11916,11917,11918,11919,11920,11921,11922,11923,11924,11925,11926,11927,11928, #11856
11929,11930,11931,11932,11933,11934,11935,11936,11937,11938,11939,11940,11941,11942,11943,11944, #11872
11945,11946,11947,11948,11949,4976,11950,11951,11952,11953,11954,11955,11956,11957,11958,11959, #11888
11960,11961,11962,11963,11964,11965,11966,11967,11968,11969,11970,11971,11972,11973,11974,11975, #11904
11976,11977,11978,11979,11980,11981,11982,11983,11984,11985,11986,11987,4196,11988,11989,11990, #11920
11991,11992,4977,11993,11994,11995,11996,11997,11998,11999,12000,12001,12002,12003,12004,12005, #11936
12006,12007,12008,12009,12010,12011,12012,12013,12014,12015,12016,12017,12018,12019,12020,12021, #11952
12022,12023,12024,12025,12026,12027,12028,12029,12030,12031,12032,12033,12034,12035,12036,12037, #11968
12038,12039,12040,12041,12042,12043,12044,12045,12046,12047,12048,12049,12050,12051,12052,12053, #11984
12054,12055,12056,12057,12058,12059,12060,12061,4978,12062,12063,12064,12065,12066,12067,12068, #12000
12069,12070,12071,12072,12073,12074,12075,12076,12077,12078,12079,12080,12081,12082,12083,12084, #12016
12085,12086,12087,12088,12089,12090,12091,12092,12093,12094,12095,12096,12097,12098,12099,12100, #12032
12101,12102,12103,12104,12105,12106,12107,12108,12109,12110,12111,12112,12113,12114,12115,12116, #12048
12117,12118,12119,12120,12121,12122,12123,4979,12124,12125,12126,12127,12128,4197,12129,12130, #12064
12131,12132,12133,12134,12135,12136,12137,12138,12139,12140,12141,12142,12143,12144,12145,12146, #12080
12147,12148,12149,12150,12151,12152,12153,12154,4980,12155,12156,12157,12158,12159,12160,4494, #12096
12161,12162,12163,12164,3811,12165,12166,12167,12168,12169,4495,12170,12171,4496,12172,12173, #12112
12174,12175,12176,3812,12177,12178,12179,12180,12181,12182,12183,12184,12185,12186,12187,12188, #12128
12189,12190,12191,12192,12193,12194,12195,12196,12197,12198,12199,12200,12201,12202,12203,12204, #12144
12205,12206,12207,12208,12209,12210,12211,12212,12213,12214,12215,12216,12217,12218,12219,12220, #12160
12221,4981,12222,12223,12224,12225,12226,12227,12228,12229,12230,12231,12232,12233,12234,12235, #12176
4982,12236,12237,12238,12239,12240,12241,12242,12243,12244,12245,4983,12246,12247,12248,12249, #12192
4984,12250,12251,12252,12253,12254,12255,12256,12257,12258,12259,12260,12261,12262,12263,12264, #12208
4985,12265,4497,12266,12267,12268,12269,12270,12271,12272,12273,12274,12275,12276,12277,12278, #12224
12279,12280,12281,12282,12283,12284,12285,12286,12287,4986,12288,12289,12290,12291,12292,12293, #12240
12294,12295,12296,2473,12297,12298,12299,12300,12301,12302,12303,12304,12305,12306,12307,12308, #12256
12309,12310,12311,12312,12313,12314,12315,12316,12317,12318,12319,3963,12320,12321,12322,12323, #12272
12324,12325,12326,12327,12328,12329,12330,12331,12332,4987,12333,12334,12335,12336,12337,12338, #12288
12339,12340,12341,12342,12343,12344,12345,12346,12347,12348,12349,12350,12351,12352,12353,12354, #12304
12355,12356,12357,12358,12359,3964,12360,12361,12362,12363,12364,12365,12366,12367,12368,12369, #12320
12370,3965,12371,12372,12373,12374,12375,12376,12377,12378,12379,12380,12381,12382,12383,12384, #12336
12385,12386,12387,12388,12389,12390,12391,12392,12393,12394,12395,12396,12397,12398,12399,12400, #12352
12401,12402,12403,12404,12405,12406,12407,12408,4988,12409,12410,12411,12412,12413,12414,12415, #12368
12416,12417,12418,12419,12420,12421,12422,12423,12424,12425,12426,12427,12428,12429,12430,12431, #12384
12432,12433,12434,12435,12436,12437,12438,3554,12439,12440,12441,12442,12443,12444,12445,12446, #12400
12447,12448,12449,12450,12451,12452,12453,12454,12455,12456,12457,12458,12459,12460,12461,12462, #12416
12463,12464,4989,12465,12466,12467,12468,12469,12470,12471,12472,12473,12474,12475,12476,12477, #12432
12478,12479,12480,4990,12481,12482,12483,12484,12485,12486,12487,12488,12489,4498,12490,12491, #12448
12492,12493,12494,12495,12496,12497,12498,12499,12500,12501,12502,12503,12504,12505,12506,12507, #12464
12508,12509,12510,12511,12512,12513,12514,12515,12516,12517,12518,12519,12520,12521,12522,12523, #12480
12524,12525,12526,12527,12528,12529,12530,12531,12532,12533,12534,12535,12536,12537,12538,12539, #12496
12540,12541,12542,12543,12544,12545,12546,12547,12548,12549,12550,12551,4991,12552,12553,12554, #12512
12555,12556,12557,12558,12559,12560,12561,12562,12563,12564,12565,12566,12567,12568,12569,12570, #12528
12571,12572,12573,12574,12575,12576,12577,12578,3036,12579,12580,12581,12582,12583,3966,12584, #12544
12585,12586,12587,12588,12589,12590,12591,12592,12593,12594,12595,12596,12597,12598,12599,12600, #12560
12601,12602,12603,12604,12605,12606,12607,12608,12609,12610,12611,12612,12613,12614,12615,12616, #12576
12617,12618,12619,12620,12621,12622,12623,12624,12625,12626,12627,12628,12629,12630,12631,12632, #12592
12633,12634,12635,12636,12637,12638,12639,12640,12641,12642,12643,12644,12645,12646,4499,12647, #12608
12648,12649,12650,12651,12652,12653,12654,12655,12656,12657,12658,12659,12660,12661,12662,12663, #12624
12664,12665,12666,12667,12668,12669,12670,12671,12672,12673,12674,12675,12676,12677,12678,12679, #12640
12680,12681,12682,12683,12684,12685,12686,12687,12688,12689,12690,12691,12692,12693,12694,12695, #12656
12696,12697,12698,4992,12699,12700,12701,12702,12703,12704,12705,12706,12707,12708,12709,12710, #12672
12711,12712,12713,12714,12715,12716,12717,12718,12719,12720,12721,12722,12723,12724,12725,12726, #12688
12727,12728,12729,12730,12731,12732,12733,12734,12735,12736,12737,12738,12739,12740,12741,12742, #12704
12743,12744,12745,12746,12747,12748,12749,12750,12751,12752,12753,12754,12755,12756,12757,12758, #12720
12759,12760,12761,12762,12763,12764,12765,12766,12767,12768,12769,12770,12771,12772,12773,12774, #12736
12775,12776,12777,12778,4993,2175,12779,12780,12781,12782,12783,12784,12785,12786,4500,12787, #12752
12788,12789,12790,12791,12792,12793,12794,12795,12796,12797,12798,12799,12800,12801,12802,12803, #12768
12804,12805,12806,12807,12808,12809,12810,12811,12812,12813,12814,12815,12816,12817,12818,12819, #12784
12820,12821,12822,12823,12824,12825,12826,4198,3967,12827,12828,12829,12830,12831,12832,12833, #12800
12834,12835,12836,12837,12838,12839,12840,12841,12842,12843,12844,12845,12846,12847,12848,12849, #12816
12850,12851,12852,12853,12854,12855,12856,12857,12858,12859,12860,12861,4199,12862,12863,12864, #12832
12865,12866,12867,12868,12869,12870,12871,12872,12873,12874,12875,12876,12877,12878,12879,12880, #12848
12881,12882,12883,12884,12885,12886,12887,4501,12888,12889,12890,12891,12892,12893,12894,12895, #12864
12896,12897,12898,12899,12900,12901,12902,12903,12904,12905,12906,12907,12908,12909,12910,12911, #12880
12912,4994,12913,12914,12915,12916,12917,12918,12919,12920,12921,12922,12923,12924,12925,12926, #12896
12927,12928,12929,12930,12931,12932,12933,12934,12935,12936,12937,12938,12939,12940,12941,12942, #12912
12943,12944,12945,12946,12947,12948,12949,12950,12951,12952,12953,12954,12955,12956,1772,12957, #12928
12958,12959,12960,12961,12962,12963,12964,12965,12966,12967,12968,12969,12970,12971,12972,12973, #12944
12974,12975,12976,12977,12978,12979,12980,12981,12982,12983,12984,12985,12986,12987,12988,12989, #12960
12990,12991,12992,12993,12994,12995,12996,12997,4502,12998,4503,12999,13000,13001,13002,13003, #12976
4504,13004,13005,13006,13007,13008,13009,13010,13011,13012,13013,13014,13015,13016,13017,13018, #12992
13019,13020,13021,13022,13023,13024,13025,13026,13027,13028,13029,3449,13030,13031,13032,13033, #13008
13034,13035,13036,13037,13038,13039,13040,13041,13042,13043,13044,13045,13046,13047,13048,13049, #13024
13050,13051,13052,13053,13054,13055,13056,13057,13058,13059,13060,13061,13062,13063,13064,13065, #13040
13066,13067,13068,13069,13070,13071,13072,13073,13074,13075,13076,13077,13078,13079,13080,13081, #13056
13082,13083,13084,13085,13086,13087,13088,13089,13090,13091,13092,13093,13094,13095,13096,13097, #13072
13098,13099,13100,13101,13102,13103,13104,13105,13106,13107,13108,13109,13110,13111,13112,13113, #13088
13114,13115,13116,13117,13118,3968,13119,4995,13120,13121,13122,13123,13124,13125,13126,13127, #13104
4505,13128,13129,13130,13131,13132,13133,13134,4996,4506,13135,13136,13137,13138,13139,4997, #13120
13140,13141,13142,13143,13144,13145,13146,13147,13148,13149,13150,13151,13152,13153,13154,13155, #13136
13156,13157,13158,13159,4998,13160,13161,13162,13163,13164,13165,13166,13167,13168,13169,13170, #13152
13171,13172,13173,13174,13175,13176,4999,13177,13178,13179,13180,13181,13182,13183,13184,13185, #13168
13186,13187,13188,13189,13190,13191,13192,13193,13194,13195,13196,13197,13198,13199,13200,13201, #13184
13202,13203,13204,13205,13206,5000,13207,13208,13209,13210,13211,13212,13213,13214,13215,13216, #13200
13217,13218,13219,13220,13221,13222,13223,13224,13225,13226,13227,4200,5001,13228,13229,13230, #13216
13231,13232,13233,13234,13235,13236,13237,13238,13239,13240,3969,13241,13242,13243,13244,3970, #13232
13245,13246,13247,13248,13249,13250,13251,13252,13253,13254,13255,13256,13257,13258,13259,13260, #13248
13261,13262,13263,13264,13265,13266,13267,13268,3450,13269,13270,13271,13272,13273,13274,13275, #13264
13276,5002,13277,13278,13279,13280,13281,13282,13283,13284,13285,13286,13287,13288,13289,13290, #13280
13291,13292,13293,13294,13295,13296,13297,13298,13299,13300,13301,13302,3813,13303,13304,13305, #13296
13306,13307,13308,13309,13310,13311,13312,13313,13314,13315,13316,13317,13318,13319,13320,13321, #13312
13322,13323,13324,13325,13326,13327,13328,4507,13329,13330,13331,13332,13333,13334,13335,13336, #13328
13337,13338,13339,13340,13341,5003,13342,13343,13344,13345,13346,13347,13348,13349,13350,13351, #13344
13352,13353,13354,13355,13356,13357,13358,13359,13360,13361,13362,13363,13364,13365,13366,13367, #13360
5004,13368,13369,13370,13371,13372,13373,13374,13375,13376,13377,13378,13379,13380,13381,13382, #13376
13383,13384,13385,13386,13387,13388,13389,13390,13391,13392,13393,13394,13395,13396,13397,13398, #13392
13399,13400,13401,13402,13403,13404,13405,13406,13407,13408,13409,13410,13411,13412,13413,13414, #13408
13415,13416,13417,13418,13419,13420,13421,13422,13423,13424,13425,13426,13427,13428,13429,13430, #13424
13431,13432,4508,13433,13434,13435,4201,13436,13437,13438,13439,13440,13441,13442,13443,13444, #13440
13445,13446,13447,13448,13449,13450,13451,13452,13453,13454,13455,13456,13457,5005,13458,13459, #13456
13460,13461,13462,13463,13464,13465,13466,13467,13468,13469,13470,4509,13471,13472,13473,13474, #13472
13475,13476,13477,13478,13479,13480,13481,13482,13483,13484,13485,13486,13487,13488,13489,13490, #13488
13491,13492,13493,13494,13495,13496,13497,13498,13499,13500,13501,13502,13503,13504,13505,13506, #13504
13507,13508,13509,13510,13511,13512,13513,13514,13515,13516,13517,13518,13519,13520,13521,13522, #13520
13523,13524,13525,13526,13527,13528,13529,13530,13531,13532,13533,13534,13535,13536,13537,13538, #13536
13539,13540,13541,13542,13543,13544,13545,13546,13547,13548,13549,13550,13551,13552,13553,13554, #13552
13555,13556,13557,13558,13559,13560,13561,13562,13563,13564,13565,13566,13567,13568,13569,13570, #13568
13571,13572,13573,13574,13575,13576,13577,13578,13579,13580,13581,13582,13583,13584,13585,13586, #13584
13587,13588,13589,13590,13591,13592,13593,13594,13595,13596,13597,13598,13599,13600,13601,13602, #13600
13603,13604,13605,13606,13607,13608,13609,13610,13611,13612,13613,13614,13615,13616,13617,13618, #13616
13619,13620,13621,13622,13623,13624,13625,13626,13627,13628,13629,13630,13631,13632,13633,13634, #13632
13635,13636,13637,13638,13639,13640,13641,13642,5006,13643,13644,13645,13646,13647,13648,13649, #13648
13650,13651,5007,13652,13653,13654,13655,13656,13657,13658,13659,13660,13661,13662,13663,13664, #13664
13665,13666,13667,13668,13669,13670,13671,13672,13673,13674,13675,13676,13677,13678,13679,13680, #13680
13681,13682,13683,13684,13685,13686,13687,13688,13689,13690,13691,13692,13693,13694,13695,13696, #13696
13697,13698,13699,13700,13701,13702,13703,13704,13705,13706,13707,13708,13709,13710,13711,13712, #13712
13713,13714,13715,13716,13717,13718,13719,13720,13721,13722,13723,13724,13725,13726,13727,13728, #13728
13729,13730,13731,13732,13733,13734,13735,13736,13737,13738,13739,13740,13741,13742,13743,13744, #13744
13745,13746,13747,13748,13749,13750,13751,13752,13753,13754,13755,13756,13757,13758,13759,13760, #13760
13761,13762,13763,13764,13765,13766,13767,13768,13769,13770,13771,13772,13773,13774,3273,13775, #13776
13776,13777,13778,13779,13780,13781,13782,13783,13784,13785,13786,13787,13788,13789,13790,13791, #13792
13792,13793,13794,13795,13796,13797,13798,13799,13800,13801,13802,13803,13804,13805,13806,13807, #13808
13808,13809,13810,13811,13812,13813,13814,13815,13816,13817,13818,13819,13820,13821,13822,13823, #13824
13824,13825,13826,13827,13828,13829,13830,13831,13832,13833,13834,13835,13836,13837,13838,13839, #13840
13840,13841,13842,13843,13844,13845,13846,13847,13848,13849,13850,13851,13852,13853,13854,13855, #13856
13856,13857,13858,13859,13860,13861,13862,13863,13864,13865,13866,13867,13868,13869,13870,13871, #13872
13872,13873,13874,13875,13876,13877,13878,13879,13880,13881,13882,13883,13884,13885,13886,13887, #13888
13888,13889,13890,13891,13892,13893,13894,13895,13896,13897,13898,13899,13900,13901,13902,13903, #13904
13904,13905,13906,13907,13908,13909,13910,13911,13912,13913,13914,13915,13916,13917,13918,13919, #13920
13920,13921,13922,13923,13924,13925,13926,13927,13928,13929,13930,13931,13932,13933,13934,13935, #13936
13936,13937,13938,13939,13940,13941,13942,13943,13944,13945,13946,13947,13948,13949,13950,13951, #13952
13952,13953,13954,13955,13956,13957,13958,13959,13960,13961,13962,13963,13964,13965,13966,13967, #13968
13968,13969,13970,13971,13972) #13973
| mpl-2.0 |
mikelj/h-store | tools/hstore/fabric/abstractfabric.py | 9 | 22585 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------
# Copyright (C) 2013 by H-Store Project
# Brown University
# Massachusetts Institute of Technology
# Yale University
#
# http://hstore.cs.brown.edu/
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
# -----------------------------------------------------------------------
from __future__ import with_statement
import os
import sys
import re
import math
import time
import logging
import paramiko
import string
from datetime import datetime
from StringIO import StringIO
from pprint import pformat
## H-Store Third-Party Libraries
realpath = os.path.realpath(__file__)
basedir = os.path.dirname(realpath)
if not os.path.exists(realpath):
cwd = os.getcwd()
basename = os.path.basename(realpath)
if os.path.exists(os.path.join(cwd, basename)):
basedir = cwd
sys.path.append(os.path.realpath(os.path.join(basedir, "../../third_party/python")))
from fabric.api import *
from fabric.contrib.files import *
## =====================================================================
## LOGGING CONFIGURATION
## =====================================================================
LOG = logging.getLogger(__name__)
LOG_handler = logging.StreamHandler()
LOG_formatter = logging.Formatter(fmt='%(asctime)s [%(funcName)s:%(lineno)03d] %(levelname)-5s: %(message)s',
datefmt='%m-%d-%Y %H:%M:%S')
LOG_handler.setFormatter(LOG_formatter)
LOG.addHandler(LOG_handler)
LOG.setLevel(logging.INFO)
## =====================================================================
## DEPLOYMENT CONFIGURATION
## =====================================================================
ENV_DEFAULT = {
# Fabric Options
"key_filename": os.path.join(os.environ["HOME"], ".ssh/hstore.pem"),
"user": os.environ["USER"],
"disable_known_hosts": True,
"no_agent": True,
"port": 22,
# Client Options
"client.count": 1,
"client.threads_per_host": 500,
# H-Store Options
"hstore.basedir": None,
"hstore.git": "git://github.com/apavlo/h-store.git",
"hstore.git_branch": "master",
"hstore.git_options": "",
"hstore.clean": False,
"hstore.exec_prefix": "",
"hstore.partitions": 6,
"hstore.sites_per_host": 1,
"hstore.partitions_per_site": 8,
"hstore.round_robin_partitions": True,
}
## =====================================================================
## AbstractFabric
## =====================================================================
class AbstractFabric(object):
def __init__(self, env, envUpdates):
self.env = env
self.updateEnv(ENV_DEFAULT)
self.updateEnv(envUpdates)
self.hstore_dir = os.path.join(self.env["hstore.basedir"], "h-store")
LOG.debug("HSTORE DIR: %s", self.hstore_dir)
self.running_instances = [ ]
self.all_instances = [ ]
self.partitionCount = self.env["hstore.partitions"]
self.clientCount = self.env["client.count"]
if not self.env.get("hstore.num_hosts_round_robin", None) is None:
self.hostCount = int(self.env["hstore.num_hosts_round_robin"])
self.siteCount = self.hostCount
else:
self.siteCount = int(math.ceil(self.partitionCount / float(self.env["hstore.partitions_per_site"])))
self.hostCount = int(math.ceil(self.siteCount / float(self.env["hstore.sites_per_host"])))
## DEF
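    ## Example (hypothetical sizes, for illustration only): with hstore.partitions=64,
    ## hstore.partitions_per_site=8 and hstore.sites_per_host=2, the math above gives
    ## siteCount = ceil(64/8.0) = 8 and hostCount = ceil(8/2.0) = 4; when
    ## hstore.num_hosts_round_robin is set, both counts are simply that value.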
def updateEnv(self, envUpdates):
for k, v in envUpdates.iteritems():
self.env[k] = v
if v:
t = type(v)
LOG.debug("%s [%s] => %s" % (k, t, self.env[k]))
self.env[k] = t(self.env[k])
## FOR
## DEF
## =====================================================================
## IMPLEMENTATION API
## =====================================================================
def stop_cluster(self, **kwargs):
"""Stop all instances in the cluster"""
raise NotImplementedError("Unimplemented %s" % self.__init__.im_class)
## DEF
def __startInstances__(self, **kwargs):
raise NotImplementedError("Unimplemented %s" % self.__init__.im_class)
## DEF
def updateLog4j(self, reset=False, debug=[], trace=[]):
raise NotImplementedError("Unimplemented %s" % self.__init__.im_class)
## DEF
def sync_time(self):
raise NotImplementedError("Unimplemented %s" % self.__init__.im_class)
## DEF
def getInstance(self, public_dns_name):
raise NotImplementedError("Unimplemented %s" % self.__init__.im_class)
## DEF
def getAllInstances(self):
raise NotImplementedError("Unimplemented %s" % self.__init__.im_class)
## DEF
    def getRunningSiteInstances(self):
raise NotImplementedError("Unimplemented %s" % self.__init__.im_class)
## DEF
    def getRunningClientInstances(self):
raise NotImplementedError("Unimplemented %s" % self.__init__.im_class)
## DEF
## =====================================================================
## MAIN API
## =====================================================================
## ----------------------------------------------
## get_version
## ----------------------------------------------
def get_version(self, inst):
"""Get the current Git commit id and date in the deployment directory"""
with settings(host_string=inst.public_dns_name):
with cd(self.hstore_dir):
output = run("git log --pretty=format:' %h %at ' -n 1")
data = map(string.strip, output.split(" "))
rev_id = str(data[1])
rev_date = datetime.fromtimestamp(int(data[2]))
LOG.info("Revision: %s / %s" % (rev_id, rev_date))
return (rev_id, rev_date)
## WITH
## DEF
def get_file(self, inst, filePath):
"""Retrieve and print the file from the cluster for the given path"""
sio = StringIO()
with settings(host_string=inst.public_dns_name):
if get(filePath, local_path=sio).failed:
raise Exception("Failed to retrieve remote file '%s'" % filePath)
return sio.getvalue()
## DEF
## ---------------------------------------------------------------------
## INTERNAL API
## ---------------------------------------------------------------------
def exec_benchmark(self, inst, project, \
removals=[ ], json=False, build=True, trace=False, \
updateJar=True, updateConf=True, updateRepo=False, resetLog4j=False, \
extraParams={ } ):
## Make sure we have enough instances
if (self.hostCount + self.clientCount) > len(self.running_instances):
raise Exception("Needed %d host + %d client instances but only %d are currently running" % (\
self.hostCount, self.clientCount, len(self.running_instances)))
hosts = [ ]
clients = [ ]
host_id = 0
site_id = 0
partition_id = 0
partitions_per_site = self.env["hstore.partitions_per_site"]
## HStore Sites
site_hosts = set()
## Attempt to assign the same number of partitions to nodes
if self.env.get("hstore.round_robin_partitions", False):
sites_needed = math.ceil(self.env["hstore.partitions"] / float(partitions_per_site))
partitions_per_site = math.ceil(self.env["hstore.partitions"] / float(sites_needed))
LOG.debug("Partitions Needed: %d" % self.env["hstore.partitions"])
LOG.debug("Partitions Per Site: %d" % partitions_per_site)
LOG.debug("Sites Per Host: %d" % self.env["hstore.sites_per_host"])
for siteInst in self.getRunningInstances():
site_hosts.add(siteInst.private_dns_name)
for i in range(self.env["hstore.sites_per_host"]):
firstPartition = partition_id
lastPartition = min(self.env["hstore.partitions"], firstPartition + partitions_per_site)-1
host = "%s:%d:%d" % (siteInst.private_dns_name, site_id, firstPartition)
if firstPartition != lastPartition:
host += "-%d" % lastPartition
partition_id += partitions_per_site
site_id += 1
hosts.append(host)
if lastPartition+1 == self.env["hstore.partitions"]: break
## FOR (SITES)
if lastPartition+1 == self.env["hstore.partitions"]: break
## FOR
LOG.debug("Last Partition: %d", lastPartition)
LOG.debug("Site Hosts: %s" % site_hosts)
assert len(hosts) > 0
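        ## Example (hypothetical addresses): with 4 partitions, 2 partitions per
        ## site and 1 site per host, the loop above produces entries such as
        ##   "ip-10-0-0-1:0:0-1" and "ip-10-0-0-2:1:2-3",
        ## i.e. "<private_dns>:<site_id>:<first_partition>[-<last_partition>]".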
## HStore Clients
for clientInst in self.getRunningInstances():
if clientInst.private_dns_name in site_hosts: continue
clients.append(clientInst.private_dns_name)
## FOR
LOG.debug("Client Hosts: %s" % clients)
assert len(clients) > 0
        ## Make sure the checkout is up to date
if updateRepo:
LOG.info("Updating H-Store Git checkout")
self.deploy_hstore(build=build, update=True)
## Update H-Store Conf file
## Do this after we update the repository so that we can put in our updates
if updateConf:
LOG.info("Updating H-Store configuration files")
self.write_conf(project, removals, revertFirst=True)
if resetLog4j:
LOG.info("Reverting log4j.properties")
self.resetLog4j()
## Construct dict of command-line H-Store options
hstore_options = {
"client.hosts": ",".join(clients),
"client.count": self.env["client.count"],
"client.threads_per_host": self.env["client.threads_per_host"],
"project": project,
"hosts": '"%s"' % ";".join(hosts),
}
if json: hstore_options["client.output_results_json"] = True
if trace:
hstore_options["trace"] = "traces/%s-%d" % (project, time.time())
LOG.debug("Enabling trace files that will be output to '%s'" % hstore_options["trace"])
LOG.debug("H-Store Config:\n" + pformat(hstore_options))
## Extra Parameters
if extraParams:
hstore_options = dict(hstore_options.items() + extraParams.items())
## Any other option not listed in the above dict should be written to
## a properties file
workloads = None
hstore_opts_cmd = " ".join(map(lambda x: "-D%s=%s" % (x, hstore_options[x]), hstore_options.keys()))
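        ## Example (hypothetical values): the generated option string looks like
        ##   -Dclient.hosts=ip-10-0-0-3,ip-10-0-0-4 -Dclient.count=1
        ##   -Dproject=tpcc -Dhosts="ip-10-0-0-1:0:0-7;ip-10-0-0-2:1:8-15" ...
        ## and is appended to both the "ant hstore-prepare" and "ant hstore-benchmark"
        ## invocations below.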
with settings(host_string=inst.public_dns_name):
with cd(self.hstore_dir):
prefix = self.env["hstore.exec_prefix"]
if updateJar:
LOG.info("Updating H-Store %s project jar file" % (project.upper()))
cmd = "ant %s hstore-prepare %s" % (prefix, hstore_opts_cmd)
run(cmd)
projectFile = os.path.join(self.hstore_dir, project+".jar")
for other in self.running_instances:
if other == inst: continue
run("scp %s %s:%s" % (projectFile, other.public_dns_name, projectFile))
## IF
LOG.info("Running benchmark on %s", inst)
cmd = "ant %s hstore-benchmark %s" % (prefix, hstore_opts_cmd)
output = run(cmd)
## If they wanted a trace file, then we have to ship it back to ourselves
if trace:
output = "/tmp/hstore/workloads/%s.trace" % project
combine_opts = {
"project": project,
"global.memory": 5000,
"output": output,
"workload": hstore_options["trace"] + "*",
}
LOG.debug("Combine %s workload traces into '%s'" % (project.upper(), output))
combine_opts_cmd = " ".join(map(lambda x: "-D%s=%s" % (x, combine_opts[x]), combine_opts.keys()))
run("ant workload-combine %s" % combine_opts_cmd)
workloads = get(output + ".gz")
## IF
## WITH
## WITH
assert output
return output, workloads
## DEF
## ----------------------------------------------
## __setupInstance__
## ----------------------------------------------
def __setupInstance__(self, inst, build=True, update=True):
need_files = False
with settings(host_string=inst.public_dns_name):
with settings(warn_only=True):
if run("test -d %s" % self.hstore_dir).failed:
with cd(os.path.dirname(self.hstore_dir)):
LOG.debug("Initializing H-Store source code directory for branch '%s'" % self.env["hstore.git_branch"])
run("git clone --branch %s %s %s" % (self.env["hstore.git_branch"], \
self.env["hstore.git_options"], \
self.env["hstore.git"]))
update = True
need_files = True
## WITH
with cd(self.hstore_dir):
run("git checkout %s" % self.env["hstore.git_branch"])
if update:
LOG.debug("Pulling in latest changes for branch '%s'" % self.env["hstore.git_branch"])
run("git checkout -- properties")
run("git pull %s" % self.env["hstore.git_options"])
## Checkout Extra Files
with settings(warn_only=True):
if run("test -d %s" % "files").failed:
LOG.debug("Initializing H-Store research files directory for branch '%s'" % self.env["hstore.git_branch"])
run("ant junit-getfiles")
elif update:
LOG.debug("Pulling in latest research files for branch '%s'" % self.env["hstore.git_branch"])
run("ant junit-getfiles-update")
## IF
## WITH
if build:
LOG.debug("Building H-Store from source code")
if self.env["hstore.clean"]:
run("ant clean-all")
run("ant build")
## WITH
## WITH
run("cd %s" % self.hstore_dir)
## WITH
## DEF
## ----------------------------------------------
## __writeConf__
## ----------------------------------------------
def __writeConf__(self, inst, project, removals=[ ], revertFirst=False):
prefix_include = [ 'site', 'client', 'global', 'benchmark' ]
hstoreConf_updates = { }
hstoreConf_removals = set()
benchmarkConf_updates = { }
benchmarkConf_removals = set()
with settings(host_string=inst.public_dns_name):
for key in self.env.keys():
prefix = key.split(".")[0]
if not prefix in prefix_include: continue
if prefix == "benchmark":
benchmarkConf_updates[key.split(".")[-1]] = self.env[key]
else:
hstoreConf_updates[key] = self.env[key]
## FOR
for key in removals:
prefix = key.split(".")[0]
if not prefix in prefix_include: continue
if prefix == "benchmark":
key = key.split(".")[-1]
assert not key in benchmarkConf_updates, key
benchmarkConf_removals.add(key)
else:
assert not key in hstoreConf_updates, key
hstoreConf_removals.add(key)
## FOR
toUpdate = [
("properties/default.properties", hstoreConf_updates, hstoreConf_removals),
("properties/benchmarks/%s.properties" % project, benchmarkConf_updates, benchmarkConf_removals),
]
with cd(self.hstore_dir):
for _file, _updates, _removals in toUpdate:
if revertFirst:
LOG.info("Reverting '%s'" % _file)
run("git checkout %s -- %s" % (self.env["hstore.git_options"], _file))
self.__updateConf__(inst, _file, _updates, _removals)
## FOR
## WITH
## WITH
## DEF
## ----------------------------------------------
## __updateConf__
## ----------------------------------------------
def __updateConf__(self, inst, conf_file, updates={ }, removals=[ ], noSpaces=False):
LOG.info("Updating configuration file '%s' - Updates[%d] / Removals[%d]", conf_file, len(updates), len(removals))
contents = self.get_file(inst, conf_file)
assert len(contents) > 0, "Configuration file '%s' is empty" % conf_file
first = True
space = "" if noSpaces else " "
## Keys we want to update/insert
for key in sorted(updates.keys()):
val = updates[key]
hstore_line = "%s%s=%s%s" % (key, space, space, val)
regex = "^(?:#)*[\s]*%s[ ]*=[ ]*.*" % re.escape(key)
m = re.search(regex, contents, re.MULTILINE)
if not m:
if first: contents += "\n"
contents += hstore_line + "\n"
first = False
LOG.debug("Added '%s' in %s with value '%s'" % (key, conf_file, val))
else:
contents = contents.replace(m.group(0), hstore_line)
LOG.debug("Updated '%s' in %s with value '%s'" % (key, conf_file, val))
## IF
## FOR
## Keys we need to completely remove from the file
for key in removals:
if contents.find(key) != -1:
regex = "%s[ ]*=.*" % re.escape(key)
contents = re.sub(regex, "", contents)
LOG.debug("Removed '%s' in %s" % (key, conf_file))
## FOR
## FOR
sio = StringIO()
sio.write(contents)
with settings(host_string=inst.public_dns_name):
put(local_path=sio, remote_path=conf_file)
## WITH
## DEF
def __resetDebugging__(self, inst):
with settings(host_string=inst.public_dns_name):
with cd(self.hstore_dir):
run("git checkout %s -- %s" % (self.env["hstore.git_options"], "log4j.properties"))
## DEF
def __updateLog4j__(self, inst, debug=[], trace=[]):
LOG.info("Updating log4j properties - DEBUG[%d] / TRACE[%d]", len(debug), len(trace))
conf_file = os.path.join(self.hstore_dir, "log4j.properties")
targetLevels = {
"DEBUG": debug,
"TRACE": trace,
}
with settings(host_string=inst.public_dns_name):
contents = self.get_file(inst, conf_file)
assert len(contents) > 0, "Configuration file '%s' is empty" % conf_file
# Go through the file and update anything that is already there
baseRegex = r"(log4j\.logger\.(?:%s))[\s]*=[\s]*(?:INFO|DEBUG|TRACE)(|,[\s]+[\w]+)"
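            # Group 1 of baseRegex captures the "log4j.logger.<class>" prefix
            # and group 2 any trailing ", <appender>" suffix; the substitution
            # below rewrites only the level between them.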
for level, clazzes in targetLevels.iteritems():
contents = re.sub(baseRegex % "|".join(map(string.strip, clazzes)),
r"\1="+level+r"\2",
contents, flags=re.IGNORECASE)
# Then add in anybody that is missing
first = True
for level, clazzes in targetLevels.iteritems():
for clazz in clazzes:
if contents.find(clazz) == -1:
if first: contents += "\n"
contents += "\nlog4j.logger.%s=%s" % (clazz, level)
first = False
## FOR
sio = StringIO()
sio.write(contents)
put(local_path=sio, remote_path=conf_file)
## WITH
## DEF
## ----------------------------------------------
## __clearLogs__
## ----------------------------------------------
def __clearLogs__(self, inst):
"""Remove all of the log files on the remote cluster"""
with settings(host_string=inst.public_dns_name):
with settings(warn_only=True):
LOG.info("Clearing H-Store log files [%s]" % self.env["hstore.git_branch"])
log_dir = self.env.get("site.log_dir", os.path.join(self.hstore_dir, "obj/logs/sites"))
run("rm -rf %s/*" % log_dir)
## WITH
## DEF
## CLASS | gpl-3.0 |
tombstone/models | official/nlp/configs/bert_test.py | 1 | 2476 | # Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for BERT configurations and models instantiation."""
import tensorflow as tf
from official.nlp.configs import bert
from official.nlp.configs import encoders
class BertModelsTest(tf.test.TestCase):
def test_network_invocation(self):
config = bert.BertPretrainerConfig(
encoder=encoders.TransformerEncoderConfig(vocab_size=10, num_layers=1))
_ = bert.instantiate_pretrainer_from_cfg(config)
# Invokes with classification heads.
config = bert.BertPretrainerConfig(
encoder=encoders.TransformerEncoderConfig(vocab_size=10, num_layers=1),
cls_heads=[
bert.ClsHeadConfig(
inner_dim=10, num_classes=2, name="next_sentence")
])
_ = bert.instantiate_pretrainer_from_cfg(config)
with self.assertRaises(ValueError):
config = bert.BertPretrainerConfig(
encoder=encoders.TransformerEncoderConfig(
vocab_size=10, num_layers=1),
cls_heads=[
bert.ClsHeadConfig(
inner_dim=10, num_classes=2, name="next_sentence"),
bert.ClsHeadConfig(
inner_dim=10, num_classes=2, name="next_sentence")
])
_ = bert.instantiate_pretrainer_from_cfg(config)
def test_checkpoint_items(self):
config = bert.BertPretrainerConfig(
encoder=encoders.TransformerEncoderConfig(vocab_size=10, num_layers=1),
cls_heads=[
bert.ClsHeadConfig(
inner_dim=10, num_classes=2, name="next_sentence")
])
encoder = bert.instantiate_pretrainer_from_cfg(config)
self.assertSameElements(
encoder.checkpoint_items.keys(),
["encoder", "masked_lm", "next_sentence.pooler_dense"])
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
samuelhavron/heroku-buildpack-python | Python-3.4.3/Lib/idlelib/Debugger.py | 76 | 16347 | import os
import bdb
from tkinter import *
from idlelib.WindowList import ListedToplevel
from idlelib.ScrolledList import ScrolledList
from idlelib import macosxSupport
class Idb(bdb.Bdb):
def __init__(self, gui):
self.gui = gui
bdb.Bdb.__init__(self)
def user_line(self, frame):
if self.in_rpc_code(frame):
self.set_step()
return
message = self.__frame2message(frame)
self.gui.interaction(message, frame)
def user_exception(self, frame, info):
if self.in_rpc_code(frame):
self.set_step()
return
message = self.__frame2message(frame)
self.gui.interaction(message, frame, info)
def in_rpc_code(self, frame):
if frame.f_code.co_filename.count('rpc.py'):
return True
else:
prev_frame = frame.f_back
if prev_frame.f_code.co_filename.count('Debugger.py'):
# (that test will catch both Debugger.py and RemoteDebugger.py)
return False
return self.in_rpc_code(prev_frame)
def __frame2message(self, frame):
code = frame.f_code
filename = code.co_filename
lineno = frame.f_lineno
basename = os.path.basename(filename)
message = "%s:%s" % (basename, lineno)
if code.co_name != "?":
message = "%s: %s()" % (message, code.co_name)
return message
class Debugger:
vstack = vsource = vlocals = vglobals = None
def __init__(self, pyshell, idb=None):
if idb is None:
idb = Idb(self)
self.pyshell = pyshell
self.idb = idb
self.frame = None
self.make_gui()
self.interacting = 0
def run(self, *args):
try:
self.interacting = 1
return self.idb.run(*args)
finally:
self.interacting = 0
def close(self, event=None):
if self.interacting:
self.top.bell()
return
if self.stackviewer:
self.stackviewer.close(); self.stackviewer = None
# Clean up pyshell if user clicked debugger control close widget.
# (Causes a harmless extra cycle through close_debugger() if user
# toggled debugger from pyshell Debug menu)
self.pyshell.close_debugger()
# Now close the debugger control window....
self.top.destroy()
def make_gui(self):
pyshell = self.pyshell
self.flist = pyshell.flist
self.root = root = pyshell.root
self.top = top = ListedToplevel(root)
self.top.wm_title("Debug Control")
self.top.wm_iconname("Debug")
top.wm_protocol("WM_DELETE_WINDOW", self.close)
self.top.bind("<Escape>", self.close)
#
self.bframe = bframe = Frame(top)
self.bframe.pack(anchor="w")
self.buttons = bl = []
#
self.bcont = b = Button(bframe, text="Go", command=self.cont)
bl.append(b)
self.bstep = b = Button(bframe, text="Step", command=self.step)
bl.append(b)
self.bnext = b = Button(bframe, text="Over", command=self.next)
bl.append(b)
self.bret = b = Button(bframe, text="Out", command=self.ret)
bl.append(b)
self.bret = b = Button(bframe, text="Quit", command=self.quit)
bl.append(b)
#
for b in bl:
b.configure(state="disabled")
b.pack(side="left")
#
self.cframe = cframe = Frame(bframe)
self.cframe.pack(side="left")
#
if not self.vstack:
self.__class__.vstack = BooleanVar(top)
self.vstack.set(1)
self.bstack = Checkbutton(cframe,
text="Stack", command=self.show_stack, variable=self.vstack)
self.bstack.grid(row=0, column=0)
if not self.vsource:
self.__class__.vsource = BooleanVar(top)
self.bsource = Checkbutton(cframe,
text="Source", command=self.show_source, variable=self.vsource)
self.bsource.grid(row=0, column=1)
if not self.vlocals:
self.__class__.vlocals = BooleanVar(top)
self.vlocals.set(1)
self.blocals = Checkbutton(cframe,
text="Locals", command=self.show_locals, variable=self.vlocals)
self.blocals.grid(row=1, column=0)
if not self.vglobals:
self.__class__.vglobals = BooleanVar(top)
self.bglobals = Checkbutton(cframe,
text="Globals", command=self.show_globals, variable=self.vglobals)
self.bglobals.grid(row=1, column=1)
#
self.status = Label(top, anchor="w")
self.status.pack(anchor="w")
self.error = Label(top, anchor="w")
self.error.pack(anchor="w", fill="x")
self.errorbg = self.error.cget("background")
#
self.fstack = Frame(top, height=1)
self.fstack.pack(expand=1, fill="both")
self.flocals = Frame(top)
self.flocals.pack(expand=1, fill="both")
self.fglobals = Frame(top, height=1)
self.fglobals.pack(expand=1, fill="both")
#
if self.vstack.get():
self.show_stack()
if self.vlocals.get():
self.show_locals()
if self.vglobals.get():
self.show_globals()
def interaction(self, message, frame, info=None):
self.frame = frame
self.status.configure(text=message)
#
if info:
type, value, tb = info
try:
m1 = type.__name__
except AttributeError:
m1 = "%s" % str(type)
if value is not None:
try:
m1 = "%s: %s" % (m1, str(value))
except:
pass
bg = "yellow"
else:
m1 = ""
tb = None
bg = self.errorbg
self.error.configure(text=m1, background=bg)
#
sv = self.stackviewer
if sv:
stack, i = self.idb.get_stack(self.frame, tb)
sv.load_stack(stack, i)
#
self.show_variables(1)
#
if self.vsource.get():
self.sync_source_line()
#
for b in self.buttons:
b.configure(state="normal")
#
self.top.wakeup()
self.root.mainloop()
#
for b in self.buttons:
b.configure(state="disabled")
self.status.configure(text="")
self.error.configure(text="", background=self.errorbg)
self.frame = None
def sync_source_line(self):
frame = self.frame
if not frame:
return
filename, lineno = self.__frame2fileline(frame)
if filename[:1] + filename[-1:] != "<>" and os.path.exists(filename):
self.flist.gotofileline(filename, lineno)
def __frame2fileline(self, frame):
code = frame.f_code
filename = code.co_filename
lineno = frame.f_lineno
return filename, lineno
def cont(self):
self.idb.set_continue()
self.root.quit()
def step(self):
self.idb.set_step()
self.root.quit()
def next(self):
self.idb.set_next(self.frame)
self.root.quit()
def ret(self):
self.idb.set_return(self.frame)
self.root.quit()
def quit(self):
self.idb.set_quit()
self.root.quit()
stackviewer = None
def show_stack(self):
if not self.stackviewer and self.vstack.get():
self.stackviewer = sv = StackViewer(self.fstack, self.flist, self)
if self.frame:
stack, i = self.idb.get_stack(self.frame, None)
sv.load_stack(stack, i)
else:
sv = self.stackviewer
if sv and not self.vstack.get():
self.stackviewer = None
sv.close()
self.fstack['height'] = 1
def show_source(self):
if self.vsource.get():
self.sync_source_line()
def show_frame(self, stackitem):
self.frame = stackitem[0] # lineno is stackitem[1]
self.show_variables()
localsviewer = None
globalsviewer = None
def show_locals(self):
lv = self.localsviewer
if self.vlocals.get():
if not lv:
self.localsviewer = NamespaceViewer(self.flocals, "Locals")
else:
if lv:
self.localsviewer = None
lv.close()
self.flocals['height'] = 1
self.show_variables()
def show_globals(self):
gv = self.globalsviewer
if self.vglobals.get():
if not gv:
self.globalsviewer = NamespaceViewer(self.fglobals, "Globals")
else:
if gv:
self.globalsviewer = None
gv.close()
self.fglobals['height'] = 1
self.show_variables()
def show_variables(self, force=0):
lv = self.localsviewer
gv = self.globalsviewer
frame = self.frame
if not frame:
ldict = gdict = None
else:
ldict = frame.f_locals
gdict = frame.f_globals
if lv and gv and ldict is gdict:
ldict = None
if lv:
lv.load_dict(ldict, force, self.pyshell.interp.rpcclt)
if gv:
gv.load_dict(gdict, force, self.pyshell.interp.rpcclt)
def set_breakpoint_here(self, filename, lineno):
self.idb.set_break(filename, lineno)
def clear_breakpoint_here(self, filename, lineno):
self.idb.clear_break(filename, lineno)
def clear_file_breaks(self, filename):
self.idb.clear_all_file_breaks(filename)
def load_breakpoints(self):
"Load PyShellEditorWindow breakpoints into subprocess debugger"
for editwin in self.pyshell.flist.inversedict:
filename = editwin.io.filename
try:
for lineno in editwin.breakpoints:
self.set_breakpoint_here(filename, lineno)
except AttributeError:
continue
class StackViewer(ScrolledList):
def __init__(self, master, flist, gui):
if macosxSupport.isAquaTk():
            # At least with the stock AquaTk version on OSX 10.4 you'll
            # get a shaking GUI that eventually kills IDLE if the width
            # argument is specified.
ScrolledList.__init__(self, master)
else:
ScrolledList.__init__(self, master, width=80)
self.flist = flist
self.gui = gui
self.stack = []
def load_stack(self, stack, index=None):
self.stack = stack
self.clear()
for i in range(len(stack)):
frame, lineno = stack[i]
try:
modname = frame.f_globals["__name__"]
except:
modname = "?"
code = frame.f_code
filename = code.co_filename
funcname = code.co_name
import linecache
sourceline = linecache.getline(filename, lineno)
sourceline = sourceline.strip()
if funcname in ("?", "", None):
item = "%s, line %d: %s" % (modname, lineno, sourceline)
else:
item = "%s.%s(), line %d: %s" % (modname, funcname,
lineno, sourceline)
if i == index:
item = "> " + item
self.append(item)
if index is not None:
self.select(index)
def popup_event(self, event):
"override base method"
if self.stack:
return ScrolledList.popup_event(self, event)
def fill_menu(self):
"override base method"
menu = self.menu
menu.add_command(label="Go to source line",
command=self.goto_source_line)
menu.add_command(label="Show stack frame",
command=self.show_stack_frame)
def on_select(self, index):
"override base method"
if 0 <= index < len(self.stack):
self.gui.show_frame(self.stack[index])
def on_double(self, index):
"override base method"
self.show_source(index)
def goto_source_line(self):
index = self.listbox.index("active")
self.show_source(index)
def show_stack_frame(self):
index = self.listbox.index("active")
if 0 <= index < len(self.stack):
self.gui.show_frame(self.stack[index])
def show_source(self, index):
if not (0 <= index < len(self.stack)):
return
frame, lineno = self.stack[index]
code = frame.f_code
filename = code.co_filename
if os.path.isfile(filename):
edit = self.flist.open(filename)
if edit:
edit.gotoline(lineno)
class NamespaceViewer:
def __init__(self, master, title, dict=None):
width = 0
height = 40
if dict:
height = 20*len(dict) # XXX 20 == observed height of Entry widget
self.master = master
self.title = title
import reprlib
self.repr = reprlib.Repr()
self.repr.maxstring = 60
self.repr.maxother = 60
self.frame = frame = Frame(master)
self.frame.pack(expand=1, fill="both")
self.label = Label(frame, text=title, borderwidth=2, relief="groove")
self.label.pack(fill="x")
self.vbar = vbar = Scrollbar(frame, name="vbar")
vbar.pack(side="right", fill="y")
self.canvas = canvas = Canvas(frame,
height=min(300, max(40, height)),
scrollregion=(0, 0, width, height))
canvas.pack(side="left", fill="both", expand=1)
vbar["command"] = canvas.yview
canvas["yscrollcommand"] = vbar.set
self.subframe = subframe = Frame(canvas)
self.sfid = canvas.create_window(0, 0, window=subframe, anchor="nw")
self.load_dict(dict)
dict = -1
def load_dict(self, dict, force=0, rpc_client=None):
if dict is self.dict and not force:
return
subframe = self.subframe
frame = self.frame
for c in list(subframe.children.values()):
c.destroy()
self.dict = None
if not dict:
l = Label(subframe, text="None")
l.grid(row=0, column=0)
else:
#names = sorted(dict)
###
# Because of (temporary) limitations on the dict_keys type (not yet
            # public or pickleable), have the subprocess send a list of
# keys, not a dict_keys object. sorted() will take a dict_keys
# (no subprocess) or a list.
#
# There is also an obscure bug in sorted(dict) where the
# interpreter gets into a loop requesting non-existing dict[0],
# dict[1], dict[2], etc from the RemoteDebugger.DictProxy.
###
keys_list = dict.keys()
names = sorted(keys_list)
###
row = 0
for name in names:
value = dict[name]
svalue = self.repr.repr(value) # repr(value)
# Strip extra quotes caused by calling repr on the (already)
# repr'd value sent across the RPC interface:
if rpc_client:
svalue = svalue[1:-1]
l = Label(subframe, text=name)
l.grid(row=row, column=0, sticky="nw")
l = Entry(subframe, width=0, borderwidth=0)
l.insert(0, svalue)
l.grid(row=row, column=1, sticky="nw")
row = row+1
self.dict = dict
# XXX Could we use a <Configure> callback for the following?
subframe.update_idletasks() # Alas!
width = subframe.winfo_reqwidth()
height = subframe.winfo_reqheight()
canvas = self.canvas
self.canvas["scrollregion"] = (0, 0, width, height)
if height > 300:
canvas["height"] = 300
frame.pack(expand=1)
else:
canvas["height"] = height
frame.pack(expand=0)
def close(self):
self.frame.destroy()
| mit |
markstoehr/spectral_features | filters/transforms.py | 1 | 12901 | from __future__ import division
import numpy as np
import filterbank as fb
from scipy.ndimage.filters import maximum_filter, convolve
from nitime.algorithms.spectral import dpss_windows
def preemphasis(x,preemph=.95):
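    # y[0] = x[0] and y[n] = x[n] - preemph * x[n-1]: a first-order high-pass
    # (pre-emphasis) filter applied before spectral analysis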
    return np.append( (x[1:] - preemph*x[:-1])[::-1],x[0])[::-1]
def process_wav(x):
return x.astype(float)/2**15
def spec_freq_avg(x,fbank1,fbank2,oversampling,return_midpoints=False):
"""
wavelet-averaged spectrogram--an approximation
to MFCCs using wavelets rather than Mel-scale filters
"""
# we just want the bandwidth of the lowpass filter
N = x.size
supp_mult=4
bwphi2 = fb.filter_freq(fbank2)[2]
Nfilt = fbank1.psifilters.shape[1]
N1 = 2**int(.5+(np.log2(2*np.pi/bwphi2)))
    fs = np.abs(fbank1.psifilters[:,::Nfilt//(N1*supp_mult)])
window = np.fft.ifft(fbank2.phifilter)
window = np.hstack((window[Nfilt-N1*supp_mult/2:],
window[:N1*supp_mult/2]))
# number of output frames
nframes = int(.5 + N/N1*2**oversampling)
# get the indices for each sample
indices = (np.arange(N1*supp_mult,dtype=int)-int((N1*supp_mult)/2))[np.newaxis, : ] + int(N1/2**oversampling)*np.arange(nframes,dtype=int)[:,np.newaxis]
# symmetrize the front
indices *= (2*(indices > 0)-1)
# symmetrize the tail
tail_indices = indices > N-1
indices[tail_indices] = N-1 - (indices[tail_indices] - N+1)
frames = np.abs(np.fft.fft(x[indices] * window))
if return_midpoints:
return np.dot(fs,frames.T), indices[:,int(indices.shape[1]/2+.5)]
return np.dot(fs,frames.T)
def spectrogram(x,sample_rate,freq_cutoff,winsize,nfft,oversampling,
h,return_midpoints=False):
"""
Compute a simple spectrogram using a given window
"""
N = len(x)
nframes = int(.5 + N/winsize*2**oversampling)
greater_than_winlength = winsize*np.ones((nframes,nfft)) > np.arange(nfft)
indices = (np.arange(nfft,dtype=int)-int(nfft/2))[np.newaxis, : ] + int(nfft/2**oversampling)*np.arange(nframes,dtype=int)[:,np.newaxis]
indices *= (2*(indices > 0)-1)
# symmetrize the tail
tail_indices = indices > N-1
indices[tail_indices] = N-1 - (indices[tail_indices] - N+1)
"""
"""
abs_avg = np.zeros(indices.shape)
for i in xrange(5):
f = np.fft.fft((x[indices]*greater_than_winlength) * zero_pad_window(h[i],nfft-winsize),nfft)
f_mv = f * np.exp(2j*np.pi*np.outer(indices[:,0],np.arange(indices.shape[1]))/nfft)
abs_avg += np.abs(f_mv)
abs_avg/=5
cutoff_idx = int(freq_cutoff/sample_rate * winsize)
if return_midpoints:
return abs_avg[:,:cutoff_idx], indices[:,int(indices.shape[1]/2+.5)]
return abs_avg[:,:cutoff_idx]
def zero_pad_window(w,n_pad):
w0 = np.zeros(w.shape[0] + n_pad)
w0[:w.shape[0]] = w
return w0
def spectrogram_magnitude_gradients(x,sample_rate,freq_cutoff,winsize,nfft,oversampling,
h,dh,tt,return_midpoints=False):
"""
Returns the spectrogram as well as magnitude gradients
all of these are multitaper
"""
N = len(x)
nframes = int(.5 + N/winsize*2**oversampling)
greater_than_winlength = winsize*np.ones((nframes,nfft)) > np.arange(nfft)
indices = (np.arange(nfft,dtype=int)-int(nfft/2))[np.newaxis, : ] + int(nfft/2**oversampling)*np.arange(nframes,dtype=int)[:,np.newaxis]
indices *= (2*(indices > 0)-1)
# symmetrize the tail
tail_indices = indices > N-1
indices[tail_indices] = N-1 - (indices[tail_indices] - N+1)
"""
"""
abs_avg = np.zeros(indices.shape)
avg_dM_dt = np.zeros(indices.shape)
avg_dM_dw = np.zeros(indices.shape)
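    # The per-taper quantities below follow the usual reassignment-style
    # derivative estimates: with F_h, F_dh and F_th denoting STFTs taken with
    # a DPSS taper h, its derivative dh, and the time-weighted taper t*h,
    #   dM/dt =  Re( F_dh * conj(F_h) ) / |F_h|**2
    #   dM/dw = -Im( F_th * conj(F_h) ) / |F_h|**2
    # which (up to sign conventions) estimate the time and frequency
    # derivatives of the log STFT magnitude, averaged over the five tapers.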
for i in xrange(5):
f = np.fft.fft((x[indices]*greater_than_winlength) * zero_pad_window(h[i],nfft-winsize),nfft)
f_mv = f * np.exp(2j*np.pi*np.outer(indices[:,0],np.arange(indices.shape[1]))/nfft)
abs_avg += np.abs(f_mv)
df = np.fft.fft((x[indices]*greater_than_winlength) * zero_pad_window(dh[i],nfft-winsize),nfft)
df_mv = df * np.exp(2j*np.pi*np.outer(indices[:,0],np.arange(indices.shape[1]))/nfft)
tf = np.fft.fft((x[indices]*greater_than_winlength)*zero_pad_window(tt*h[i],nfft-winsize) ,nfft)
tf_mv = tf * np.exp(2j*np.pi*np.outer(indices[:,0],np.arange(indices.shape[1]))/nfft)
abs_f_mv = np.abs(f_mv)**2
dM_dt = np.real((df_mv * f_mv.conj())/abs_f_mv)
dM_dw = - np.imag((tf_mv * f_mv.conj())/abs_f_mv)
avg_dM_dt += dM_dt
avg_dM_dw += dM_dw
abs_avg/=5
avg_dM_dt /= 5
avg_dM_dw /= 5
cutoff_idx = int(freq_cutoff/sample_rate * winsize)
if return_midpoints:
return abs_avg[:,:cutoff_idx], avg_dM_dt[:,:cutoff_idx], avg_dM_dw[:,:cutoff_idx], indices[:,int(indices.shape[1]/2+.5)]
return abs_avg[:,:cutoff_idx], avg_dM_dt[:,:cutoff_idx], avg_dM_dw[:,:cutoff_idx]
def spectrogram_reassignment(x,sample_rate,freq_cutoff,winsize,nfft,oversampling,
h,dh,tt,return_midpoints=False):
"""
Returns the spectrogram as well as magnitude gradients
all of these are multitaper
"""
N = len(x)
nframes = int(.5 + N/winsize*2**oversampling)
greater_than_winlength = winsize*np.ones((nframes,nfft)) > np.arange(nfft)
indices = (np.arange(nfft,dtype=int)-int(nfft/2))[np.newaxis, : ] + int(nfft/2**oversampling)*np.arange(nframes,dtype=int)[:,np.newaxis]
indices *= (2*(indices > 0)-1)
# symmetrize the tail
tail_indices = indices > N-1
indices[tail_indices] = N-1 - (indices[tail_indices] - N+1)
"""
"""
abs_avg = np.zeros(indices.shape)
avg_dM_dt = np.zeros(indices.shape)
avg_dM_dw = np.zeros(indices.shape)
for i in xrange(5):
f = np.fft.fft((x[indices]*greater_than_winlength) * zero_pad_window(h[i],nfft-winsize),nfft)
f_mv = f * np.exp(2j*np.pi*np.outer(indices[:,0],np.arange(indices.shape[1]))/nfft)
abs_avg += np.abs(f_mv)
df = np.fft.fft((x[indices]*greater_than_winlength) * zero_pad_window(dh[i],nfft-winsize),nfft)
df_mv = df * np.exp(2j*np.pi*np.outer(indices[:,0],np.arange(indices.shape[1]))/nfft)
tf = np.fft.fft((x[indices]*greater_than_winlength)*zero_pad_window(tt*h[i],nfft-winsize) ,nfft)
tf_mv = tf * np.exp(2j*np.pi*np.outer(indices[:,0],np.arange(indices.shape[1]))/nfft)
abs_f_mv = np.abs(f_mv)**2
        dM_dt = np.real((df_mv * f_mv.conj())/abs_f_mv)
        dM_dw = - np.imag((tf_mv * f_mv.conj())/abs_f_mv)
        avg_dM_dt += dM_dt
        avg_dM_dw += dM_dw
abs_avg/=5
avg_dM_dt /= 5
avg_dM_dw /= 5
cutoff_idx = int(freq_cutoff/sample_rate * winsize)
if return_midpoints:
return abs_avg[:,:cutoff_idx], avg_dM_dt[:,:cutoff_idx], avg_dM_dw[:,:cutoff_idx], indices[:,int(indices.shape[1]/2+.5)]
return abs_avg[:,:cutoff_idx], avg_dM_dt[:,:cutoff_idx], avg_dM_dw[:,:cutoff_idx]
def binary_phase_features(x,sample_rate,freq_cutoff,winsize,nfft,oversampling,h,dh,tt,gfilter,gsigma,fthresh,othresh,spread_length=3,return_midpoints=True):
"""
    We assume x has already been preemphasized; this just recovers the frequency components of the signal.
"""
N = len(x)
nframes = int(.5 + N/winsize*2**oversampling)
greater_than_winlength = winsize*np.ones((nframes,nfft)) > np.arange(nfft)
indices = (np.arange(nfft,dtype=int)-int(nfft/2))[np.newaxis, : ] + int(nfft/2**oversampling)*np.arange(nframes,dtype=int)[:,np.newaxis]
indices *= (2*(indices > 0)-1)
# symmetrize the tail
tail_indices = indices > N-1
indices[tail_indices] = N-1 - (indices[tail_indices] - N+1)
gt,gf = gfilter.shape
gdwfilter = -(np.mgrid[:gt,:gf]-3.5)[1]/gsigma * gfilter
gdtfilter = -(np.mgrid[:gt,:gf]-3.5)[0]/gsigma * gfilter
"""
"""
abs_avg = np.zeros(indices.shape)
avg_dphi_dt = np.zeros(indices.shape)
avg_dphi_dw = np.zeros(indices.shape)
for i in xrange(5):
f = np.fft.fft((x[indices]*greater_than_winlength) * h[i])
f_mv = f * np.exp(2j*np.pi*np.outer(indices[:,0],np.arange(indices.shape[1]))/nfft)
abs_avg += np.abs(f_mv)
df = np.fft.fft((x[indices]*greater_than_winlength) * dh[i])
df_mv = df * np.exp(2j*np.pi*np.outer(indices[:,0],np.arange(indices.shape[1]))/nfft)
tf = np.fft.fft((x[indices]*greater_than_winlength) * (tt*h[i]))
tf_mv = tf * np.exp(2j*np.pi*np.outer(indices[:,0],np.arange(indices.shape[1]))/nfft)
abs_f_mv = np.abs(f_mv)**2
dphi_dt = np.imag((df_mv * f_mv.conj())/abs_f_mv)
dphi_dw = - np.real((tf_mv * f_mv.conj())/abs_f_mv)
avg_dphi_dt += dphi_dt
avg_dphi_dw += dphi_dw
abs_avg/=5
avg_dphi_dt /= 5
avg_dphi_dw /= 5
filter_d2phi_dwdt = convolve(avg_dphi_dt,gdwfilter)
filter_d2phi_dt2 = convolve(avg_dphi_dt,gdtfilter)
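    # Build a 4-channel binary feature map from the smoothed phase derivatives:
    # channel 0 marks bins where d2phi/(dw dt) exceeds fthresh, and channels
    # 1-3 gate channel 0 by the sign of d2phi/dt2 (positive / near-zero /
    # negative, thresholded by othresh), each spread along a matching diagonal,
    # horizontal, or anti-diagonal footprint of length spread_length.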
F = np.zeros(filter_d2phi_dwdt.shape + (4,),dtype=np.uint8)
S = np.sign(filter_d2phi_dt2)*( np.abs(filter_d2phi_dt2) > othresh)
F[:,:,0] = filter_d2phi_dwdt > fthresh
F[:,:,1] = F[:,:,0] * maximum_filter(S == 1,footprint=np.eye(spread_length),mode='constant')
F[:,:,2] = F[:,:,0] * maximum_filter(S == 0,footprint=np.ones((1,spread_length)),mode='constant')
F[:,:,3] = F[:,:,0] * maximum_filter(S == -1,footprint=np.eye(spread_length)[::-1],mode='constant')
cutoff_idx = int(freq_cutoff/sample_rate * winsize)
if return_midpoints:
return abs_avg[:,:cutoff_idx], F[:,:cutoff_idx], indices[:,int(indices.shape[1]/2+.5)]
return abs_avg[:,:cutoff_idx], F[:,:cutoff_idx]
def wavelet_scat(x,fbank,
oversampling=1,
psi_mask = None,
                 x_resolution = 0):
    """
    Apply the wavelet filterbank fbank to x, returning the lowpass-filtered
    signal x_phi and the bandpass (psi) wavelet coefficients x_psi.
    """
if psi_mask is None:
        psi_mask = np.ones(fbank.psifilters.shape[0],dtype=bool)
N = x.shape[0]
assert x.ndim == 1
bwpsi, bwphi = fb.filter_freq(fbank)[1:]
N_padded = fbank.psifilters.shape[1]
n_psi = fbank.psifilters.shape[0]
x = pad_signal(x,N_padded)
xf = np.fft.fft(x)
    # compute the downsampling factor as a power of 2; the input signal's
    # resolution (x_resolution) is assumed to play the role of j0 here
    j0 = x_resolution
    downsample = max(int(.5 + np.log2(2*np.pi/bwphi)) - j0 - oversampling,0)
x_phi = unpad_signal(np.real(conv_sub(xf, fbank.phifilter, downsample)),
downsample,N)
x_psi = np.zeros((n_psi,N))
for i,psifilter in enumerate(fbank.psifilters):
        downsample = max(int(.5 + np.log2(np.pi/bwpsi[i])) - j0 - max(1,oversampling),0)
x_psi[i] = unpad_signal(conv_sub(xf,psifilter,downsample),downsample, N)
return x_phi,x_psi
def pad_signal(x, Npad):
"""
The input signal x is padded to be of length Npad
using a symmetric boundary so that any discontinuities are as
    far from the signal as possible.
    The signal is placed at the far left of the output signal;
    this allows convolutions to be calculated with lower
    error.
    No handling for complex input yet.
    We assume that Npad is no greater than twice the signal length.
"""
Norig = x.shape[0]
y = np.zeros(Norig*2)
y[:Norig] = x
y[Norig:] = x[::-1]
midpoint = Norig/2
rm_nsamples = y.shape[0] - Npad
# we want to remove a chunk with the property that
# start:end is removed and Npad - start == y.shape[0] - end
# deriving further we get
# start == Npad - y.shape[0] + end
start = Norig + int(midpoint - rm_nsamples/2)
end = start + rm_nsamples
#
y[start:Npad] = y[end:]
return y[:Npad]
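# Worked example (illustrative): pad_signal(np.array([1.,2.,3.,4.,5.,6.]), 8)
# first builds [1,2,3,4,5,6,6,5,4,3,2,1], removes 4 samples from the middle of
# the mirrored half, and returns [1,2,3,4,5,6,6,1], so both edges of the
# (circularly interpreted) padded signal stay continuous with x.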
def unpad_signal(x, resolution, Norig):
"""
The signal x is assumed to be a padded version at resolution
2**resolution of a signal of length Norig
We unpad it in this function
"""
return x[:int(Norig/2**resolution)]
def conv_sub(xf, filt, ds):
"""
TODO: make work for a truncated filter to make processing
much faster
Parameters
----------
xf: array-like
        Fourier transform of the signal; assume that it has even
length (this can be produced via padding from an earlier step)
filt: array-like
Filter to convolve the signal with
ds: int
downsampling factor
"""
N = xf.shape[0]
# modified filter has the correct length and corrects for the
# middle point
mod_filter = np.zeros(N)
mod_filter[:int(N/2)] = filt[:int(N/2)]
mod_filter[int(N/2)] = (filt[int(N/2)] + filt[-int(N/2)])/2
mod_filter[int(N/2)+1:] = filt[1-int(N/2):]
yf = mod_filter * xf
# compute the downsampling factor
    downsampj = int(ds + np.log2(yf.shape[0]/N))
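    # the spectrum is shortened by a factor of 2**downsampj (by summing groups
    # of bins) when downsampj is positive, or zero-padded when it is negative,
    # so the inverse FFT below returns the signal at the requested resolution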
if downsampj > 0:
yf_ds = yf.reshape(int(N/2**downsampj),2**downsampj).sum(1)
elif downsampj < 0:
yf_ds = np.zeros(2**(-downsampj)*yf.shape[0] )
yf_ds[:yf.shape[0]] = yf
else:
yf_ds = yf
return np.fft.ifft(yf_ds)/2**(int(ds/2))
| gpl-3.0 |
NullSoldier/django | tests/template_tests/test_logging.py | 210 | 2286 | from __future__ import unicode_literals
import logging
from django.template import Engine, Variable, VariableDoesNotExist
from django.test import SimpleTestCase
class TestHandler(logging.Handler):
def __init__(self):
super(TestHandler, self).__init__()
self.log_record = None
def emit(self, record):
self.log_record = record
class VariableResolveLoggingTests(SimpleTestCase):
def setUp(self):
self.test_handler = TestHandler()
self.logger = logging.getLogger('django.template')
self.original_level = self.logger.level
self.logger.addHandler(self.test_handler)
self.logger.setLevel(logging.DEBUG)
def tearDown(self):
self.logger.removeHandler(self.test_handler)
self.logger.level = self.original_level
def test_log_on_variable_does_not_exist_silent(self):
class TestObject(object):
class SilentDoesNotExist(Exception):
silent_variable_failure = True
@property
def template_name(self):
return "template"
@property
def template(self):
return Engine().from_string('')
@property
def article(self):
raise TestObject.SilentDoesNotExist("Attribute does not exist.")
def __iter__(self):
return iter(attr for attr in dir(TestObject) if attr[:2] != "__")
def __getitem__(self, item):
return self.__dict__[item]
Variable('article').resolve(TestObject())
self.assertEqual(
self.test_handler.log_record.msg,
'template - Attribute does not exist.'
)
def test_log_on_variable_does_not_exist_not_silent(self):
with self.assertRaises(VariableDoesNotExist):
Variable('article.author').resolve({'article': {'section': 'News'}})
self.assertEqual(
self.test_handler.log_record.msg,
'unknown - Failed lookup for key [author] in %r' %
("{%r: %r}" % ('section', 'News'), )
)
def test_no_log_when_variable_exists(self):
Variable('article.section').resolve({'article': {'section': 'News'}})
self.assertIsNone(self.test_handler.log_record)
| bsd-3-clause |
gunan/tensorflow | tensorflow/python/keras/layers/preprocessing/text_vectorization.py | 1 | 36301 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras text vectorization preprocessing layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import operator
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.engine.base_preprocessing_layer import Combiner
from tensorflow.python.keras.engine.base_preprocessing_layer import CombinerPreprocessingLayer
from tensorflow.python.keras.layers.preprocessing import categorical_encoding
from tensorflow.python.keras.layers.preprocessing import index_lookup
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_string_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops.ragged import ragged_functional_ops
from tensorflow.python.ops.ragged import ragged_string_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.util import compat
from tensorflow.python.util.tf_export import keras_export
LOWER_AND_STRIP_PUNCTUATION = "lower_and_strip_punctuation"
SPLIT_ON_WHITESPACE = "whitespace"
TFIDF = categorical_encoding.TFIDF
INT = categorical_encoding.INT
BINARY = categorical_encoding.BINARY
COUNT = categorical_encoding.COUNT
# This is an explicit regex of all the tokens that will be stripped if
# LOWER_AND_STRIP_PUNCTUATION is set. If an application requires other
# stripping, a Callable should be passed into the 'standardize' arg.
DEFAULT_STRIP_REGEX = r'[!"#$%&()\*\+,-\./:;<=>?@\[\\\]^_`{|}~\']'
# The string tokens in the extracted vocabulary
_VOCAB_NAME = "vocab"
# The inverse-document-frequency weights
_IDF_NAME = "idf"
# The IDF data for the OOV token
_OOV_IDF_NAME = "oov_idf"
# The string tokens in the full vocabulary
_ACCUMULATOR_VOCAB_NAME = "vocab"
# The total counts of each token in the vocabulary
_ACCUMULATOR_COUNTS_NAME = "counts"
# The number of documents / examples that each token appears in.
_ACCUMULATOR_DOCUMENT_COUNTS = "document_counts"
# The total number of documents / examples in the dataset.
_ACCUMULATOR_NUM_DOCUMENTS = "num_documents"
@keras_export(
"keras.layers.experimental.preprocessing.TextVectorization", v1=[])
class TextVectorization(CombinerPreprocessingLayer):
"""Text vectorization layer.
This layer has basic options for managing text in a Keras model. It
transforms a batch of strings (one sample = one string) into either a list of
token indices (one sample = 1D tensor of integer token indices) or a dense
representation (one sample = 1D tensor of float values representing data about
the sample's tokens).
If desired, the user can call this layer's adapt() method on a dataset.
When this layer is adapted, it will analyze the dataset, determine the
frequency of individual string values, and create a 'vocabulary' from them.
This vocabulary can have unlimited size or be capped, depending on the
configuration options for this layer; if there are more unique values in the
input than the maximum vocabulary size, the most frequent terms will be used
to create the vocabulary.
The processing of each sample contains the following steps:
1) standardize each sample (usually lowercasing + punctuation stripping)
2) split each sample into substrings (usually words)
3) recombine substrings into tokens (usually ngrams)
4) index tokens (associate a unique int value with each token)
5) transform each sample using this index, either into a vector of ints or
a dense float vector.
Some notes on passing Callables to customize splitting and normalization for
this layer:
1) Any callable can be passed to this Layer, but if you want to serialize
this object you should only pass functions that are registered Keras
serializables (see `tf.keras.utils.register_keras_serializable` for more
details).
2) When using a custom callable for `standardize`, the data received
by the callable will be exactly as passed to this layer. The callable
should return a tensor of the same shape as the input.
3) When using a custom callable for `split`, the data received by the
callable will have the 1st dimension squeezed out - instead of
`[["string to split"], ["another string to split"]]`, the Callable will
see `["string to split", "another string to split"]`. The callable should
return a Tensor with the first dimension containing the split tokens -
in this example, we should see something like `[["string", "to", "split],
["another", "string", "to", "split"]]`. This makes the callable site
natively compatible with `tf.strings.split()`.
Attributes:
max_tokens: The maximum size of the vocabulary for this layer. If None,
there is no cap on the size of the vocabulary.
standardize: Optional specification for standardization to apply to the
input text. Values can be None (no standardization),
'lower_and_strip_punctuation' (lowercase and remove punctuation) or a
Callable. Default is 'lower_and_strip_punctuation'.
split: Optional specification for splitting the input text. Values can be
None (no splitting), 'whitespace' (split on ASCII whitespace), or a
Callable. The default is 'whitespace'.
ngrams: Optional specification for ngrams to create from the possibly-split
input text. Values can be None, an integer or tuple of integers; passing
an integer will create ngrams up to that integer, and passing a tuple of
integers will create ngrams for the specified values in the tuple. Passing
None means that no ngrams will be created.
output_mode: Optional specification for the output of the layer. Values can
be "int", "binary", "count" or "tf-idf", configuring the layer as follows:
"int": Outputs integer indices, one integer index per split string
token.
"binary": Outputs a single int array per batch, of either vocab_size or
max_tokens size, containing 1s in all elements where the token mapped
to that index exists at least once in the batch item.
"count": As "binary", but the int array contains a count of the number
of times the token at that index appeared in the batch item.
"tf-idf": As "binary", but the TF-IDF algorithm is applied to find the
value in each token slot.
output_sequence_length: Only valid in INT mode. If set, the output will have
its time dimension padded or truncated to exactly `output_sequence_length`
values, resulting in a tensor of shape [batch_size,
output_sequence_length] regardless of how many tokens resulted from the
splitting step. Defaults to None.
pad_to_max_tokens: Only valid in "binary", "count", and "tf-idf" modes. If
True, the output will have its feature axis padded to `max_tokens` even if
the number of unique tokens in the vocabulary is less than max_tokens,
resulting in a tensor of shape [batch_size, max_tokens] regardless of
vocabulary size. Defaults to True.
Example:
This example instantiates a TextVectorization layer that lowercases text,
splits on whitespace, strips punctuation, and outputs integer vocab indices.
```
max_features = 5000 # Maximum vocab size.
max_len = 40 # Sequence length to pad the outputs to.
# Create the layer.
vectorize_layer = text_vectorization.TextVectorization(
max_tokens=max_features,
output_mode='int',
output_sequence_length=max_len)
# Now that the vocab layer has been created, call `adapt` on the text-only
# dataset to create the vocabulary. You don't have to batch, but for large
# datasets this means we're not keeping spare copies of the dataset in memory.
vectorize_layer.adapt(text_dataset.batch(64))
# Create the model that uses the vectorize text layer
model = tf.keras.models.Sequential()
# Start by creating an explicit input layer. It needs to have a shape of (1,)
# (because we need to guarantee that there is exactly one string input per
# batch), and the dtype needs to be 'string'.
model.add(tf.keras.Input(shape=(1,), dtype=tf.string))
# The first layer in our model is the vectorization layer. After this layer,
# we have a tensor of shape (batch_size, max_len) containing vocab indices.
model.add(vectorize_layer)
# Next, we add a layer to map those vocab indices into a space of
# dimensionality 'embedding_dims'. Note that we're using max_features+1 here,
# since there's an OOV token that gets added to the vocabulary in
# vectorize_layer.
model.add(tf.keras.layers.Embedding(max_features+1, embedding_dims))
# At this point, you have embedded float data representing your tokens, and
# can add whatever other layers you need to create your model.
```
"""
# TODO(momernick): Add an examples section to the docstring.
def __init__(self,
max_tokens=None,
standardize=LOWER_AND_STRIP_PUNCTUATION,
split=SPLIT_ON_WHITESPACE,
ngrams=None,
output_mode=INT,
output_sequence_length=None,
pad_to_max_tokens=True,
**kwargs):
# This layer only applies to string processing, and so should only have
# a dtype of 'string'.
if "dtype" in kwargs and kwargs["dtype"] != dtypes.string:
raise ValueError("TextVectorization may only have a dtype of string.")
elif "dtype" not in kwargs:
kwargs["dtype"] = dtypes.string
# 'standardize' must be one of (None, LOWER_AND_STRIP_PUNCTUATION, callable)
layer_utils.validate_string_arg(
standardize,
allowable_strings=(LOWER_AND_STRIP_PUNCTUATION),
layer_name="TextVectorization",
arg_name="standardize",
allow_none=True,
allow_callables=True)
# 'split' must be one of (None, SPLIT_ON_WHITESPACE, callable)
layer_utils.validate_string_arg(
split,
allowable_strings=(SPLIT_ON_WHITESPACE),
layer_name="TextVectorization",
arg_name="split",
allow_none=True,
allow_callables=True)
# 'output_mode' must be one of (None, INT, COUNT, BINARY, TFIDF)
layer_utils.validate_string_arg(
output_mode,
allowable_strings=(INT, COUNT, BINARY, TFIDF),
layer_name="TextVectorization",
arg_name="output_mode",
allow_none=True)
# 'ngrams' must be one of (None, int, tuple(int))
if not (ngrams is None or
isinstance(ngrams, int) or
isinstance(ngrams, tuple) and
all(isinstance(item, int) for item in ngrams)):
raise ValueError(("`ngrams` must be None, an integer, or a tuple of "
"integers. Got %s") % (ngrams,))
# 'output_sequence_length' must be one of (None, int) and is only
# set if output_mode is INT.
if (output_mode == INT and not (isinstance(output_sequence_length, int) or
(output_sequence_length is None))):
raise ValueError("`output_sequence_length` must be either None or an "
"integer when `output_mode` is 'int'. "
"Got %s" % output_sequence_length)
if output_mode != INT and output_sequence_length is not None:
raise ValueError("`output_sequence_length` must not be set if "
"`output_mode` is not 'int'.")
# If max_tokens is set, the value must be greater than 1 - otherwise we
# are creating a 0-element vocab, which doesn't make sense.
if max_tokens is not None and max_tokens < 1:
raise ValueError("max_tokens must be > 1.")
self._max_tokens = max_tokens
# In INT mode, we have two reserved values (PAD and OOV). However, non-INT
# modes don't have a PAD value, so we only need to reserve one value.
self._reserved_values = 2 if output_mode == INT else 1
# In INT mode, the zero value is reserved for padding (per Keras standard
# padding approaches). In non-INT modes, there is no padding so we can set
# the OOV value to zero instead of one.
self._oov_value = 1 if output_mode == INT else 0
# We always reduce the max token number by 1 to account for the OOV token
# if it is set. Keras' use of the reserved number 0 for padding tokens,
# if the output is in INT mode, does not really count as a 'token' for
# vocabulary purposes, so we only reduce vocab size by 1 here.
self._max_vocab_size = max_tokens - 1 if max_tokens is not None else None
self._standardize = standardize
self._split = split
self._ngrams_arg = ngrams
if isinstance(ngrams, int):
self._ngrams = tuple(range(1, ngrams + 1))
else:
self._ngrams = ngrams
self._output_mode = output_mode
self._output_sequence_length = output_sequence_length
self._pad_to_max = pad_to_max_tokens
self._vocab_size = 0
self._called = False
super(TextVectorization, self).__init__(
combiner=_TextVectorizationCombiner(
self._max_vocab_size, compute_idf=output_mode == TFIDF),
**kwargs)
self._supports_ragged_inputs = True
reserve_zero = output_mode in [None, INT]
self._index_lookup_layer = self._get_index_lookup_class()(
max_tokens=max_tokens, reserve_zero=reserve_zero, dtype=dtypes.string)
# If this layer is configured for string or integer output, we do not
# create a vectorization layer (as the output is not vectorized).
if self._output_mode in [None, INT]:
return
if max_tokens is not None and self._pad_to_max:
max_elements = max_tokens
else:
max_elements = None
self._vectorize_layer = self._get_vectorization_class()(
max_tokens=max_elements, output_mode=self._output_mode)
# These are V1/V2 shim points. There are V1 implementations in the V1 class.
def _get_vectorization_class(self):
return categorical_encoding.CategoricalEncoding
def _get_table_data(self):
keys, values = self._table.export()
return (keys.numpy(), values.numpy())
def _get_index_lookup_class(self):
return index_lookup.IndexLookup
def _to_numpy(self, preprocessed_data):
"""Converts preprocessed inputs into numpy arrays."""
if isinstance(preprocessed_data, np.ndarray):
return preprocessed_data
return np.array(preprocessed_data.to_list())
# End of V1/V2 shim points.
def _assert_same_type(self, expected_type, values, value_name):
if dtypes.as_dtype(expected_type) != dtypes.as_dtype(values.dtype):
raise RuntimeError("Expected %s type %s, got %s" %
(value_name, expected_type, values.dtype))
def _convert_to_ndarray(self, x):
return np.array(x) if isinstance(x, (list, tuple)) else x
def compute_output_shape(self, input_shape):
if self._output_mode != INT:
return tensor_shape.TensorShape([input_shape[0], self._max_tokens])
if self._output_mode == INT and self._split is None:
return input_shape
if self._output_mode == INT and self._split is not None:
input_shape = list(input_shape)
input_shape[1] = self._output_sequence_length
return tensor_shape.TensorShape(input_shape)
def compute_output_signature(self, input_spec):
output_shape = self.compute_output_shape(input_spec.shape.as_list())
output_dtype = K.floatx() if self._output_mode == TFIDF else dtypes.int64
return tensor_spec.TensorSpec(shape=output_shape, dtype=output_dtype)
def adapt(self, data, reset_state=True):
"""Fits the state of the preprocessing layer to the dataset.
Overrides the default adapt method to apply relevant preprocessing to the
inputs before passing to the combiner.
Arguments:
data: The data to train on. It can be passed either as a tf.data Dataset,
or as a numpy array.
reset_state: Optional argument specifying whether to clear the state of
the layer at the start of the call to `adapt`. This must be True for
this layer, which does not support repeated calls to `adapt`.
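    Example (illustrative):
      layer = TextVectorization(max_tokens=5000, output_mode="int")
      layer.adapt(text_dataset.batch(64))  # text_dataset yields string batches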
"""
if not reset_state:
raise ValueError("TextVectorization does not support streaming adapts.")
# Build the layer explicitly with the original data shape instead of relying
# on an implicit call to `build` in the base layer's `adapt`, since
# preprocessing changes the input shape.
if isinstance(data, np.ndarray):
if data.ndim == 1:
data = np.expand_dims(data, axis=-1)
self.build(data.shape)
preprocessed_inputs = self._to_numpy(self._preprocess(data))
elif isinstance(data, dataset_ops.DatasetV2):
# TODO(momernick): Replace this with a more V2-friendly API.
shape = dataset_ops.get_legacy_output_shapes(data)
if not isinstance(shape, tensor_shape.TensorShape):
raise ValueError("The dataset passed to 'adapt' must contain a single "
"tensor value.")
if shape.rank == 1:
data = data.map(lambda tensor: array_ops.expand_dims(tensor, -1))
self.build(dataset_ops.get_legacy_output_shapes(data))
preprocessed_inputs = data.map(self._preprocess)
else:
raise ValueError(
"adapt() requires a Dataset or a Numpy array as input, got {}".format(
type(data)))
super(TextVectorization, self).adapt(preprocessed_inputs, reset_state)
def get_vocabulary(self):
return self._index_lookup_layer.get_vocabulary()
def get_config(self):
config = {
"max_tokens": self._max_tokens,
"standardize": self._standardize,
"split": self._split,
"ngrams": self._ngrams_arg,
"output_mode": self._output_mode,
"output_sequence_length": self._output_sequence_length,
"pad_to_max_tokens": self._pad_to_max,
}
base_config = super(TextVectorization, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def count_params(self):
# This method counts the number of scalars in the weights of this layer.
# Since this layer doesn't have any /actual/ weights (in that there's
# nothing in this layer that can be trained - we only use the weight
# abstraction for ease of saving!) we return 0.
return 0
def set_vocabulary(self,
vocab,
df_data=None,
oov_df_value=None,
append=False):
"""Sets vocabulary (and optionally document frequency) data for this layer.
This method sets the vocabulary and DF data for this layer directly, instead
of analyzing a dataset through 'adapt'. It should be used whenever the vocab
(and optionally document frequency) information is already known. If
vocabulary data is already present in the layer, this method will either
replace it, if 'append' is set to False, or append to it (if 'append' is set
to True).
Arguments:
vocab: An array of string tokens.
df_data: An array of document frequency data. Only necessary if the layer
output_mode is TFIDF.
oov_df_value: The document frequency of the OOV token. Only necessary if
output_mode is TFIDF. OOV data is optional when appending additional
data in TFIDF mode; if an OOV value is supplied it will overwrite the
existing OOV value.
append: Whether to overwrite or append any existing vocabulary data.
Raises:
ValueError: If there are too many inputs, the inputs do not match, or
input data is missing.
RuntimeError: If the vocabulary cannot be set when this function is
        called. This happens in "binary", "count", and "tfidf" modes
        when "pad_to_max_tokens" is False and the layer itself has already been
called.
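    Example (illustrative):
      layer.set_vocabulary(vocab=["earth", "wind"], df_data=[2.3, 1.8],
                           oov_df_value=1.0)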
"""
if self._output_mode != TFIDF and df_data is not None:
raise ValueError("df_data should only be set if output_mode is TFIDF. "
"output_mode is %s." % self._output_mode)
if (self._output_mode in [BINARY, COUNT, TFIDF] and self._called and
not self._pad_to_max):
raise RuntimeError(("When using TextVectorization in {mode} mode and "
"pad_to_max_tokens is False, the vocabulary cannot "
"be changed after the layer is "
"called.").format(mode=self._output_mode))
current_table_size = self._index_lookup_layer.vocab_size()
self._index_lookup_layer.set_vocabulary(vocab, append)
# When doing raw or integer output, we don't have a Vectorize layer to
# manage. In this case, we can return directly.
if self._output_mode in [None, INT]:
return
if not self._pad_to_max or self._max_tokens is None:
num_tokens = self._index_lookup_layer.vocab_size() + self._reserved_values
self._vectorize_layer.set_num_elements(num_tokens)
# We're only _really_ appending if the table_size is nonzero. This is
# important for some sanity checks in tfidf mode (specifically, checking if
# oov_df_value is set or not) and handling existing tfidf weight data.
append = append if current_table_size > 0 else False
if self._output_mode == TFIDF:
if df_data is None:
raise ValueError("df_data must be set if output_mode is TFIDF")
if len(vocab) != len(df_data):
raise ValueError("df_data must be the same length as vocab. "
"len(df_data) is %s, len(vocab) is %s" %
(len(vocab), len(df_data)))
if not append and oov_df_value is None:
raise ValueError("You must pass an oov_df_value the first time "
"'set_vocabulary' is called when output_mode is "
"TFIDF.")
df_data = self._convert_to_ndarray(df_data)
if append:
# The existing IDF data is stored in a Keras weight, so we can get it
# by calling K.get_value() on the weight object. Take the first
# table_size+1 values in case we're padding the weight with zeros
existing_df_data = K.get_value(
self._vectorize_layer.tf_idf_weights)[:current_table_size + 1]
df_data = np.append(existing_df_data, df_data, axis=0)
# If we are appending and need to replace the OOV DF value, we can
# assign it over the existing OOV DF value at index 0 of the (already-
# concatenated) DF value array.
if oov_df_value is not None:
df_data[0] = oov_df_value
else:
# If we are not appending (that is, we have only new data) we need to
        # insert the OOV value at the front of the array. (This is an append to
# the head, not a replacement of the zeroth value.)
if not isinstance(oov_df_value, np.ndarray):
oov_df_value = np.array([oov_df_value])
df_data = np.insert(df_data, 0, oov_df_value)
self._vectorize_layer.set_tfidf_data(df_data)
def build(self, input_shape):
    # We have to use 'and not ==' here because both 'input_shape[1] == 1' and
    # 'input_shape[1] != 1' can evaluate to None for undefined shape axes.
    # With 'and !=', an undefined shape would make the expression evaluate to
    # False instead of True; the expression needs to evaluate to True in that
    # case.
if self._split is not None and not input_shape[1] == 1: # pylint: disable=g-comparison-negation
raise RuntimeError(
"When using TextVectorization to tokenize strings, the first "
"dimension of the input array must be 1, got shape "
"{}".format(input_shape))
super(TextVectorization, self).build(input_shape)
def _set_state_variables(self, updates):
if not self.built:
raise RuntimeError("_set_state_variables() must be called after build().")
if self._output_mode == TFIDF:
self.set_vocabulary(updates[_VOCAB_NAME], updates[_IDF_NAME],
updates[_OOV_IDF_NAME])
else:
self.set_vocabulary(updates[_VOCAB_NAME])
def _preprocess(self, inputs):
if self._standardize == LOWER_AND_STRIP_PUNCTUATION:
if ragged_tensor.is_ragged(inputs):
lowercase_inputs = ragged_functional_ops.map_flat_values(
gen_string_ops.string_lower, inputs)
# Depending on configuration, we may never touch the non-data tensor
# in the ragged inputs tensor. If that is the case, and this is the
# only layer in the keras model, running it will throw an error.
# To get around this, we wrap the result in an identity.
lowercase_inputs = array_ops.identity(lowercase_inputs)
else:
lowercase_inputs = gen_string_ops.string_lower(inputs)
inputs = string_ops.regex_replace(lowercase_inputs, DEFAULT_STRIP_REGEX,
"")
elif callable(self._standardize):
inputs = self._standardize(inputs)
elif self._standardize is not None:
raise ValueError(("%s is not a supported standardization. "
"TextVectorization supports the following options "
"for `standardize`: None, "
"'lower_and_strip_punctuation', or a "
"Callable.") % self._standardize)
if self._split is not None:
# If we are splitting, we validate that the 1st axis is of dimension 1 and
# so can be squeezed out. We do this here instead of after splitting for
# performance reasons - it's more expensive to squeeze a ragged tensor.
inputs = array_ops.squeeze(inputs, axis=1)
if self._split == SPLIT_ON_WHITESPACE:
# This treats multiple whitespaces as one whitespace, and strips leading
# and trailing whitespace.
inputs = ragged_string_ops.string_split_v2(inputs)
elif callable(self._split):
inputs = self._split(inputs)
else:
raise ValueError(
          ("%s is not a supported splitting. "
"TextVectorization supports the following options "
"for `split`: None, 'whitespace', or a Callable.") % self._split)
# Note that 'inputs' here can be either ragged or dense depending on the
# configuration choices for this Layer. The strings.ngrams op, however, does
# support both ragged and dense inputs.
if self._ngrams is not None:
inputs = ragged_string_ops.ngrams(
inputs, ngram_width=self._ngrams, separator=" ")
return inputs
def call(self, inputs):
if inputs.shape.rank == 1:
inputs = array_ops.expand_dims(inputs, axis=-1)
self._called = True
inputs = self._preprocess(inputs)
# If we're not doing any output processing, return right away.
if self._output_mode is None:
return inputs
indexed_data = self._index_lookup_layer(inputs)
if self._output_mode == INT:
# Once we have the dense tensor, we can return it if we weren't given a
# fixed output sequence length. If we were, though, we have to dynamically
# choose whether to pad or trim it based on each tensor.
# We need to convert to dense if we have a ragged tensor.
if ragged_tensor.is_ragged(indexed_data):
dense_data = indexed_data.to_tensor(default_value=0)
else:
dense_data = indexed_data
if self._output_sequence_length is None:
dense_data.set_shape(tensor_shape.TensorShape((None, None)))
return dense_data
else:
sequence_len = K.shape(dense_data)[1]
pad_amt = self._output_sequence_length - sequence_len
pad_fn = lambda: array_ops.pad(dense_data, [[0, 0], [0, pad_amt]])
slice_fn = lambda: dense_data[:, :self._output_sequence_length]
output_tensor = control_flow_ops.cond(
sequence_len < self._output_sequence_length,
true_fn=pad_fn,
false_fn=slice_fn)
output_tensor.set_shape(
tensor_shape.TensorShape((None, self._output_sequence_length)))
return output_tensor
# If we're not returning integers here, we rely on the vectorization layer
# to create the output.
return self._vectorize_layer(indexed_data)
class _TextVectorizationAccumulator(
collections.namedtuple("_TextVectorizationAccumulator",
["count_dict", "per_doc_count_dict", "metadata"])):
pass
# A note on this combiner: This contains functionality that will be extracted
# into the Vectorization and IndexLookup combiner objects. At that point,
# TextVectorization can become a PreprocessingStage instead of a Layer and
# this combiner can be retired. Until then, we leave this as is instead of
# attempting a refactor of what will soon be deleted.
class _TextVectorizationCombiner(Combiner):
"""Combiner for the TextVectorization preprocessing layer.
This class encapsulates the logic for computing a vocabulary based on the
frequency of each token.
Attributes:
vocab_size: (Optional) If set, only the top `vocab_size` tokens (based on
frequency across the dataset) are retained in the vocabulary. If None, or
set to a value greater than the total number of distinct tokens in the
dataset, all tokens are retained.
compute_idf: (Optional) If set, the inverse document frequency will be
computed for each value.
"""
def __init__(self, vocab_size=None, compute_idf=False):
self._vocab_size = vocab_size
self._compute_idf = compute_idf
self._input_dtype = dtypes.string
def compute(self, values, accumulator=None):
"""Compute a step in this computation, returning a new accumulator."""
if dtypes.as_dtype(self._input_dtype) != dtypes.as_dtype(values.dtype):
raise RuntimeError("Expected input type %s, got %s" %
(self._input_dtype, values.dtype))
if ragged_tensor.is_ragged(values):
values = values.to_list()
if isinstance(values, ops.EagerTensor):
values = values.numpy()
if isinstance(values, np.ndarray):
values = values.tolist()
if accumulator is None:
accumulator = self._create_accumulator()
# TODO(momernick): Benchmark improvements to this algorithm.
for document in values:
current_doc_id = accumulator.metadata[0]
for token in document:
accumulator.count_dict[token] += 1
if self._compute_idf:
doc_count = accumulator.per_doc_count_dict[token]
if doc_count["last_doc_id"] != current_doc_id:
doc_count["count"] += 1
doc_count["last_doc_id"] = current_doc_id
accumulator.metadata[0] += 1
return accumulator
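  # Worked example (illustrative only): feeding the two documents
  # [["a", "b", "a"], ["b", "c"]] through `compute` leaves
  # count_dict == {"a": 2, "b": 2, "c": 1}, per_doc_count_dict (when
  # compute_idf is enabled) recording that "a" and "c" each appear in 1
  # document and "b" in 2, and metadata == [2] (documents seen so far).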
def merge(self, accumulators):
"""Merge several accumulators to a single accumulator."""
if not accumulators:
return accumulators
base_accumulator = accumulators[0]
for accumulator in accumulators[1:]:
base_accumulator.metadata[0] += accumulator.metadata[0]
for token, value in accumulator.count_dict.items():
base_accumulator.count_dict[token] += value
if self._compute_idf:
for token, value in accumulator.per_doc_count_dict.items():
# Any newly created token counts in 'base_accumulator''s
# per_doc_count_dict will have a last_doc_id of -1. This is always
# less than the next doc id (which are strictly positive), so any
# future occurrences are guaranteed to be counted.
base_accumulator.per_doc_count_dict[token]["count"] += value["count"]
return base_accumulator
def _inverse_document_frequency(self, document_counts, num_documents):
"""Compute the inverse-document-frequency (IDF) component of TFIDF.
Uses the default weighting scheme described in
https://en.wikipedia.org/wiki/Tf%E2%80%93idf.
Args:
document_counts: An array of the # of documents each token appears in.
num_documents: An int representing the total number of documents
Returns:
An array of "inverse document frequency" weights.
"""
return np.log(1 + num_documents / (1 + np.array(document_counts)))
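  # Numeric sanity check (added, not from the original source): with
  # num_documents=100 and a token appearing in 10 documents,
  # idf = log(1 + 100 / (1 + 10)) = log(10.09...) ~= 2.31.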
def extract(self, accumulator):
"""Convert an accumulator into a dict of output values.
Args:
accumulator: An accumulator aggregating over the full dataset.
Returns:
A dict of:
"vocab": A list of the retained items in the vocabulary.
"idf": The inverse-document-frequency for each item in vocab.
idf[vocab_idx] is the IDF value for the corresponding vocab item.
"oov_idf": The inverse-document-frequency for the OOV token.
"""
if self._compute_idf:
vocab_counts, document_counts, num_documents = accumulator
else:
vocab_counts, _, _ = accumulator
sorted_counts = sorted(
vocab_counts.items(), key=operator.itemgetter(1, 0), reverse=True)
vocab_data = (
sorted_counts[:self._vocab_size] if self._vocab_size else sorted_counts)
vocab = [data[0] for data in vocab_data]
if self._compute_idf:
doc_counts = [document_counts[token]["count"] for token in vocab]
idf = self._inverse_document_frequency(doc_counts, num_documents[0])
oov_idf = np.array([np.log(1 + num_documents[0])])
return {_VOCAB_NAME: vocab, _IDF_NAME: idf, _OOV_IDF_NAME: oov_idf}
else:
return {_VOCAB_NAME: vocab}
def restore(self, output):
"""Create an accumulator based on 'output'."""
raise NotImplementedError(
"TextVectorization does not restore or support streaming updates.")
def serialize(self, accumulator):
"""Serialize an accumulator for a remote call."""
output_dict = {}
output_dict["metadata"] = accumulator.metadata
output_dict["vocab"] = list(accumulator.count_dict.keys())
output_dict["vocab_counts"] = list(accumulator.count_dict.values())
if self._compute_idf:
output_dict["idf_vocab"] = list(accumulator.per_doc_count_dict.keys())
output_dict["idf_counts"] = [
counter["count"]
for counter in accumulator.per_doc_count_dict.values()
]
return compat.as_bytes(json.dumps(output_dict))
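  # Hedged example of the serialized payload: roughly
  # b'{"metadata": [2], "vocab": ["a", "b"], "vocab_counts": [2, 2], ...}',
  # with the "idf_vocab"/"idf_counts" keys present only when compute_idf is
  # enabled.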
def deserialize(self, encoded_accumulator):
"""Deserialize an accumulator received from 'serialize()'."""
accumulator_dict = json.loads(compat.as_text(encoded_accumulator))
accumulator = self._create_accumulator()
accumulator.metadata[0] = accumulator_dict["metadata"][0]
count_dict = dict(
zip(accumulator_dict["vocab"], accumulator_dict["vocab_counts"]))
accumulator.count_dict.update(count_dict)
if self._compute_idf:
create_dict = lambda x: {"count": x, "last_doc_id": -1}
idf_count_dicts = [
create_dict(count) for count in accumulator_dict["idf_counts"]
]
idf_dict = dict(zip(accumulator_dict["idf_vocab"], idf_count_dicts))
accumulator.per_doc_count_dict.update(idf_dict)
return accumulator
def _create_accumulator(self):
"""Accumulate a sorted array of vocab tokens and corresponding counts."""
count_dict = collections.defaultdict(int)
if self._compute_idf:
create_default_dict = lambda: {"count": 0, "last_doc_id": -1}
per_doc_count_dict = collections.defaultdict(create_default_dict)
else:
per_doc_count_dict = None
metadata = [0]
return _TextVectorizationAccumulator(count_dict, per_doc_count_dict,
metadata)
| apache-2.0 |
keisukefukuda/wrenchset | wrenchset.py | 1 | 16308 | #!env python
#-*- coding: utf-8 -*-
import sys,os,os.path,re
import glob
import shutil
from os.path import join as join
from subprocess import Popen, PIPE, check_call
import copy
VERSION_MAJOR=0
VERSION_MINOR=1
VERSION_PATCH=0
VERSION="%d.%d.%d" % (VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH)
Config = {
'HOME' : os.environ['HOME'],
'PREFIX' : join(os.environ['HOME'], '.wrenchset')
}
PMake = '-j3'
gmp="https://gmplib.org/download/gmp/gmp-6.0.0a.tar.bz2"
mpfr="http://www.mpfr.org/mpfr-current/mpfr-3.1.3.tar.bz2"
mpc="ftp://ftp.gnu.org/gnu/mpc/mpc-1.0.3.tar.gz"
mpich="http://www.mpich.org/static/downloads/3.1.4/mpich-3.1.4.tar.gz"
PACKAGE_DIR = join(Config['HOME'], ".wrenchset/tmp/.packages")
TMP_DIR = join(Config['HOME'], ".wrenchset/tmp/")
gmp_ver = "6.0.0a"
mpfr_ver = "3.1.3"
mpc_ver = "1.0.2"
def die(*args):
msg = " ".join(args)
sys.stderr.write("wrenchset: FATAL: " + msg + "\n")
sys.exit(-1)
def version_sorted(vers):
return sorted(vers, key = lambda x: map(int, x.split('.')))
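# Example (illustrative): version_sorted(["4.10.1", "4.9.3"]) returns
# ["4.9.3", "4.10.1"] because components are compared numerically rather than
# lexicographically.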
def version_gt(a, b): # if a > b
return map(int, a.split('.')) > map(int, b.split('.'))
def version_lt(a, b): # if a < b
return map(int, a.split('.')) < map(int, b.split('.'))
def version_ge(a, b): # if a >= b
return map(int, a.split('.')) >= map(int, b.split('.'))
def version_le(a, b): # if a <= b
return map(int, a.split('.')) <= map(int, b.split('.'))
def version_eq(a, b): # if a == b
return map(int, a.split('.')) == map(int, b.split('.'))
class LLVM(object):
@classmethod
def versions(cls):
return version_sorted(["3.4.0", "3.4.1", "3.4.2",
"3.5.0", "3.5.1", "3.5.2",
"3.6.0", "3.6.1", "3.6.2",
"3.7.0"])
@classmethod
def list(cls):
return cls.versions()
@classmethod
def prefix(cls, ver):
return join(Config['PREFIX'], 'llvm-%s' % ver)
@classmethod
def installed(cls, ver):
return os.path.exists(join(cls.prefix(ver), 'bin', 'clang'))
@classmethod
def list_installed(cls):
return [ver for ver in cls.versions() if cls.installed(ver)]
@classmethod
def bin_cxx(cls, ver):
if not cls.installed(ver):
raise Exception("Internal Error: gcc version %s is not installed." % ver)
return join(Config['PREFIX'], 'bin', 'g++-%s' % ver)
@classmethod
def use(cls, ver):
if not cls.installed(ver):
die("LLVM %s is not installed." % ver)
# Remove redundant path from PATH
llvm_path = join(cls.prefix(ver), 'bin')
paths = [p for p in os.environ['PATH'].split(':') if p.find("/.wrenchset/") == -1]
paths = [llvm_path] + paths
# Remove redundant library path from LD_LIBRARY_PATH
llvm_libpaths = cls.library_paths(ver)
ld_paths = [p for p in os.environ['LD_LIBRARY_PATH'].split(':') if p.find("/.wrenchset/") == -1]
ld_paths = llvm_libpaths + ld_paths
cc = cls.bin_cc(ver)
cxx = cls.bin_cxx(ver)
print(";".join(["export LD_LIBRARY_PATH=%s" % ':'.join(ld_paths),
"export PATH=%s" % ':'.join(paths),
"alias cc=%s" % cc,
"alias cxx=%s" % cxx]))
@classmethod
def install(cls, ver):
# LLVM requires gcc 4.7 or later (fixed to 4.9.3 in this script)
if os.path.exists(join(cls.prefix(ver), 'bin', 'clang')):
print("clang-%s is installed" % ver)
return
gcc_ver = "4.9.3"
if not GCC.installed(gcc_ver):
die("To build LLVM, gcc >= 4.7 is required. Please install gcc 4.9.3. Intalled versions: %s" % str(GCC.list_installed()))
# install
llvm_pkg = glob.glob(join(PACKAGE_DIR, "llvm-%s.src.tar.*" % ver))
llvm_dir = deflate_package(llvm_pkg, TMP_DIR)
cfe_pkg = glob.glob(join(PACKAGE_DIR, "cfe-%s.src.tar.*" % ver))
cfe_dir = deflate_package(cfe_pkg, TMP_DIR)
clang_dir = join(llvm_dir, 'tools', 'clang')
if os.path.exists(clang_dir):
shutil.rmtree(clang_dir)
shutil.move(cfe_dir, clang_dir)
build_dir = join(TMP_DIR, 'llvm-build.%s' % ver)
if os.path.isdir(build_dir):
shutil.rmtree(build_dir)
os.mkdir(build_dir)
# env vars to build
lib_paths = ':'.join(GCC.library_paths(gcc_ver))
env = copy.deepcopy(os.environ)
env["LD_LIBRARY_PATH"] = lib_paths + ":" + env["LD_LIBRARY_PATH"]
env["CC"] = GCC.bin_cc(gcc_ver)
env["CXX"] = GCC.bin_cxx(gcc_ver)
print("wrenchset: LD_LIBRARY_PATH=", env["LD_LIBRARY_PATH"])
print("wrenchset: CC=", env["CC"])
print("wrenchset: CXX=", env["CXX"])
print("wrenchset: in %s" % build_dir)
print("wrenchset: ", ' '.join(['sh', join(llvm_dir, 'configure'),
'--enable-cxx1y',
'--prefix=%s' % cls.prefix(ver)]))
check_call(['sh', join(llvm_dir, 'configure'),
'--enable-cxx1y',
'--prefix=%s' % cls.prefix(ver),
'--with-gcc-toolchain=%s' % GCC.prefix(gcc_ver)],
cwd = build_dir, env=env)
check_call(['make', PMake], cwd = build_dir, env=env)
check_call(['make', 'install'], cwd = build_dir, env=env)
@classmethod
def bin_cxx(cls, ver):
if not cls.installed(ver):
raise Exception("Internal Error: gcc version %s is not installed." % ver)
return join(cls.prefix(ver), 'bin', 'clang++')
@classmethod
def bin_cc(cls, ver):
if not cls.installed(ver):
raise Exception("Internal Error: gcc version %s is not installed." % ver)
return join(cls.prefix(ver), 'bin', 'clang')
@classmethod
def library_paths(cls, ver):
gcc_ver = "4.9.3"
if not GCC.installed(gcc_ver):
die("To build LLVM, gcc >= 4.7 is required. Please install gcc 4.9.3. Intalled versions: %s" % str(GCC.list_installed()))
return GCC.library_paths(gcc_ver) + [join(cls.prefix(ver), 'lib')]
class GCC(object):
@classmethod
def versions(cls):
return version_sorted(['5.3.0', '5.2.0', '5.1.0',
'4.9.3', '4.9.2', '4.9.1', '4.9.0',
'4.8.5', '4.8.4', '4.8.3', '4.8.2', '4.8.1', '4.8.0'])
@classmethod
def use(cls, ver):
if not cls.installed(ver):
die("GCC %s is not installed." % ver)
# Remove redundant path from PATH
gcc_path = join(cls.prefix(ver), 'bin')
paths = [p for p in os.environ['PATH'].split(':') if p.find("/.wrenchset/") == -1]
paths = [gcc_path] + paths
# Remove redundant library path from LD_LIBRARY_PATH
gcc_libpaths = cls.library_paths(ver)
ld_paths = [p for p in os.environ.get('LD_LIBRARY_PATH','').split(':') if p.find("/.wrenchset/") == -1]
ld_paths = gcc_libpaths + ld_paths
cc = cls.bin_cc(ver)
cxx = cls.bin_cxx(ver)
print(";".join(["export LD_LIBRARY_PATH=%s" % ':'.join(ld_paths),
"export PATH=%s" % ':'.join(paths),
"alias cc=%s" % cc,
"alias cxx=%s" % cxx
]))
@classmethod
def prefix(cls, ver):
return join(Config['PREFIX'], 'gcc-%s' % ver)
@classmethod
def list(cls):
return cls.versions()
@classmethod
def installed(cls, ver):
p = cls.prefix(ver)
return os.path.exists(join(p, 'bin', 'gcc'))
@classmethod
def list_installed(cls):
return [ver for ver in cls.versions() if cls.installed(ver)]
@classmethod
def install(cls, ver):
install_gmp()
install_mpfr()
install_mpc()
# check if gcc-{ver} is installed
p = cls.prefix(ver)
if os.path.exists(join(p, 'bin', 'gcc')):
print("gcc-%s is installed" % ver)
return
# install
pkg = glob.glob(join(PACKAGE_DIR, "gcc-" + ver + '*'))
try:
dname = deflate_package(pkg, TMP_DIR)
except:
print("Can't find gcc package: gcc-%s.* in %s" % (ver, PACKAGE_DIR))
exit(-1)
env = copy.deepcopy(os.environ)
if 'LD_LIBRARY_PATH' not in env: env['LD_LIBRARY_PATH'] = ""
env["LD_LIBRARY_PATH"] = join(Config['PREFIX'],'lib') + ":" + join(Config['PREFIX'],'lib64') + ":" + env["LD_LIBRARY_PATH"]
cmd = ['sh', 'configure',
'--with-gmp=%s' % Config['PREFIX'],
'--with-mpfr=%s' % Config['PREFIX'],
'--with-mpc=%s' % Config['PREFIX'],
'--disable-java',
'--disable-multilib',
'--prefix=%s' % p]
check_call(cmd, cwd = dname, env = env)
check_call(['make', PMake], cwd = dname, env = env)
check_call(['make', 'install'], cwd = dname, env = env)
@classmethod
def bin_cc(cls, ver):
if not cls.installed(ver):
raise Exception("Internal Error: gcc version %s is not installed." % ver)
p = cls.prefix(ver)
return join(p, 'bin', 'gcc')
@classmethod
def bin_cxx(cls, ver):
if not cls.installed(ver):
raise Exception("Internal Error: gcc version %s is not installed." % ver)
p = cls.prefix(ver)
return join(p, 'bin', 'g++')
@classmethod
def library_paths(cls, ver):
p = cls.prefix(ver)
return [join(Config['PREFIX'], 'lib'), join(Config['PREFIX'], 'lib64'), join(p, 'lib'), join(p, 'lib64')]
if not os.path.exists(TMP_DIR):
try:
os.makedirs(TMP_DIR)
except OSError as err:
if err.errno != 17:
raise
def filter_if_match(reg, lst):
return filter(lambda x: re.match(reg, x), lst)
def deflate_package(pkgs, d, **kwd):
if isinstance(pkgs, list):
        # If there are multiple candidates, select the easiest package format to unpack
for ext in [r'.*\.gz$', r'.*\.bz2$', r'.*\.xz$']:
p = filter_if_match(ext, pkgs)
if len(p) > 0:
pkg = p[0]
break
else:
raise Exception("No deflatable package: %s in directory '%s'" % (str(pkgs), d))
else:
pkg = pkgs
if re.match(r'.*\.gz$', pkg):
tar_opt = '-z'
elif re.match(r'.*\.bz2$', pkg):
tar_opt = '-j'
elif re.match(r'.*\.xz$', pkg):
tar_opt = '-J'
else:
raise "Unknown package format: " % pkg
p = Popen(['tar', tar_opt, '-t' , '-f', pkg], stdout=PIPE, cwd=TMP_DIR)
out = p.communicate()[0]
assert p.returncode == 0
dname = out.split()[0].split('/')[0]
dname = join(d, dname)
if os.path.exists(dname):
#shutil.rmtree(dname)
print("%s already exists" % dname)
return dname
check_call(['tar', tar_opt, '-xvf', pkg], cwd=d)
return dname
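# Illustrative call (paths are assumptions): passing
# ["~/.wrenchset/tmp/.packages/gcc-4.9.3.tar.gz"] with d=TMP_DIR unpacks the
# archive via `tar -z -xvf` and returns the extracted top-level directory,
# e.g. "~/.wrenchset/tmp/gcc-4.9.3".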
def install_gmp():
# check if gmp is already installed
if len(glob.glob(join(Config['PREFIX'], 'lib', 'libgmp*'))) > 0:
sys.stderr.write("libgmp is already installed\n")
return
# install
pkg = glob.glob(join(PACKAGE_DIR, "gmp-" + gmp_ver + '*'))
dname = deflate_package(pkg, TMP_DIR)
env = copy.deepcopy(os.environ)
if 'LD_LIBRARY_PATH' not in env: env['LD_LIBRARY_PATH'] = ""
env["LD_LIBRARY_PATH"] = join(Config['PREFIX'],'lib') + ":" + join(Config['PREFIX'],'lib64') + ":" + env["LD_LIBRARY_PATH"]
check_call(['sh', 'configure', '--prefix=%s' % Config['PREFIX']], cwd=dname, env=env)
check_call(['make', PMake, 'install'], cwd=dname, env=env)
def install_mpfr():
# check if gmp is already installed
if len(glob.glob(join(Config['PREFIX'], 'lib', 'libmpfr*'))) > 0:
sys.stderr.write("libmpfr is already installed\n")
return
# install
pkg = glob.glob(join(PACKAGE_DIR, "mpfr-" + mpfr_ver + '*'))
dname = deflate_package(pkg, TMP_DIR)
env = copy.deepcopy(os.environ)
if 'LD_LIBRARY_PATH' not in env: env['LD_LIBRARY_PATH'] = ""
env["LD_LIBRARY_PATH"] = join(Config['PREFIX'],'lib') + ":" + join(Config['PREFIX'],'lib64') + ":" + env["LD_LIBRARY_PATH"]
check_call(['sh', 'configure',
'--with-gmp=%s' % Config['PREFIX'],
'--prefix=%s' % Config['PREFIX']], cwd=dname,env=env)
check_call(['make', PMake, 'install'], cwd=dname,env=env)
def install_mpc():
# check if gmp is already installed
if len(glob.glob(join(Config['PREFIX'], 'lib', 'libmpc*'))) > 0:
sys.stderr.write("libmpc is already installed\n")
return
# install
pkg = glob.glob(join(PACKAGE_DIR, "mpc-" + mpc_ver + '*'))
dname = deflate_package(pkg, TMP_DIR)
env = copy.deepcopy(os.environ)
if 'LD_LIBRARY_PATH' not in env: env['LD_LIBRARY_PATH'] = ""
env["LD_LIBRARY_PATH"] = join(Config['PREFIX'],'lib') + ":" + join(Config['PREFIX'],'lib64') + ":" + env["LD_LIBRARY_PATH"]
check_call(['sh', 'configure',
'--with-gmp=%s' % Config['PREFIX'],
'--with-mpfr=%s' % Config['PREFIX'],
'--prefix=%s' % Config['PREFIX']], cwd=dname, env=env)
check_call(['make', PMake, 'install'], cwd=dname, env=env)
def command_list(args):
if len(args) > 0:
if args[0] == "installed":
list_gcc = GCC.list_installed()
list_llvm = LLVM.list_installed()
else:
raise Exception("Unknown subcommand for 'list' : '%s'" % args[0])
else:
list_gcc = GCC.list()
list_llvm = LLVM.list()
for v in list_gcc: print("GCC-%s" % v)
for v in list_llvm: print("LLVM-%s" % v)
def parse_compiler_ver(args):
if len(args) >= 2:
# parse compiler name
if re.match(r'gcc', args[0], re.I) or re.match(r'gnu', args[0], re.I):
compiler = 'gcc'
elif re.match(r'llvm', args[0], re.I) or re.match(r'^clang$', args[0], re.I):
compiler = 'llvm'
else:
raise Exception("Invalid compiler name : '%s'" % args[0])
# parse version number
if re.match(r'^\d+(\.\d+(\.\d+)?)?$', args[1]):
            return (compiler, args[1])
else:
raise Exception("Invalid version number: '%s'" % args[1])
elif len(args) == 1:
m = re.match(r'(gcc|gnu)-?(\d+(\.\d+(\.\d+)?)?)', args[0], re.I)
if m:
return ('gcc', m.group(2))
m = re.match(r'(llvm|clang)-?(\d+(\.\d+(\.\d+)?)?)', args[0], re.I)
if m:
return ('llvm', m.group(2))
raise Exception("Cannot parse compiler name and version number: '%s'" % args[0])
def command_prefix(args):
if len(args) == 0:
print("not yet implemented.")
exit(0)
compiler, ver = parse_compiler_ver(args)
if compiler == "llvm": print(LLVM.prefix(ver))
elif compiler == "gcc": print(GCC.prefix(ver))
def command_install(args):
compiler, ver = parse_compiler_ver(args)
if compiler == "gcc":
if not ver in GCC.versions():
raise Exception("Unknown gcc version: '%s'" % ver)
GCC.install(ver)
return
elif compiler == "llvm":
if not ver in LLVM.versions():
raise Exception("Unknown LLVM version: '%s'" % ver)
LLVM.install(ver)
return
def dispatch_command(args):
cmd = args[0]
if cmd == 'list':
command_list(args[1:])
elif cmd == 'install':
command_install(args[1:])
elif cmd == 'use':
command_use(args[1:])
elif cmd == 'prefix':
command_prefix(args[1:])
else:
sys.stderr.write("wrenchset: Error: unknown command '%s'\n" % cmd)
sys.exit(-1)
def command_use(args):
try:
compiler, ver = parse_compiler_ver(args)
except Exception as e:
die(str(e))
if re.match(r'llvm', compiler, re.I):
LLVM.use(ver)
elif re.match(r'gcc', compiler, re.I):
GCC.use(ver)
else:
die("Internal error: Unknown compiler: '%s'" % compiler)
if __name__ == "__main__":
dispatch_command(sys.argv[1:])
| mit |
jakevdp/networkx | networkx/tests/test_relabel.py | 29 | 6842 | #!/usr/bin/env python
from nose.tools import *
from networkx import *
from networkx.convert import *
from networkx.algorithms.operators import *
from networkx.generators.classic import barbell_graph,cycle_graph
from networkx.testing import *
class TestRelabel():
def test_convert_node_labels_to_integers(self):
# test that empty graph converts fine for all options
G=empty_graph()
H=convert_node_labels_to_integers(G,100)
assert_equal(H.name, '(empty_graph(0))_with_int_labels')
assert_equal(H.nodes(), [])
assert_equal(H.edges(), [])
for opt in ["default", "sorted", "increasing degree",
"decreasing degree"]:
G=empty_graph()
H=convert_node_labels_to_integers(G,100, ordering=opt)
assert_equal(H.name, '(empty_graph(0))_with_int_labels')
assert_equal(H.nodes(), [])
assert_equal(H.edges(), [])
G=empty_graph()
G.add_edges_from([('A','B'),('A','C'),('B','C'),('C','D')])
G.name="paw"
H=convert_node_labels_to_integers(G)
degH=H.degree().values()
degG=G.degree().values()
assert_equal(sorted(degH), sorted(degG))
H=convert_node_labels_to_integers(G,1000)
degH=H.degree().values()
degG=G.degree().values()
assert_equal(sorted(degH), sorted(degG))
assert_equal(H.nodes(), [1000, 1001, 1002, 1003])
H=convert_node_labels_to_integers(G,ordering="increasing degree")
degH=H.degree().values()
degG=G.degree().values()
assert_equal(sorted(degH), sorted(degG))
assert_equal(degree(H,0), 1)
assert_equal(degree(H,1), 2)
assert_equal(degree(H,2), 2)
assert_equal(degree(H,3), 3)
H=convert_node_labels_to_integers(G,ordering="decreasing degree")
degH=H.degree().values()
degG=G.degree().values()
assert_equal(sorted(degH), sorted(degG))
assert_equal(degree(H,0), 3)
assert_equal(degree(H,1), 2)
assert_equal(degree(H,2), 2)
assert_equal(degree(H,3), 1)
H=convert_node_labels_to_integers(G,ordering="increasing degree",
label_attribute='label')
degH=H.degree().values()
degG=G.degree().values()
assert_equal(sorted(degH), sorted(degG))
assert_equal(degree(H,0), 1)
assert_equal(degree(H,1), 2)
assert_equal(degree(H,2), 2)
assert_equal(degree(H,3), 3)
# check mapping
assert_equal(H.node[3]['label'],'C')
assert_equal(H.node[0]['label'],'D')
assert_true(H.node[1]['label']=='A' or H.node[2]['label']=='A')
assert_true(H.node[1]['label']=='B' or H.node[2]['label']=='B')
def test_convert_to_integers2(self):
G=empty_graph()
G.add_edges_from([('C','D'),('A','B'),('A','C'),('B','C')])
G.name="paw"
H=convert_node_labels_to_integers(G,ordering="sorted")
degH=H.degree().values()
degG=G.degree().values()
assert_equal(sorted(degH), sorted(degG))
H=convert_node_labels_to_integers(G,ordering="sorted",
label_attribute='label')
assert_equal(H.node[0]['label'],'A')
assert_equal(H.node[1]['label'],'B')
assert_equal(H.node[2]['label'],'C')
assert_equal(H.node[3]['label'],'D')
@raises(nx.NetworkXError)
def test_convert_to_integers_raise(self):
G = nx.Graph()
H=convert_node_labels_to_integers(G,ordering="increasing age")
def test_relabel_nodes_copy(self):
G=empty_graph()
G.add_edges_from([('A','B'),('A','C'),('B','C'),('C','D')])
mapping={'A':'aardvark','B':'bear','C':'cat','D':'dog'}
H=relabel_nodes(G,mapping)
assert_equal(sorted(H.nodes()), ['aardvark', 'bear', 'cat', 'dog'])
def test_relabel_nodes_function(self):
G=empty_graph()
G.add_edges_from([('A','B'),('A','C'),('B','C'),('C','D')])
# function mapping no longer encouraged but works
def mapping(n):
return ord(n)
H=relabel_nodes(G,mapping)
assert_equal(sorted(H.nodes()), [65, 66, 67, 68])
def test_relabel_nodes_graph(self):
G=Graph([('A','B'),('A','C'),('B','C'),('C','D')])
mapping={'A':'aardvark','B':'bear','C':'cat','D':'dog'}
H=relabel_nodes(G,mapping)
assert_equal(sorted(H.nodes()), ['aardvark', 'bear', 'cat', 'dog'])
def test_relabel_nodes_digraph(self):
G=DiGraph([('A','B'),('A','C'),('B','C'),('C','D')])
mapping={'A':'aardvark','B':'bear','C':'cat','D':'dog'}
H=relabel_nodes(G,mapping,copy=False)
assert_equal(sorted(H.nodes()), ['aardvark', 'bear', 'cat', 'dog'])
def test_relabel_nodes_multigraph(self):
G=MultiGraph([('a','b'),('a','b')])
mapping={'a':'aardvark','b':'bear'}
G=relabel_nodes(G,mapping,copy=False)
assert_equal(sorted(G.nodes()), ['aardvark', 'bear'])
assert_edges_equal(sorted(G.edges()),
[('aardvark', 'bear'), ('aardvark', 'bear')])
def test_relabel_nodes_multidigraph(self):
G=MultiDiGraph([('a','b'),('a','b')])
mapping={'a':'aardvark','b':'bear'}
G=relabel_nodes(G,mapping,copy=False)
assert_equal(sorted(G.nodes()), ['aardvark', 'bear'])
assert_equal(sorted(G.edges()),
[('aardvark', 'bear'), ('aardvark', 'bear')])
def test_relabel_isolated_nodes_to_same(self):
G=Graph()
G.add_nodes_from(range(4))
mapping={1:1}
H=relabel_nodes(G, mapping, copy=False)
assert_equal(sorted(H.nodes()), list(range(4)))
@raises(KeyError)
def test_relabel_nodes_missing(self):
G=Graph([('A','B'),('A','C'),('B','C'),('C','D')])
mapping={0:'aardvark'}
G=relabel_nodes(G,mapping,copy=False)
def test_relabel_toposort(self):
K5=nx.complete_graph(4)
G=nx.complete_graph(4)
G=nx.relabel_nodes(G,dict( [(i,i+1) for i in range(4)]),copy=False)
nx.is_isomorphic(K5,G)
G=nx.complete_graph(4)
G=nx.relabel_nodes(G,dict( [(i,i-1) for i in range(4)]),copy=False)
nx.is_isomorphic(K5,G)
def test_relabel_selfloop(self):
G = nx.DiGraph([(1, 1), (1, 2), (2, 3)])
G = nx.relabel_nodes(G, {1: 'One', 2: 'Two', 3: 'Three'}, copy=False)
assert_equal(sorted(G.nodes()),['One','Three','Two'])
G = nx.MultiDiGraph([(1, 1), (1, 2), (2, 3)])
G = nx.relabel_nodes(G, {1: 'One', 2: 'Two', 3: 'Three'}, copy=False)
assert_equal(sorted(G.nodes()),['One','Three','Two'])
G = nx.MultiDiGraph([(1, 1)])
G = nx.relabel_nodes(G, {1: 0}, copy=False)
assert_equal(G.nodes(), [0])
| bsd-3-clause |
Diacamma2/financial | diacamma/accounting/views_budget.py | 1 | 13598 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from django.db.models import Q
from lucterios.framework.xferadvance import TITLE_MODIFY, TITLE_ADD, TITLE_DELETE, TITLE_PRINT, TITLE_OK, TITLE_CANCEL,\
XferSave
from lucterios.framework.xferadvance import XferListEditor
from lucterios.framework.xferadvance import XferAddEditor
from lucterios.framework.xferadvance import XferDelete
from lucterios.framework.xfergraphic import XferContainerAcknowledge
from lucterios.framework.xfercomponents import XferCompLabelForm, XferCompImage, XferCompSelect
from lucterios.framework.tools import ActionsManage, MenuManage, CLOSE_YES, WrapAction
from lucterios.framework.tools import SELECT_SINGLE
from lucterios.framework.signal_and_lock import Signal
from lucterios.CORE.xferprint import XferPrintAction
from diacamma.accounting.tools import current_system_account, format_with_devise
from diacamma.accounting.models import Budget, CostAccounting, FiscalYear, ChartsAccount, EntryLineAccount
from django.db.models.aggregates import Sum
@MenuManage.describ('accounting.change_budget')
class BudgetList(XferListEditor):
icon = "account.png"
model = Budget
field_id = 'budget'
caption = _("Prévisionnal budget")
def fillresponse_header(self):
row_id = self.get_max_row() + 1
if self.getparam('year', 0) != 0:
year = FiscalYear.get_current(self.getparam('year'))
lbl = XferCompLabelForm('title_year')
lbl.set_italic()
lbl.set_value("{[b]}%s{[/b]} : %s" % (_('fiscal year'), year))
lbl.set_location(1, row_id, 2)
self.add_component(lbl)
row_id += 1
if self.getparam('cost_accounting') is not None:
cost = CostAccounting.objects.get(id=self.getparam('cost_accounting', 0))
lbl = XferCompLabelForm('title_cost')
lbl.set_italic()
lbl.set_value("{[b]}%s{[/b]} : %s" % (_('cost accounting'), cost))
lbl.set_location(1, row_id, 2)
self.add_component(lbl)
Signal.call_signal('editbudget', self)
self.filter = Q()
if self.getparam('year', 0) != 0:
self.filter &= Q(year_id=self.getparam('year'))
if self.getparam('cost_accounting') is not None:
self.filter &= Q(cost_accounting_id=self.getparam('cost_accounting'))
def fill_grid(self, row, model, field_id, items):
XferListEditor.fill_grid(self, row, model, field_id, items)
if self.getparam('cost_accounting') is None:
grid = self.get_components(field_id)
grid.record_ids = []
grid.records = {}
last_code = ''
value = 0
for current_budget in items:
if last_code != current_budget.code:
if last_code != '':
chart = ChartsAccount.get_chart_account(last_code)
grid.set_value('C' + last_code, 'budget', str(chart))
grid.set_value('C' + last_code, 'montant', value)
value = 0
last_code = current_budget.code
value += current_budget.credit_debit_way() * current_budget.amount
if last_code != '':
chart = ChartsAccount.get_chart_account(last_code)
grid.set_value('C' + last_code, 'budget', str(chart))
grid.set_value('C' + last_code, 'montant', value)
grid.nb_lines = len(grid.records)
grid.order_list = None
grid.page_max = 1
grid.page_num = 0
def fillresponse_body(self):
self.get_components("title").colspan = 2
row_id = self.get_max_row() + 1
expense_filter = Q(code__regex=current_system_account().get_expence_mask()) | (Q(code__regex=current_system_account().get_annexe_mask()) & Q(amount__lt=0))
self.fill_grid(row_id, self.model, 'budget_expense', self.model.objects.filter(self.filter & expense_filter).distinct())
self.get_components("budget_expense").colspan = 3
self.get_components("budget_expense").description = _("Expense")
revenue_filter = Q(code__regex=current_system_account().get_revenue_mask()) | (Q(code__regex=current_system_account().get_annexe_mask()) & Q(amount__gte=0))
self.fill_grid(row_id + 1, self.model, 'budget_revenue', self.model.objects.filter(self.filter & revenue_filter).distinct())
self.get_components("budget_revenue").colspan = 3
self.get_components("budget_revenue").description = _("Revenue")
resultat_budget = Budget.get_total(self.getparam('year'), self.getparam('cost_accounting'))
if abs(resultat_budget) > 0.0001:
row_id = self.get_max_row() + 1
lbl = XferCompLabelForm('result')
lbl.set_value(resultat_budget)
lbl.set_format(format_with_devise(5))
lbl.set_location(0, row_id, 2)
if resultat_budget > 0:
lbl.description = _('result (profit)')
else:
lbl.description = _('result (deficit)')
self.add_component(lbl)
@MenuManage.describ('accounting.change_budget')
@ActionsManage.affect_list(TITLE_PRINT, "images/print.png")
class BudgetPrint(XferPrintAction):
icon = "account.png"
model = Budget
field_id = 'budget'
caption = _("Print previsionnal budget")
with_text_export = True
action_class = BudgetList
def condition_changebudget(xfer, gridname=''):
return not xfer.getparam('readonly', False)
@ActionsManage.affect_grid(TITLE_MODIFY, "images/edit.png", unique=SELECT_SINGLE, condition=condition_changebudget)
@ActionsManage.affect_list(TITLE_ADD, "images/add.png", condition=condition_changebudget)
@MenuManage.describ('accounting.add_budget')
class BudgetAddModify(XferAddEditor):
icon = "account.png"
model = Budget
field_id = 'budget'
caption_add = _("Add budget line")
caption_modify = _("Modify budget line")
class XferSaveBudget(XferSave):
def _load_unique_record(self, itemid):
if itemid[0] == 'C':
self.item = Budget()
self.item.id = itemid
self.item.year_id = self.getparam('year', 0)
self.item.code = itemid[1:]
self.fill_simple_fields()
else:
XferSave._load_unique_record(self, itemid)
def run_save(self, request, *args, **kwargs):
save = BudgetAddModify.XferSaveBudget()
save.is_view_right = self.is_view_right
save.locked = self.locked
save.model = self.model
save.field_id = self.field_id
save.caption = self.caption
return save.request_handling(request, *args, **kwargs)
def _load_unique_record(self, itemid):
if itemid[0] == 'C':
self.item = Budget()
self.item.id = itemid
self.item.code = itemid[1:]
val = Budget.objects.filter(code=self.item.code, year_id=self.getparam('year', 0)).aggregate(Sum('amount'))
self.item.amount = val['amount__sum']
else:
XferAddEditor._load_unique_record(self, itemid)
def _search_model(self):
if self.getparam("budget_revenue") is not None:
self.field_id = 'budget_revenue'
if self.getparam("budget_expense") is not None:
self.field_id = 'budget_expense'
XferAddEditor._search_model(self)
@ActionsManage.affect_grid(TITLE_DELETE, "images/delete.png", unique=SELECT_SINGLE, condition=condition_changebudget)
@MenuManage.describ('accounting.delete_budget')
class BudgetDel(XferDelete):
icon = "account.png"
model = Budget
field_id = 'budget'
caption = _("Delete Budget line")
def _load_unique_record(self, itemid):
if itemid[0] == 'C':
self.item = Budget()
self.item.id = itemid
self.item.year_id = self.getparam('year', 0)
self.item.code = itemid[1:]
else:
XferAddEditor._load_unique_record(self, itemid)
def _search_model(self):
if self.getparam("budget_revenue") is not None:
self.field_id = 'budget_revenue'
if self.getparam("budget_expense") is not None:
self.field_id = 'budget_expense'
XferAddEditor._search_model(self)
@ActionsManage.affect_grid(_("Budget"), "account.png", unique=SELECT_SINGLE)
@MenuManage.describ('accounting.change_budget')
class CostAccountingBudget(XferContainerAcknowledge):
icon = "account.png"
model = CostAccounting
field_id = 'costaccounting'
caption = _("Budget")
readonly = True
methods_allowed = ('GET', )
def fillresponse(self):
read_only = (self.item.status == CostAccounting.STATUS_CLOSED) or self.item.is_protected
self.redirect_action(BudgetList.get_action(), close=CLOSE_YES, params={'cost_accounting': self.item.id, 'readonly': read_only})
@ActionsManage.affect_list(_("Import"), "account.png", condition=lambda xfer: not xfer.getparam('readonly', False))
@MenuManage.describ('accounting.add_budget')
class BudgetImport(XferContainerAcknowledge):
icon = "account.png"
model = Budget
field_id = 'budget'
caption = _("Import budget")
def add_sel(self, costaccounting):
res = []
if costaccounting is not None:
res.append((costaccounting.id, str(costaccounting)))
res.extend(self.add_sel(costaccounting.last_costaccounting))
return res
def fillresponse(self, year=0, cost_accounting=0):
if self.getparam("CONFIRME", "") != "YES":
dlg = self.create_custom()
img = XferCompImage('img')
img.set_value(self.icon_path())
img.set_location(0, 0, 1, 3)
dlg.add_component(img)
lbl = XferCompLabelForm('title')
lbl.set_value_as_title(self.caption)
lbl.set_location(1, 0, 6)
dlg.add_component(lbl)
if cost_accounting == 0:
year = FiscalYear.get_current(year)
sel = XferCompSelect('currentyear')
sel.set_needed(True)
sel.set_select_query(FiscalYear.objects.filter(end__lt=year.begin))
sel.description = _('fiscal year')
sel.set_location(1, 1)
dlg.add_component(sel)
else:
current_cost = CostAccounting.objects.get(id=cost_accounting)
sel = XferCompSelect('costaccounting')
sel.set_needed(True)
sel.set_select(self.add_sel(current_cost.last_costaccounting))
sel.set_location(1, 1)
sel.description = _('cost accounting')
dlg.add_component(sel)
lbl = XferCompLabelForm('lbl_info')
            lbl.set_value_as_header(_('All budget lines will be deleted and the income statement of the selected item will be imported as the new budget.'))
lbl.set_location(1, 2, 2)
dlg.add_component(lbl)
dlg.add_action(self.return_action(TITLE_OK, "images/ok.png"), close=CLOSE_YES, params={'CONFIRME': 'YES'})
dlg.add_action(WrapAction(TITLE_CANCEL, 'images/cancel.png'))
else:
currentyear = self.getparam('currentyear', 0)
costaccounting = self.getparam('costaccounting', 0)
if cost_accounting == 0:
budget_filter = Q(year_id=year)
else:
budget_filter = Q(cost_accounting_id=cost_accounting)
for budget_line in Budget.objects.filter(budget_filter).distinct():
if (cost_accounting != 0) or (budget_line.cost_accounting_id is None):
budget_line.delete()
if cost_accounting == 0:
for chart in ChartsAccount.objects.filter(Q(year_id=currentyear) & Q(type_of_account__in=(3, 4))).distinct():
value = chart.get_current_total(with_correction=False)
for current_budget in Budget.objects.filter(year_id=year, code=chart.code):
value -= current_budget.amount
if abs(value) > 0.001:
Budget.objects.create(code=chart.code, amount=value, year_id=year)
else:
if year == 0:
year = None
values = {}
for line in EntryLineAccount.objects.filter(account__type_of_account__in=(3, 4), costaccounting_id=costaccounting).distinct():
if line.account.code not in values.keys():
values[line.account.code] = 0.0
values[line.account.code] += line.amount
for code, value in values.items():
if abs(value) > 0.001:
Budget.objects.create(code=code, amount=value, year_id=year, cost_accounting_id=cost_accounting)
@ActionsManage.affect_list(_("Budget"), "account.png")
@MenuManage.describ('accounting.change_budget')
class FiscalYearBudget(XferContainerAcknowledge):
icon = "account.png"
model = ChartsAccount
field_id = 'chartsaccount'
caption = _("Budget")
readonly = True
methods_allowed = ('GET', )
def fillresponse(self, year):
fiscal_year = FiscalYear.get_current(year)
read_only = (fiscal_year.status == FiscalYear.STATUS_FINISHED)
self.redirect_action(BudgetList.get_action(), close=CLOSE_YES, params={'year': fiscal_year.id, 'readonly': read_only})
| gpl-3.0 |
sutartmelson/girder | plugins/worker/server/constants.py | 2 | 1196 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
# The path that will be mounted in docker containers for data IO
DOCKER_DATA_VOLUME = '/mnt/girder_worker/data'
# The path that will be mounted in docker containers for utility scripts
DOCKER_SCRIPTS_VOUME = '/mnt/girder_worker/scripts'
# Settings where plugin information is stored
class PluginSettings(object):
BROKER = 'worker.broker'
BACKEND = 'worker.backend'
API_URL = 'worker.api_url'
| apache-2.0 |
thaim/ansible | test/units/module_utils/basic/test_filesystem.py | 113 | 5190 | # -*- coding: utf-8 -*-
# (c) 2012-2014, Michael DeHaan <[email protected]>
# (c) 2016 Toshio Kuratomi <[email protected]>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from units.mock.procenv import ModuleTestCase
from units.compat.mock import patch, MagicMock
from ansible.module_utils.six.moves import builtins
realimport = builtins.__import__
class TestOtherFilesystem(ModuleTestCase):
def test_module_utils_basic_ansible_module_user_and_group(self):
from ansible.module_utils import basic
basic._ANSIBLE_ARGS = None
am = basic.AnsibleModule(
argument_spec=dict(),
)
mock_stat = MagicMock()
mock_stat.st_uid = 0
mock_stat.st_gid = 0
with patch('os.lstat', return_value=mock_stat):
self.assertEqual(am.user_and_group('/path/to/file'), (0, 0))
def test_module_utils_basic_ansible_module_find_mount_point(self):
from ansible.module_utils import basic
basic._ANSIBLE_ARGS = None
am = basic.AnsibleModule(
argument_spec=dict(),
)
def _mock_ismount(path):
if path == b'/':
return True
return False
with patch('os.path.ismount', side_effect=_mock_ismount):
self.assertEqual(am.find_mount_point('/root/fs/../mounted/path/to/whatever'), '/')
def _mock_ismount(path):
if path == b'/subdir/mount':
return True
if path == b'/':
return True
return False
with patch('os.path.ismount', side_effect=_mock_ismount):
self.assertEqual(am.find_mount_point('/subdir/mount/path/to/whatever'), '/subdir/mount')
def test_module_utils_basic_ansible_module_set_owner_if_different(self):
from ansible.module_utils import basic
basic._ANSIBLE_ARGS = None
am = basic.AnsibleModule(
argument_spec=dict(),
)
self.assertEqual(am.set_owner_if_different('/path/to/file', None, True), True)
self.assertEqual(am.set_owner_if_different('/path/to/file', None, False), False)
am.user_and_group = MagicMock(return_value=(500, 500))
with patch('os.lchown', return_value=None) as m:
self.assertEqual(am.set_owner_if_different('/path/to/file', 0, False), True)
m.assert_called_with(b'/path/to/file', 0, -1)
def _mock_getpwnam(*args, **kwargs):
mock_pw = MagicMock()
mock_pw.pw_uid = 0
return mock_pw
m.reset_mock()
with patch('pwd.getpwnam', side_effect=_mock_getpwnam):
self.assertEqual(am.set_owner_if_different('/path/to/file', 'root', False), True)
m.assert_called_with(b'/path/to/file', 0, -1)
with patch('pwd.getpwnam', side_effect=KeyError):
self.assertRaises(SystemExit, am.set_owner_if_different, '/path/to/file', 'root', False)
m.reset_mock()
am.check_mode = True
self.assertEqual(am.set_owner_if_different('/path/to/file', 0, False), True)
self.assertEqual(m.called, False)
am.check_mode = False
with patch('os.lchown', side_effect=OSError) as m:
self.assertRaises(SystemExit, am.set_owner_if_different, '/path/to/file', 'root', False)
def test_module_utils_basic_ansible_module_set_group_if_different(self):
from ansible.module_utils import basic
basic._ANSIBLE_ARGS = None
am = basic.AnsibleModule(
argument_spec=dict(),
)
self.assertEqual(am.set_group_if_different('/path/to/file', None, True), True)
self.assertEqual(am.set_group_if_different('/path/to/file', None, False), False)
am.user_and_group = MagicMock(return_value=(500, 500))
with patch('os.lchown', return_value=None) as m:
self.assertEqual(am.set_group_if_different('/path/to/file', 0, False), True)
m.assert_called_with(b'/path/to/file', -1, 0)
def _mock_getgrnam(*args, **kwargs):
mock_gr = MagicMock()
mock_gr.gr_gid = 0
return mock_gr
m.reset_mock()
with patch('grp.getgrnam', side_effect=_mock_getgrnam):
self.assertEqual(am.set_group_if_different('/path/to/file', 'root', False), True)
m.assert_called_with(b'/path/to/file', -1, 0)
with patch('grp.getgrnam', side_effect=KeyError):
self.assertRaises(SystemExit, am.set_group_if_different, '/path/to/file', 'root', False)
m.reset_mock()
am.check_mode = True
self.assertEqual(am.set_group_if_different('/path/to/file', 0, False), True)
self.assertEqual(m.called, False)
am.check_mode = False
with patch('os.lchown', side_effect=OSError) as m:
self.assertRaises(SystemExit, am.set_group_if_different, '/path/to/file', 'root', False)
| mit |
casselineau/Tracer | tracer/tracer_engine_mp.py | 1 | 6013 | import time
import numpy as N
from pathos.multiprocessing import ProcessingPool as Pool
from tracer.sources import *
from tracer.tracer_engine import *
from tracer.assembly import *
from copy import copy
class TracerEngineMP(TracerEngine):
'''
	Framework for multi-processing using the tracer engine as is.
Requires pathos: https://github.com/uqfoundation/pathos
Inheritance is broken by the multiprocessing pool and rebuilt on the tree and self._asm
The original assembly needs to be reallocated after the simulation to be able to get the values stored in the optical managers with previously defined objects.
	Not the cleanest or finest implementation; it could be blended with the original engine to use the very same API, but it works.
'''
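	# Hedged usage sketch (the source helper and constructor arguments are
	# assumptions, not defined in this module): build one ray bundle per
	# process and hand the whole list to multi_ray_sim, e.g.
	#   engine = TracerEngineMP(assembly)
	#   sources = [solar_disk_bundle(...) for _ in range(4)]  # hypothetical helper call
	#   engine.multi_ray_sim(sources, procs=4)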
def multi_ray_sim(self, sources, procs=1, minener=1e-10, reps=1000, tree=True):
self.minener = minener # minimum energy threshold
self.reps = reps # stop iteration after this many ray bundles were generated (i.e.
# after the original rays intersected some surface this many times).
self.tree_switch = tree
# The multiprocessing raytracing method to call from the original engine.
if len(sources) != procs:
raise Exception('Number of sources and processors do not agree')
# Creates a pool of processes and makes them raytrace one different source each. The resm list returned is a list of copies of the original engine post raytrace.
timetrace = time.clock()
pool = Pool(processes=procs)
def trace(source):
self.ray_tracer(source, self.reps, self.minener, self.tree_switch)
return self
resm = pool.map(trace, sources)
del pool
timetrace = time.clock() - timetrace
#print 'Raytrace time: ', timetrace,'s'
timepost = time.clock()
# New general tree:
for eng in xrange(len(resm)):
# Get and regroup results in one tree and assembly only:
if eng == 0 : # Initialise with the first engine
if self.tree_switch == True:
self.tree = resm[eng].tree
self._asm = resm[eng]._asm
else:
if self.tree_switch == True:
eng_bunds = resm[eng].tree._bunds
for b in xrange(len(eng_bunds)):
if b > 0: # if it is not the starting bundle (emanating from the source) add to the parents indices according to the size of the general tree bundle size.
eng_bunds[b]._parents = eng_bunds[b]._parents+next_parents_adjust
if b == len(self.tree._bunds): # If the bundle number is over the existing limit in the general tree, append it to increase the general tree size.
self.tree.append(eng_bunds[b])
else: # concatenate the bundle with its existing counterpart in the general tree
next_parents_adjust = self.tree._bunds[b].get_num_rays() # to adjust the index of parents before changing the total size of the general tree bundle.
self.tree._bunds[b] = concatenate_rays([self.tree._bunds[b], eng_bunds[b]])
# Next loop is to get the optics callable objects and copy regroup their values without asumptions about what they are.
subas_engine = resm[eng]._asm.get_assemblies()
if len(subas_engine):
for a in xrange(len(subas_engine)):
objs_subas = subas_engine[a].get_local_objects()
for o in xrange(len(objs_subas)):
surfs_object = objs_subas[o].get_surfaces()
for s in xrange(len(surfs_object)):
for k in surfs_object[s]._opt.__dict__.keys():
if k != '_opt':
if hasattr(surfs_object[s]._opt.__dict__[k], '__len__'):
[self._asm._assemblies[a]._objects[o].surfaces[s]._opt.__dict__[k].append(q) for q in surfs_object[s]._opt.__dict__[k]]
objs_engine = resm[eng]._asm.get_local_objects()
if len(objs_engine):
for o in xrange(len(objs_engine)):
surfs_object = objs_engine[o].get_surfaces()
for s in xrange(len(surfs_object)):
for k in surfs_object[s]._opt.__dict__.keys():
if k != '_opt':
[self._asm._objects[o].surfaces[s]._opt.__dict__[k].append(q) for q in surfs_object[s]._opt.__dict__[k]]
# We need the next part to reshape everything to the right array format.
asm_subas = self._asm.get_assemblies()
if len(asm_subas):
for a in xrange(len(asm_subas)):
objs_subas = asm_subas[a].get_local_objects()
for o in xrange(len(objs_subas)):
surfs_object = objs_subas[o].get_surfaces()
for s in xrange(len(surfs_object)):
for k in surfs_object[s]._opt.__dict__.keys():
if k != '_opt':
if hasattr(surfs_object[s]._opt.__dict__[k], '__len__'):
if len(surfs_object[s]._opt.__dict__[k])>0:
if k == '_absorbed':
self._asm._assemblies[a]._objects[o].surfaces[s]._opt.__dict__[k] = [N.hstack(self._asm._assemblies[a]._objects[o].surfaces[s]._opt.__dict__[k])]
else:
self._asm._assemblies[a]._objects[o].surfaces[s]._opt.__dict__[k] = [N.column_stack(self._asm._assemblies[a]._objects[o].surfaces[s]._opt.__dict__[k])]
#else:
# self._asm._assemblies[a]._objects[o].surfaces[s]._opt.__dict__[k] = []
asm_objs = self._asm.get_local_objects()
if len(asm_objs):
for o in xrange(len(asm_objs)):
surfs_object = asm_objs[o].get_surfaces()
for s in xrange(len(surfs_object)):
for k in surfs_object[s]._opt.__dict__.keys():
if k != '_opt':
if hasattr(surfs_object[s]._opt.__dict__[k], '__len__'):
if len(surfs_object[s]._opt.__dict__[k])>0:
if k == '_absorbed':
self._asm._objects[o].surfaces[s]._opt.__dict__[k] = [N.hstack(self._asm._objects[o].surfaces[s]._opt.__dict__[k])]
else:
self._asm._objects[o].surfaces[s]._opt.__dict__[k] = [N.column_stack(self._asm._objects[o].surfaces[s]._opt.__dict__[k])]
#else:
# self._asm._objects[o].surfaces[s]._opt.__dict__[k] = []
#print 'general assembly',' object ', o,' surface ', s,' number of rays: ', len(self._asm._objects[o].surfaces[s].get_optics_manager().get_all_hits()[0])
del resm
timepost2 = time.clock()-timepost
#print 'Post processing reassociation time: ', timepost2,'s'
| gpl-3.0 |
dillonjerry/aws | AWS-ElasticBeanstalk-CLI-2.6.2/eb/macosx/python3/lib/aws/requests/structures.py | 230 | 2329 | # -*- coding: utf-8 -*-
"""
requests.structures
~~~~~~~~~~~~~~~~~~~
Data structures that power Requests.
"""
import os
from itertools import islice
class IteratorProxy(object):
"""docstring for IteratorProxy"""
def __init__(self, i):
self.i = i
# self.i = chain.from_iterable(i)
def __iter__(self):
return self.i
def __len__(self):
if hasattr(self.i, '__len__'):
return len(self.i)
if hasattr(self.i, 'len'):
return self.i.len
if hasattr(self.i, 'fileno'):
return os.fstat(self.i.fileno()).st_size
def read(self, n):
return "".join(islice(self.i, None, n))
class CaseInsensitiveDict(dict):
"""Case-insensitive Dictionary
For example, ``headers['content-encoding']`` will return the
value of a ``'Content-Encoding'`` response header."""
@property
def lower_keys(self):
if not hasattr(self, '_lower_keys') or not self._lower_keys:
self._lower_keys = dict((k.lower(), k) for k in list(self.keys()))
return self._lower_keys
def _clear_lower_keys(self):
if hasattr(self, '_lower_keys'):
self._lower_keys.clear()
def __setitem__(self, key, value):
dict.__setitem__(self, key, value)
self._clear_lower_keys()
def __delitem__(self, key):
dict.__delitem__(self, self.lower_keys.get(key.lower(), key))
self._lower_keys.clear()
def __contains__(self, key):
return key.lower() in self.lower_keys
def __getitem__(self, key):
# We allow fall-through here, so values default to None
if key in self:
return dict.__getitem__(self, self.lower_keys[key.lower()])
def get(self, key, default=None):
if key in self:
return self[key]
else:
return default
class LookupDict(dict):
"""Dictionary lookup object."""
def __init__(self, name=None):
self.name = name
super(LookupDict, self).__init__()
def __repr__(self):
return '<lookup \'%s\'>' % (self.name)
def __getitem__(self, key):
# We allow fall-through here, so values default to None
return self.__dict__.get(key, None)
def get(self, key, default=None):
return self.__dict__.get(key, default)
| gpl-2.0 |
40223114/w16 | static/Brython3.1.1-20150328-091302/Lib/select.py | 730 | 9440 | """
borrowed from jython
https://bitbucket.org/jython/jython/raw/28a66ba038620292520470a0bb4dc9bb8ac2e403/Lib/select.py
"""
#import java.nio.channels.SelectableChannel
#import java.nio.channels.SelectionKey
#import java.nio.channels.Selector
#from java.nio.channels.SelectionKey import OP_ACCEPT, OP_CONNECT, OP_WRITE, OP_READ
import errno
import os
import queue
import socket
class error(Exception): pass
ALL = None
_exception_map = {
# (<javaexception>, <circumstance>) : lambda: <code that raises the python equivalent>
#(java.nio.channels.ClosedChannelException, ALL) : error(errno.ENOTCONN, 'Socket is not connected'),
#(java.nio.channels.CancelledKeyException, ALL) : error(errno.ENOTCONN, 'Socket is not connected'),
#(java.nio.channels.IllegalBlockingModeException, ALL) : error(errno.ESOCKISBLOCKING, 'socket must be in non-blocking mode'),
}
def _map_exception(exc, circumstance=ALL):
try:
mapped_exception = _exception_map[(exc.__class__, circumstance)]
mapped_exception.java_exception = exc
return mapped_exception
except KeyError:
return error(-1, 'Unmapped java exception: <%s:%s>' % (exc.toString(), circumstance))
POLLIN = 1
POLLOUT = 2
# The following event types are completely ignored on jython
# Java does not support them, AFAICT
# They are declared only to support code compatibility with cpython
POLLPRI = 4
POLLERR = 8
POLLHUP = 16
POLLNVAL = 32
def _getselectable(selectable_object):
try:
channel = selectable_object.getchannel()
except:
try:
channel = selectable_object.fileno().getChannel()
except:
raise TypeError("Object '%s' is not watchable" % selectable_object,
errno.ENOTSOCK)
if channel and not isinstance(channel, java.nio.channels.SelectableChannel):
raise TypeError("Object '%s' is not watchable" % selectable_object,
errno.ENOTSOCK)
return channel
class poll:
def __init__(self):
self.selector = java.nio.channels.Selector.open()
self.chanmap = {}
self.unconnected_sockets = []
def _register_channel(self, socket_object, channel, mask):
jmask = 0
if mask & POLLIN:
# Note that OP_READ is NOT a valid event on server socket channels.
if channel.validOps() & OP_ACCEPT:
jmask = OP_ACCEPT
else:
jmask = OP_READ
if mask & POLLOUT:
if channel.validOps() & OP_WRITE:
jmask |= OP_WRITE
if channel.validOps() & OP_CONNECT:
jmask |= OP_CONNECT
selectionkey = channel.register(self.selector, jmask)
self.chanmap[channel] = (socket_object, selectionkey)
def _check_unconnected_sockets(self):
temp_list = []
for socket_object, mask in self.unconnected_sockets:
channel = _getselectable(socket_object)
if channel is not None:
self._register_channel(socket_object, channel, mask)
else:
temp_list.append( (socket_object, mask) )
self.unconnected_sockets = temp_list
def register(self, socket_object, mask = POLLIN|POLLOUT|POLLPRI):
try:
channel = _getselectable(socket_object)
if channel is None:
# The socket is not yet connected, and thus has no channel
# Add it to a pending list, and return
self.unconnected_sockets.append( (socket_object, mask) )
return
self._register_channel(socket_object, channel, mask)
        except BaseException as jlx:
#except java.lang.Exception, jlx:
raise _map_exception(jlx)
def unregister(self, socket_object):
try:
channel = _getselectable(socket_object)
self.chanmap[channel][1].cancel()
del self.chanmap[channel]
        except BaseException as jlx:
#except java.lang.Exception, jlx:
raise _map_exception(jlx)
def _dopoll(self, timeout):
if timeout is None or timeout < 0:
self.selector.select()
else:
try:
timeout = int(timeout)
if not timeout:
self.selector.selectNow()
else:
# No multiplication required: both cpython and java use millisecond timeouts
self.selector.select(timeout)
except ValueError as vx:
raise error("poll timeout must be a number of milliseconds or None", errno.EINVAL)
# The returned selectedKeys cannot be used from multiple threads!
return self.selector.selectedKeys()
def poll(self, timeout=None):
try:
self._check_unconnected_sockets()
selectedkeys = self._dopoll(timeout)
results = []
for k in selectedkeys.iterator():
jmask = k.readyOps()
pymask = 0
if jmask & OP_READ: pymask |= POLLIN
if jmask & OP_WRITE: pymask |= POLLOUT
if jmask & OP_ACCEPT: pymask |= POLLIN
if jmask & OP_CONNECT: pymask |= POLLOUT
# Now return the original userobject, and the return event mask
results.append( (self.chanmap[k.channel()][0], pymask) )
return results
        except BaseException as jlx:
#except java.lang.Exception, jlx:
raise _map_exception(jlx)
def _deregister_all(self):
try:
for k in self.selector.keys():
k.cancel()
# Keys are not actually removed from the selector until the next select operation.
self.selector.selectNow()
        except BaseException as jlx:
#except java.lang.Exception, jlx:
raise _map_exception(jlx)
def close(self):
try:
self._deregister_all()
self.selector.close()
        except BaseException as jlx:
#except java.lang.Exception, jlx:
raise _map_exception(jlx)
def _calcselecttimeoutvalue(value):
if value is None:
return None
try:
floatvalue = float(value)
except Exception as x:
raise TypeError("Select timeout value must be a number or None")
if value < 0:
raise error("Select timeout value cannot be negative", errno.EINVAL)
if floatvalue < 0.000001:
return 0
return int(floatvalue * 1000) # Convert to milliseconds
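# Illustrative conversions: None stays None (block indefinitely), 0.5 becomes
# 500 (milliseconds), and anything below one microsecond collapses to 0
# (poll and return immediately).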
# This cache for poll objects is required because of a bug in java on MS Windows
# http://bugs.jython.org/issue1291
class poll_object_cache:
def __init__(self):
self.is_windows = os.name == 'nt'
if self.is_windows:
            self.poll_object_queue = queue.Queue()
import atexit
atexit.register(self.finalize)
def get_poll_object(self):
if not self.is_windows:
return poll()
try:
return self.poll_object_queue.get(False)
        except queue.Empty:
return poll()
def release_poll_object(self, pobj):
if self.is_windows:
pobj._deregister_all()
self.poll_object_queue.put(pobj)
else:
pobj.close()
def finalize(self):
if self.is_windows:
while True:
try:
p = self.poll_object_queue.get(False)
p.close()
                except queue.Empty:
return
_poll_object_cache = poll_object_cache()
def native_select(read_fd_list, write_fd_list, outofband_fd_list, timeout=None):
timeout = _calcselecttimeoutvalue(timeout)
# First create a poll object to do the actual watching.
pobj = _poll_object_cache.get_poll_object()
try:
registered_for_read = {}
# Check the read list
for fd in read_fd_list:
pobj.register(fd, POLLIN)
registered_for_read[fd] = 1
# And now the write list
for fd in write_fd_list:
if fd in registered_for_read:
# registering a second time overwrites the first
pobj.register(fd, POLLIN|POLLOUT)
else:
pobj.register(fd, POLLOUT)
results = pobj.poll(timeout)
# Now start preparing the results
read_ready_list, write_ready_list, oob_ready_list = [], [], []
for fd, mask in results:
if mask & POLLIN:
read_ready_list.append(fd)
if mask & POLLOUT:
write_ready_list.append(fd)
return read_ready_list, write_ready_list, oob_ready_list
finally:
_poll_object_cache.release_poll_object(pobj)
select = native_select
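# Minimal usage sketch (assumes `sock` is a socket object compatible with
# this module's channel lookup):
#   readable, writable, _ = select([sock], [sock], [], 5.0)
#   if readable:
#       data = sock.recv(4096)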
def cpython_compatible_select(read_fd_list, write_fd_list, outofband_fd_list, timeout=None):
# First turn all sockets to non-blocking
# keeping track of which ones have changed
modified_channels = []
try:
for socket_list in [read_fd_list, write_fd_list, outofband_fd_list]:
for s in socket_list:
channel = _getselectable(s)
if channel.isBlocking():
modified_channels.append(channel)
channel.configureBlocking(0)
return native_select(read_fd_list, write_fd_list, outofband_fd_list, timeout)
finally:
for channel in modified_channels:
channel.configureBlocking(1)
| agpl-3.0 |
imsplitbit/nova | nova/api/openstack/compute/contrib/security_groups.py | 8 | 26909 | # Copyright 2011 OpenStack Foundation
# Copyright 2012 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The security groups extension."""
import contextlib
import json
import webob
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
from nova.compute import api as compute_api
from nova import exception
from nova.network.security_group import neutron_driver
from nova.network.security_group import openstack_driver
from nova.openstack.common.gettextutils import _
from nova.openstack.common import xmlutils
from nova.virt import netutils
authorize = extensions.extension_authorizer('compute', 'security_groups')
softauth = extensions.soft_extension_authorizer('compute', 'security_groups')
def make_rule(elem):
elem.set('id')
elem.set('parent_group_id')
proto = xmlutil.SubTemplateElement(elem, 'ip_protocol')
proto.text = 'ip_protocol'
from_port = xmlutil.SubTemplateElement(elem, 'from_port')
from_port.text = 'from_port'
to_port = xmlutil.SubTemplateElement(elem, 'to_port')
to_port.text = 'to_port'
group = xmlutil.SubTemplateElement(elem, 'group', selector='group')
name = xmlutil.SubTemplateElement(group, 'name')
name.text = 'name'
tenant_id = xmlutil.SubTemplateElement(group, 'tenant_id')
tenant_id.text = 'tenant_id'
ip_range = xmlutil.SubTemplateElement(elem, 'ip_range',
selector='ip_range')
cidr = xmlutil.SubTemplateElement(ip_range, 'cidr')
cidr.text = 'cidr'
def make_sg(elem):
elem.set('id')
elem.set('tenant_id')
elem.set('name')
desc = xmlutil.SubTemplateElement(elem, 'description')
desc.text = 'description'
rules = xmlutil.SubTemplateElement(elem, 'rules')
rule = xmlutil.SubTemplateElement(rules, 'rule', selector='rules')
make_rule(rule)
def _authorize_context(req):
context = req.environ['nova.context']
authorize(context)
return context
sg_nsmap = {None: wsgi.XMLNS_V11}
class SecurityGroupRuleTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('security_group_rule',
selector='security_group_rule')
make_rule(root)
return xmlutil.MasterTemplate(root, 1, nsmap=sg_nsmap)
class SecurityGroupTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('security_group',
selector='security_group')
make_sg(root)
return xmlutil.MasterTemplate(root, 1, nsmap=sg_nsmap)
class SecurityGroupsTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('security_groups')
elem = xmlutil.SubTemplateElement(root, 'security_group',
selector='security_groups')
make_sg(elem)
return xmlutil.MasterTemplate(root, 1, nsmap=sg_nsmap)
class SecurityGroupXMLDeserializer(wsgi.MetadataXMLDeserializer):
"""
Deserializer to handle xml-formatted security group requests.
"""
def default(self, string):
"""Deserialize an xml-formatted security group create request."""
dom = xmlutil.safe_minidom_parse_string(string)
security_group = {}
sg_node = self.find_first_child_named(dom,
'security_group')
if sg_node is not None:
if sg_node.hasAttribute('name'):
security_group['name'] = sg_node.getAttribute('name')
desc_node = self.find_first_child_named(sg_node,
"description")
if desc_node:
security_group['description'] = self.extract_text(desc_node)
return {'body': {'security_group': security_group}}
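# A sketch of the mapping performed by SecurityGroupXMLDeserializer above
# (the payload shown is an invented example):
#
#     <security_group name="web">
#       <description>allow http</description>
#     </security_group>
#
# deserializes to:
#
#     {'body': {'security_group': {'name': 'web', 'description': 'allow http'}}}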
class SecurityGroupRulesXMLDeserializer(wsgi.MetadataXMLDeserializer):
"""
    Deserializer to handle xml-formatted security group rule requests.
"""
def default(self, string):
"""Deserialize an xml-formatted security group create request."""
dom = xmlutil.safe_minidom_parse_string(string)
security_group_rule = self._extract_security_group_rule(dom)
return {'body': {'security_group_rule': security_group_rule}}
def _extract_security_group_rule(self, node):
"""Marshal the security group rule attribute of a parsed request."""
sg_rule = {}
sg_rule_node = self.find_first_child_named(node,
'security_group_rule')
if sg_rule_node is not None:
ip_protocol_node = self.find_first_child_named(sg_rule_node,
"ip_protocol")
if ip_protocol_node is not None:
sg_rule['ip_protocol'] = self.extract_text(ip_protocol_node)
from_port_node = self.find_first_child_named(sg_rule_node,
"from_port")
if from_port_node is not None:
sg_rule['from_port'] = self.extract_text(from_port_node)
to_port_node = self.find_first_child_named(sg_rule_node, "to_port")
if to_port_node is not None:
sg_rule['to_port'] = self.extract_text(to_port_node)
parent_group_id_node = self.find_first_child_named(sg_rule_node,
"parent_group_id")
if parent_group_id_node is not None:
sg_rule['parent_group_id'] = self.extract_text(
parent_group_id_node)
group_id_node = self.find_first_child_named(sg_rule_node,
"group_id")
if group_id_node is not None:
sg_rule['group_id'] = self.extract_text(group_id_node)
cidr_node = self.find_first_child_named(sg_rule_node, "cidr")
if cidr_node is not None:
sg_rule['cidr'] = self.extract_text(cidr_node)
return sg_rule
@contextlib.contextmanager
def translate_exceptions():
"""Translate nova exceptions to http exceptions."""
try:
yield
except exception.Invalid as exp:
msg = exp.format_message()
raise exc.HTTPBadRequest(explanation=msg)
except exception.SecurityGroupNotFound as exp:
msg = exp.format_message()
raise exc.HTTPNotFound(explanation=msg)
except exception.InstanceNotFound as exp:
msg = exp.format_message()
raise exc.HTTPNotFound(explanation=msg)
except exception.SecurityGroupLimitExceeded as exp:
msg = exp.format_message()
raise exc.HTTPRequestEntityTooLarge(explanation=msg)
class SecurityGroupControllerBase(object):
"""Base class for Security Group controllers."""
def __init__(self):
self.security_group_api = (
openstack_driver.get_openstack_security_group_driver())
self.compute_api = compute.API(
security_group_api=self.security_group_api)
def _format_security_group_rule(self, context, rule):
sg_rule = {}
sg_rule['id'] = rule['id']
sg_rule['parent_group_id'] = rule['parent_group_id']
sg_rule['ip_protocol'] = rule['protocol']
sg_rule['from_port'] = rule['from_port']
sg_rule['to_port'] = rule['to_port']
sg_rule['group'] = {}
sg_rule['ip_range'] = {}
if rule['group_id']:
with translate_exceptions():
source_group = self.security_group_api.get(context,
id=rule['group_id'])
sg_rule['group'] = {'name': source_group.get('name'),
'tenant_id': source_group.get('project_id')}
else:
sg_rule['ip_range'] = {'cidr': rule['cidr']}
return sg_rule
def _format_security_group(self, context, group):
security_group = {}
security_group['id'] = group['id']
security_group['description'] = group['description']
security_group['name'] = group['name']
security_group['tenant_id'] = group['project_id']
security_group['rules'] = []
for rule in group['rules']:
security_group['rules'] += [self._format_security_group_rule(
context, rule)]
return security_group
def _from_body(self, body, key):
if not body:
raise exc.HTTPUnprocessableEntity()
value = body.get(key, None)
if value is None:
raise exc.HTTPUnprocessableEntity()
return value
class SecurityGroupController(SecurityGroupControllerBase):
"""The Security group API controller for the OpenStack API."""
@wsgi.serializers(xml=SecurityGroupTemplate)
def show(self, req, id):
"""Return data about the given security group."""
context = _authorize_context(req)
with translate_exceptions():
id = self.security_group_api.validate_id(id)
security_group = self.security_group_api.get(context, None, id,
map_exception=True)
return {'security_group': self._format_security_group(context,
security_group)}
def delete(self, req, id):
"""Delete a security group."""
context = _authorize_context(req)
with translate_exceptions():
id = self.security_group_api.validate_id(id)
security_group = self.security_group_api.get(context, None, id,
map_exception=True)
self.security_group_api.destroy(context, security_group)
return webob.Response(status_int=202)
@wsgi.serializers(xml=SecurityGroupsTemplate)
def index(self, req):
"""Returns a list of security groups."""
context = _authorize_context(req)
search_opts = {}
search_opts.update(req.GET)
with translate_exceptions():
project_id = context.project_id
raw_groups = self.security_group_api.list(context,
project=project_id,
search_opts=search_opts)
limited_list = common.limited(raw_groups, req)
result = [self._format_security_group(context, group)
for group in limited_list]
return {'security_groups':
list(sorted(result,
key=lambda k: (k['tenant_id'], k['name'])))}
@wsgi.serializers(xml=SecurityGroupTemplate)
@wsgi.deserializers(xml=SecurityGroupXMLDeserializer)
def create(self, req, body):
"""Creates a new security group."""
context = _authorize_context(req)
security_group = self._from_body(body, 'security_group')
group_name = security_group.get('name', None)
group_description = security_group.get('description', None)
with translate_exceptions():
self.security_group_api.validate_property(group_name, 'name', None)
self.security_group_api.validate_property(group_description,
'description', None)
group_ref = self.security_group_api.create_security_group(
context, group_name, group_description)
return {'security_group': self._format_security_group(context,
group_ref)}
@wsgi.serializers(xml=SecurityGroupTemplate)
def update(self, req, id, body):
"""Update a security group."""
context = _authorize_context(req)
with translate_exceptions():
id = self.security_group_api.validate_id(id)
security_group = self.security_group_api.get(context, None, id,
map_exception=True)
security_group_data = self._from_body(body, 'security_group')
group_name = security_group_data.get('name', None)
group_description = security_group_data.get('description', None)
with translate_exceptions():
self.security_group_api.validate_property(group_name, 'name', None)
self.security_group_api.validate_property(group_description,
'description', None)
group_ref = self.security_group_api.update_security_group(
context, security_group, group_name, group_description)
return {'security_group': self._format_security_group(context,
group_ref)}
class SecurityGroupRulesController(SecurityGroupControllerBase):
@wsgi.serializers(xml=SecurityGroupRuleTemplate)
@wsgi.deserializers(xml=SecurityGroupRulesXMLDeserializer)
def create(self, req, body):
context = _authorize_context(req)
sg_rule = self._from_body(body, 'security_group_rule')
with translate_exceptions():
parent_group_id = self.security_group_api.validate_id(
sg_rule.get('parent_group_id', None))
security_group = self.security_group_api.get(context, None,
parent_group_id,
map_exception=True)
try:
new_rule = self._rule_args_to_dict(context,
to_port=sg_rule.get('to_port'),
from_port=sg_rule.get('from_port'),
ip_protocol=sg_rule.get('ip_protocol'),
cidr=sg_rule.get('cidr'),
group_id=sg_rule.get('group_id'))
except Exception as exp:
raise exc.HTTPBadRequest(explanation=unicode(exp))
if new_rule is None:
msg = _("Not enough parameters to build a valid rule.")
raise exc.HTTPBadRequest(explanation=msg)
new_rule['parent_group_id'] = security_group['id']
if 'cidr' in new_rule:
net, prefixlen = netutils.get_net_and_prefixlen(new_rule['cidr'])
if net not in ('0.0.0.0', '::') and prefixlen == '0':
msg = _("Bad prefix for network in cidr %s") % new_rule['cidr']
raise exc.HTTPBadRequest(explanation=msg)
with translate_exceptions():
security_group_rule = (
self.security_group_api.create_security_group_rule(
context, security_group, new_rule))
return {"security_group_rule": self._format_security_group_rule(
context,
security_group_rule)}
def _rule_args_to_dict(self, context, to_port=None, from_port=None,
ip_protocol=None, cidr=None, group_id=None):
if group_id is not None:
group_id = self.security_group_api.validate_id(group_id)
# check if groupId exists
self.security_group_api.get(context, id=group_id)
return self.security_group_api.new_group_ingress_rule(
group_id, ip_protocol, from_port, to_port)
else:
cidr = self.security_group_api.parse_cidr(cidr)
return self.security_group_api.new_cidr_ingress_rule(
cidr, ip_protocol, from_port, to_port)
def delete(self, req, id):
context = _authorize_context(req)
with translate_exceptions():
id = self.security_group_api.validate_id(id)
rule = self.security_group_api.get_rule(context, id)
group_id = rule['parent_group_id']
security_group = self.security_group_api.get(context, None,
group_id,
map_exception=True)
self.security_group_api.remove_rules(context, security_group,
[rule['id']])
return webob.Response(status_int=202)
class ServerSecurityGroupController(SecurityGroupControllerBase):
@wsgi.serializers(xml=SecurityGroupsTemplate)
def index(self, req, server_id):
"""Returns a list of security groups for the given instance."""
context = _authorize_context(req)
self.security_group_api.ensure_default(context)
with translate_exceptions():
instance = self.compute_api.get(context, server_id)
groups = self.security_group_api.get_instance_security_groups(
context, instance['uuid'], True)
result = [self._format_security_group(context, group)
for group in groups]
return {'security_groups':
list(sorted(result,
key=lambda k: (k['tenant_id'], k['name'])))}
class SecurityGroupActionController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(SecurityGroupActionController, self).__init__(*args, **kwargs)
self.security_group_api = (
openstack_driver.get_openstack_security_group_driver())
self.compute_api = compute.API(
security_group_api=self.security_group_api)
def _parse(self, body, action):
try:
body = body[action]
group_name = body['name']
except TypeError:
msg = _("Missing parameter dict")
raise webob.exc.HTTPBadRequest(explanation=msg)
except KeyError:
msg = _("Security group not specified")
raise webob.exc.HTTPBadRequest(explanation=msg)
if not group_name or group_name.strip() == '':
msg = _("Security group name cannot be empty")
raise webob.exc.HTTPBadRequest(explanation=msg)
return group_name
def _invoke(self, method, context, id, group_name):
with translate_exceptions():
instance = self.compute_api.get(context, id)
method(context, instance, group_name)
return webob.Response(status_int=202)
@wsgi.action('addSecurityGroup')
def _addSecurityGroup(self, req, id, body):
context = req.environ['nova.context']
authorize(context)
group_name = self._parse(body, 'addSecurityGroup')
return self._invoke(self.security_group_api.add_to_instance,
context, id, group_name)
@wsgi.action('removeSecurityGroup')
def _removeSecurityGroup(self, req, id, body):
context = req.environ['nova.context']
authorize(context)
group_name = self._parse(body, 'removeSecurityGroup')
return self._invoke(self.security_group_api.remove_from_instance,
context, id, group_name)
class SecurityGroupsOutputController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(SecurityGroupsOutputController, self).__init__(*args, **kwargs)
self.compute_api = compute.API()
self.security_group_api = (
openstack_driver.get_openstack_security_group_driver())
def _extend_servers(self, req, servers):
# TODO(arosen) this function should be refactored to reduce duplicate
# code and use get_instance_security_groups instead of get_db_instance.
if not len(servers):
return
key = "security_groups"
context = _authorize_context(req)
if not openstack_driver.is_neutron_security_groups():
for server in servers:
instance = req.get_db_instance(server['id'])
groups = instance.get(key)
if groups:
server[key] = [{"name": group["name"]} for group in groups]
else:
# If method is a POST we get the security groups intended for an
# instance from the request. The reason for this is if using
# neutron security groups the requested security groups for the
# instance are not in the db and have not been sent to neutron yet.
if req.method != 'POST':
if len(servers) == 1:
group = (self.security_group_api
.get_instance_security_groups(context,
servers[0]['id']))
if group:
servers[0][key] = group
else:
sg_instance_bindings = (
self.security_group_api
.get_instances_security_groups_bindings(context))
for server in servers:
groups = sg_instance_bindings.get(server['id'])
if groups:
server[key] = groups
# In this section of code len(servers) == 1 as you can only POST
# one server in an API request.
else:
try:
# try converting to json
req_obj = json.loads(req.body)
# Add security group to server, if no security group was in
# request add default since that is the group it is part of
servers[0][key] = req_obj['server'].get(
key, [{'name': 'default'}])
except ValueError:
root = xmlutils.safe_minidom_parse_string(req.body)
sg_root = root.getElementsByTagName(key)
groups = []
if sg_root:
security_groups = sg_root[0].getElementsByTagName(
'security_group')
for security_group in security_groups:
groups.append(
{'name': security_group.getAttribute('name')})
if not groups:
groups = [{'name': 'default'}]
servers[0][key] = groups
def _show(self, req, resp_obj):
if not softauth(req.environ['nova.context']):
return
if 'server' in resp_obj.obj:
resp_obj.attach(xml=SecurityGroupServerTemplate())
self._extend_servers(req, [resp_obj.obj['server']])
@wsgi.extends
def show(self, req, resp_obj, id):
return self._show(req, resp_obj)
@wsgi.extends
def create(self, req, resp_obj, body):
return self._show(req, resp_obj)
@wsgi.extends
def detail(self, req, resp_obj):
if not softauth(req.environ['nova.context']):
return
resp_obj.attach(xml=SecurityGroupServersTemplate())
self._extend_servers(req, list(resp_obj.obj['servers']))
class SecurityGroupsTemplateElement(xmlutil.TemplateElement):
def will_render(self, datum):
return "security_groups" in datum
def make_server(elem):
secgrps = SecurityGroupsTemplateElement('security_groups')
elem.append(secgrps)
secgrp = xmlutil.SubTemplateElement(secgrps, 'security_group',
selector="security_groups")
secgrp.set('name')
class SecurityGroupServerTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('server')
make_server(root)
return xmlutil.SlaveTemplate(root, 1)
class SecurityGroupServersTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('servers')
elem = xmlutil.SubTemplateElement(root, 'server', selector='servers')
make_server(elem)
return xmlutil.SlaveTemplate(root, 1)
class Security_groups(extensions.ExtensionDescriptor):
"""Security group support."""
name = "SecurityGroups"
alias = "os-security-groups"
namespace = "http://docs.openstack.org/compute/ext/securitygroups/api/v1.1"
updated = "2013-05-28T00:00:00+00:00"
def get_controller_extensions(self):
controller = SecurityGroupActionController()
actions = extensions.ControllerExtension(self, 'servers', controller)
controller = SecurityGroupsOutputController()
output = extensions.ControllerExtension(self, 'servers', controller)
return [actions, output]
def get_resources(self):
resources = []
res = extensions.ResourceExtension('os-security-groups',
controller=SecurityGroupController())
resources.append(res)
res = extensions.ResourceExtension('os-security-group-rules',
controller=SecurityGroupRulesController())
resources.append(res)
res = extensions.ResourceExtension(
'os-security-groups',
controller=ServerSecurityGroupController(),
parent=dict(member_name='server', collection_name='servers'))
resources.append(res)
return resources
class NativeSecurityGroupExceptions(object):
@staticmethod
def raise_invalid_property(msg):
raise exception.Invalid(msg)
@staticmethod
def raise_group_already_exists(msg):
raise exception.Invalid(msg)
@staticmethod
def raise_invalid_group(msg):
raise exception.Invalid(msg)
@staticmethod
def raise_invalid_cidr(cidr, decoding_exception=None):
raise exception.InvalidCidr(cidr=cidr)
@staticmethod
def raise_over_quota(msg):
raise exception.SecurityGroupLimitExceeded(msg)
@staticmethod
def raise_not_found(msg):
raise exception.SecurityGroupNotFound(msg)
class NativeNovaSecurityGroupAPI(NativeSecurityGroupExceptions,
compute_api.SecurityGroupAPI):
pass
class NativeNeutronSecurityGroupAPI(NativeSecurityGroupExceptions,
neutron_driver.SecurityGroupAPI):
pass
| apache-2.0 |
VielSoft/odoo | openerp/addons/base/__openerp__.py | 336 | 3703 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2012 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Base',
'version': '1.3',
'category': 'Hidden',
'description': """
The kernel of OpenERP, needed for all installations.
===================================================
""",
'author': 'OpenERP SA',
'maintainer': 'OpenERP SA',
'website': 'http://www.openerp.com',
'depends': [],
'data': [
'base_data.xml',
'res/res_currency_data.xml',
'res/res_country_data.xml',
'security/base_security.xml',
'base_menu.xml',
'res/res_config.xml',
'res/res.country.state.csv',
'ir/ir_actions.xml',
'ir/ir_config_parameter_view.xml',
'ir/ir_cron_view.xml',
'ir/ir_filters.xml',
'ir/ir_mail_server_view.xml',
'ir/ir_model_view.xml',
'ir/ir_attachment_view.xml',
'ir/ir_rule_view.xml',
'ir/ir_sequence_view.xml',
'ir/ir_translation_view.xml',
'ir/ir_ui_menu_view.xml',
'ir/ir_ui_view_view.xml',
'ir/ir_values_view.xml',
'ir/osv_memory_autovacuum.xml',
'ir/ir_model_report.xml',
'ir/ir_logging_view.xml',
'ir/ir_qweb.xml',
'workflow/workflow_view.xml',
'module/module_view.xml',
'module/module_data.xml',
'module/module_report.xml',
'module/wizard/base_module_update_view.xml',
'module/wizard/base_language_install_view.xml',
'module/wizard/base_import_language_view.xml',
'module/wizard/base_module_upgrade_view.xml',
'module/wizard/base_module_configuration_view.xml',
'module/wizard/base_export_language_view.xml',
'module/wizard/base_update_translations_view.xml',
'module/wizard/base_module_immediate_install.xml',
'res/res_company_view.xml',
'res/res_request_view.xml',
'res/res_lang_view.xml',
'res/res_partner_report.xml',
'res/res_partner_view.xml',
'res/res_bank_view.xml',
'res/res_country_view.xml',
'res/res_currency_view.xml',
'res/res_users_view.xml',
'res/res_partner_data.xml',
'res/ir_property_view.xml',
'res/res_security.xml',
'security/ir.model.access.csv',
],
'demo': [
'base_demo.xml',
'res/res_partner_demo.xml',
'res/res_partner_demo.yml',
'res/res_partner_image_demo.xml',
],
'test': [
'tests/base_test.yml',
'tests/test_osv_expression.yml',
'tests/test_ir_rule.yml', # <-- These tests modify/add/delete ir_rules.
],
'installable': True,
'auto_install': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
yinsu/grpc | src/python/grpcio_test/grpc_test/framework/interfaces/base/__init__.py | 1496 | 1530 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| bsd-3-clause |
netscaler/horizon | horizon/utils/fields.py | 7 | 4625 | from django.core.exceptions import ValidationError # noqa
from django.forms import forms
from django.forms import widgets
from django.utils.encoding import force_unicode # noqa
from django.utils.functional import Promise # noqa
from django.utils.html import conditional_escape # noqa
from django.utils.html import escape # noqa
from django.utils.translation import ugettext_lazy as _ # noqa
import netaddr
import re
ip_allowed_symbols_re = re.compile(r'^[a-fA-F0-9:/\.]+$')
IPv4 = 1
IPv6 = 2
class IPField(forms.Field):
"""
Form field for entering IP/range values, with validation.
Supports IPv4/IPv6 in the format:
.. xxx.xxx.xxx.xxx
.. xxx.xxx.xxx.xxx/zz
.. ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff
.. ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff/zz
and all compressed forms. Also the short forms
are supported:
xxx/yy
xxx.xxx/yy
.. attribute:: version
Specifies which IP version to validate,
valid values are 1 (fields.IPv4), 2 (fields.IPv6) or
both - 3 (fields.IPv4 | fields.IPv6).
Defaults to IPv4 (1)
.. attribute:: mask
Boolean flag to validate subnet masks along with IP address.
E.g: 10.0.0.1/32
.. attribute:: mask_range_from
Subnet range limitation, e.g. 16
That means the input mask will be checked to be in the range
16:max_value. Useful to limit the subnet ranges
to A/B/C-class networks.
"""
invalid_format_message = _("Incorrect format for IP address")
invalid_version_message = _("Invalid version for IP address")
invalid_mask_message = _("Invalid subnet mask")
max_v4_mask = 32
max_v6_mask = 128
def __init__(self, *args, **kwargs):
self.mask = kwargs.pop("mask", None)
self.min_mask = kwargs.pop("mask_range_from", 0)
self.version = kwargs.pop('version', IPv4)
super(IPField, self).__init__(*args, **kwargs)
def validate(self, value):
super(IPField, self).validate(value)
if not value and not self.required:
return
try:
if self.mask:
self.ip = netaddr.IPNetwork(value)
else:
self.ip = netaddr.IPAddress(value)
except Exception:
raise ValidationError(self.invalid_format_message)
if not any([self.version & IPv4 > 0 and self.ip.version == 4,
self.version & IPv6 > 0 and self.ip.version == 6]):
raise ValidationError(self.invalid_version_message)
if self.mask:
if self.ip.version == 4 and \
not self.min_mask <= self.ip.prefixlen <= self.max_v4_mask:
raise ValidationError(self.invalid_mask_message)
if self.ip.version == 6 and \
not self.min_mask <= self.ip.prefixlen <= self.max_v6_mask:
raise ValidationError(self.invalid_mask_message)
def clean(self, value):
super(IPField, self).clean(value)
return str(getattr(self, "ip", ""))
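# A minimal usage sketch for IPField above (the field configurations and
# addresses are invented examples):
#
#     subnet = IPField(version=IPv4 | IPv6, mask=True, mask_range_from=16)
#     subnet.clean("10.0.0.0/24")    # -> "10.0.0.0/24"
#     subnet.clean("10.0.0.0/8")     # raises ValidationError (mask below 16)
#
#     address = IPField(version=IPv4)
#     address.clean("192.168.1.1")   # -> "192.168.1.1"
#     address.clean("fe80::1")       # raises ValidationError (wrong IP version)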
class SelectWidget(widgets.Select):
"""
    Customizable select widget that allows rendering
    data-xxx attributes from choices.
.. attribute:: data_attrs
Specifies object properties to serialize as
data-xxx attribute. If passed ('id', ),
this will be rendered as:
<option data-id="123">option_value</option>
where 123 is the value of choice_value.id
.. attribute:: transform
A callable used to render the display value
from the option object.
"""
def __init__(self, attrs=None, choices=(), data_attrs=(), transform=None):
self.data_attrs = data_attrs
self.transform = transform
super(SelectWidget, self).__init__(attrs, choices)
def render_option(self, selected_choices, option_value, option_label):
option_value = force_unicode(option_value)
other_html = (option_value in selected_choices) and \
u' selected="selected"' or ''
if not isinstance(option_label, (basestring, Promise)):
for data_attr in self.data_attrs:
data_value = conditional_escape(
force_unicode(getattr(option_label,
data_attr, "")))
other_html += ' data-%s="%s"' % (data_attr, data_value)
if self.transform:
option_label = self.transform(option_label)
return u'<option value="%s"%s>%s</option>' % (
escape(option_value), other_html,
conditional_escape(force_unicode(option_label)))
| apache-2.0 |
cloudera/hue | desktop/core/ext-py/Babel-2.5.1/babel/messages/jslexer.py | 10 | 6329 | # -*- coding: utf-8 -*-
"""
babel.messages.jslexer
~~~~~~~~~~~~~~~~~~~~~~
A simple JavaScript 1.5 lexer which is used for the JavaScript
extractor.
:copyright: (c) 2013 by the Babel Team.
:license: BSD, see LICENSE for more details.
"""
from collections import namedtuple
import re
from babel._compat import unichr
operators = sorted([
'+', '-', '*', '%', '!=', '==', '<', '>', '<=', '>=', '=',
'+=', '-=', '*=', '%=', '<<', '>>', '>>>', '<<=', '>>=',
'>>>=', '&', '&=', '|', '|=', '&&', '||', '^', '^=', '(', ')',
'[', ']', '{', '}', '!', '--', '++', '~', ',', ';', '.', ':'
], key=len, reverse=True)
escapes = {'b': '\b', 'f': '\f', 'n': '\n', 'r': '\r', 't': '\t'}
name_re = re.compile(r'[\w$_][\w\d$_]*', re.UNICODE)
dotted_name_re = re.compile(r'[\w$_][\w\d$_.]*[\w\d$_.]', re.UNICODE)
division_re = re.compile(r'/=?')
regex_re = re.compile(r'/(?:[^/\\]*(?:\\.[^/\\]*)*)/[a-zA-Z]*', re.DOTALL)
line_re = re.compile(r'(\r\n|\n|\r)')
line_join_re = re.compile(r'\\' + line_re.pattern)
uni_escape_re = re.compile(r'[a-fA-F0-9]{1,4}')
Token = namedtuple('Token', 'type value lineno')
_rules = [
(None, re.compile(r'\s+', re.UNICODE)),
(None, re.compile(r'<!--.*')),
('linecomment', re.compile(r'//.*')),
('multilinecomment', re.compile(r'/\*.*?\*/', re.UNICODE | re.DOTALL)),
('dotted_name', dotted_name_re),
('name', name_re),
('number', re.compile(r'''(
(?:0|[1-9]\d*)
(\.\d+)?
([eE][-+]?\d+)? |
(0x[a-fA-F0-9]+)
)''', re.VERBOSE)),
('jsx_tag', re.compile(r'(?:</?[^>\s]+|/>)', re.I)), # May be mangled in `get_rules`
('operator', re.compile(r'(%s)' % '|'.join(map(re.escape, operators)))),
('template_string', re.compile(r'''`(?:[^`\\]*(?:\\.[^`\\]*)*)`''', re.UNICODE)),
('string', re.compile(r'''(
'(?:[^'\\]*(?:\\.[^'\\]*)*)' |
"(?:[^"\\]*(?:\\.[^"\\]*)*)"
)''', re.VERBOSE | re.DOTALL))
]
def get_rules(jsx, dotted, template_string):
"""
Get a tokenization rule list given the passed syntax options.
Internal to this module.
"""
rules = []
for token_type, rule in _rules:
if not jsx and token_type and 'jsx' in token_type:
continue
if not template_string and token_type == 'template_string':
continue
if token_type == 'dotted_name':
if not dotted:
continue
token_type = 'name'
rules.append((token_type, rule))
return rules
def indicates_division(token):
"""A helper function that helps the tokenizer to decide if the current
token may be followed by a division operator.
"""
if token.type == 'operator':
return token.value in (')', ']', '}', '++', '--')
return token.type in ('name', 'number', 'string', 'regexp')
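# For example (illustrative tokens only): a '/' after a closing bracket or an
# identifier is treated as division, while after most operators it starts a
# regular expression literal:
#
#     indicates_division(Token('operator', ')', 1))   # -> True
#     indicates_division(Token('operator', '=', 1))   # -> False
#     indicates_division(Token('name', 'foo', 1))     # -> True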
def unquote_string(string):
"""Unquote a string with JavaScript rules. The string has to start with
string delimiters (``'``, ``"`` or the back-tick/grave accent (for template strings).)
"""
assert string and string[0] == string[-1] and string[0] in '"\'`', \
'string provided is not properly delimited'
string = line_join_re.sub('\\1', string[1:-1])
result = []
add = result.append
pos = 0
while 1:
# scan for the next escape
escape_pos = string.find('\\', pos)
if escape_pos < 0:
break
add(string[pos:escape_pos])
# check which character is escaped
next_char = string[escape_pos + 1]
if next_char in escapes:
add(escapes[next_char])
        # unicode escapes. try to consume up to four hexadecimal
        # characters and interpret them as a unicode code point. If
        # there is no such code point, put all the consumed characters
        # into the string.
elif next_char in 'uU':
escaped = uni_escape_re.match(string, escape_pos + 2)
if escaped is not None:
escaped_value = escaped.group()
if len(escaped_value) == 4:
try:
add(unichr(int(escaped_value, 16)))
except ValueError:
pass
else:
pos = escape_pos + 6
continue
add(next_char + escaped_value)
pos = escaped.end()
continue
else:
add(next_char)
# bogus escape. Just remove the backslash.
else:
add(next_char)
pos = escape_pos + 2
if pos < len(string):
add(string[pos:])
return u''.join(result)
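# A small illustration of the unquoting rules above (inputs are made-up
# JavaScript string literals, written here as Python source literals):
#
#     unquote_string(u'"hello\\nworld"')   # -> u'hello\nworld'
#     unquote_string(u"'it\\u0073'")       # -> u'its'
#     unquote_string(u'`template`')        # -> u'template'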
def tokenize(source, jsx=True, dotted=True, template_string=True):
"""
Tokenize JavaScript/JSX source. Returns a generator of tokens.
:param jsx: Enable (limited) JSX parsing.
:param dotted: Read dotted names as single name token.
:param template_string: Support ES6 template strings
"""
may_divide = False
pos = 0
lineno = 1
end = len(source)
rules = get_rules(jsx=jsx, dotted=dotted, template_string=template_string)
while pos < end:
# handle regular rules first
for token_type, rule in rules:
match = rule.match(source, pos)
if match is not None:
break
# if we don't have a match we don't give up yet, but check for
# division operators or regular expression literals, based on
# the status of `may_divide` which is determined by the last
# processed non-whitespace token using `indicates_division`.
else:
if may_divide:
match = division_re.match(source, pos)
token_type = 'operator'
else:
match = regex_re.match(source, pos)
token_type = 'regexp'
if match is None:
# woops. invalid syntax. jump one char ahead and try again.
pos += 1
continue
token_value = match.group()
if token_type is not None:
token = Token(token_type, token_value, lineno)
may_divide = indicates_division(token)
yield token
lineno += len(line_re.findall(token_value))
pos = match.end()
| apache-2.0 |
TheDegree0/menescraper | menescraper/menescraper/lib/python2.7/site-packages/setuptools/tests/test_egg_info.py | 333 | 5401 |
import os
import sys
import tempfile
import shutil
import unittest
import pkg_resources
import warnings
from setuptools.command import egg_info
from setuptools import svn_utils
from setuptools.tests import environment, test_svn
from setuptools.tests.py26compat import skipIf
ENTRIES_V10 = pkg_resources.resource_string(__name__, 'entries-v10')
"An entries file generated with svn 1.6.17 against the legacy Setuptools repo"
class TestEggInfo(unittest.TestCase):
def setUp(self):
self.test_dir = tempfile.mkdtemp()
os.mkdir(os.path.join(self.test_dir, '.svn'))
self.old_cwd = os.getcwd()
os.chdir(self.test_dir)
def tearDown(self):
os.chdir(self.old_cwd)
shutil.rmtree(self.test_dir)
def _write_entries(self, entries):
fn = os.path.join(self.test_dir, '.svn', 'entries')
entries_f = open(fn, 'wb')
entries_f.write(entries)
entries_f.close()
    @skipIf(not test_svn._svn_check, "No SVN to test, in the first place")
def test_version_10_format(self):
"""
"""
        #keeping this set for 1.6 is a good check on the get_svn_revision
        #to ensure what is returned using svnversion matches what would have been returned
version_str = svn_utils.SvnInfo.get_svn_version()
version = [int(x) for x in version_str.split('.')[:2]]
if version != [1, 6]:
if hasattr(self, 'skipTest'):
self.skipTest('')
else:
sys.stderr.write('\n Skipping due to SVN Version\n')
return
self._write_entries(ENTRIES_V10)
rev = egg_info.egg_info.get_svn_revision()
self.assertEqual(rev, '89000')
def test_version_10_format_legacy_parser(self):
"""
"""
path_variable = None
for env in os.environ:
if env.lower() == 'path':
path_variable = env
if path_variable:
old_path = os.environ[path_variable]
os.environ[path_variable] = ''
#catch_warnings not available until py26
warning_filters = warnings.filters
warnings.filters = warning_filters[:]
try:
warnings.simplefilter("ignore", DeprecationWarning)
self._write_entries(ENTRIES_V10)
rev = egg_info.egg_info.get_svn_revision()
finally:
#restore the warning filters
warnings.filters = warning_filters
#restore the os path
if path_variable:
os.environ[path_variable] = old_path
self.assertEqual(rev, '89000')
DUMMY_SOURCE_TXT = """CHANGES.txt
CONTRIBUTORS.txt
HISTORY.txt
LICENSE
MANIFEST.in
README.txt
setup.py
dummy/__init__.py
dummy/test.txt
dummy.egg-info/PKG-INFO
dummy.egg-info/SOURCES.txt
dummy.egg-info/dependency_links.txt
dummy.egg-info/top_level.txt"""
class TestSvnDummy(environment.ZippedEnvironment):
def setUp(self):
version = svn_utils.SvnInfo.get_svn_version()
if not version: # None or Empty
return None
self.base_version = tuple([int(x) for x in version.split('.')][:2])
if not self.base_version:
raise ValueError('No SVN tools installed')
elif self.base_version < (1, 3):
raise ValueError('Insufficient SVN Version %s' % version)
elif self.base_version >= (1, 9):
#trying the latest version
self.base_version = (1, 8)
self.dataname = "dummy%i%i" % self.base_version
self.datafile = os.path.join('setuptools', 'tests',
'svn_data', self.dataname + ".zip")
super(TestSvnDummy, self).setUp()
    @skipIf(not test_svn._svn_check, "No SVN to test, in the first place")
def test_sources(self):
code, data = environment.run_setup_py(["sdist"],
pypath=self.old_cwd,
data_stream=1)
if code:
raise AssertionError(data)
sources = os.path.join('dummy.egg-info', 'SOURCES.txt')
infile = open(sources, 'r')
try:
read_contents = infile.read()
finally:
infile.close()
del infile
self.assertEqual(DUMMY_SOURCE_TXT, read_contents)
return data
class TestSvnDummyLegacy(environment.ZippedEnvironment):
def setUp(self):
self.base_version = (1, 6)
self.dataname = "dummy%i%i" % self.base_version
self.datafile = os.path.join('setuptools', 'tests',
'svn_data', self.dataname + ".zip")
super(TestSvnDummyLegacy, self).setUp()
def test_sources(self):
code, data = environment.run_setup_py(["sdist"],
pypath=self.old_cwd,
path="",
data_stream=1)
if code:
raise AssertionError(data)
sources = os.path.join('dummy.egg-info', 'SOURCES.txt')
infile = open(sources, 'r')
try:
read_contents = infile.read()
finally:
infile.close()
del infile
self.assertEqual(DUMMY_SOURCE_TXT, read_contents)
return data
def test_suite():
return unittest.defaultTestLoader.loadTestsFromName(__name__)
| gpl-2.0 |
inejc/nnlib | tests/test_optimizers.py | 1 | 2727 | from unittest import TestCase
from nnlib.layers import Layer, ParamGradNames
from nnlib.optimizers import SGD, SGDMomentum
class DummyLayer(Layer):
def __init__(self):
self.dummy_param0 = 10
self.dummy_grad0 = 2
self.dummy_param1 = 5
self.dummy_grad1 = 1
def forward(self):
pass
def backward(self):
pass
def get_updatable_params_grads_names(self):
return [
ParamGradNames(param_name='dummy_param0', grad_name='dummy_grad0'),
ParamGradNames(param_name='dummy_param1', grad_name='dummy_grad1')
]
class SGDTest(TestCase):
def setUp(self):
self.sgd = SGD(lr=0.5)
self.sgd_m = SGDMomentum(lr=0.5, nesterov=False)
self.sgd_m_n = SGDMomentum(lr=0.5)
self.layer = DummyLayer()
def test_register_layer_sgd(self):
self.assertEqual(len(self.sgd._layers), 0)
self.sgd.register_layer(self.layer)
self.assertEqual(len(self.sgd._layers), 1)
self.sgd.register_layer(self.layer)
self.assertEqual(len(self.sgd._layers), 2)
def test_register_layer_sgd_m(self):
self.assertEqual(len(self.sgd_m._layers_caches), 0)
self.sgd_m.register_layer(self.layer)
self.assertEqual(len(self.sgd_m._layers_caches), 1)
self.sgd_m.register_layer(self.layer)
self.assertEqual(len(self.sgd_m._layers_caches), 2)
def test_register_layer_sgd_m_n(self):
self.assertEqual(len(self.sgd_m_n._layers_caches), 0)
self.sgd_m_n.register_layer(self.layer)
self.assertEqual(len(self.sgd_m_n._layers_caches), 1)
self.sgd_m_n.register_layer(self.layer)
self.assertEqual(len(self.sgd_m_n._layers_caches), 2)
def test_make_updates_sgd(self):
self.sgd.register_layer(self.layer)
self.sgd.update_layers()
self.assertEqual(self.layer.dummy_param0, 9)
self.assertEqual(self.layer.dummy_param1, 4.5)
def test_make_updates_sgd_m(self):
self.sgd_m.register_layer(self.layer)
self.sgd_m.update_layers()
self.assertEqual(self.layer.dummy_param0, 9)
self.assertEqual(self.layer.dummy_param1, 4.5)
self.sgd_m.update_layers()
self.assertEqual(self.layer.dummy_param0, 7.1)
self.assertEqual(self.layer.dummy_param1, 3.55)
def test_make_updates_sgd_m_n(self):
self.sgd_m_n.register_layer(self.layer)
self.sgd_m_n.update_layers()
self.assertEqual(self.layer.dummy_param0, 8.1)
self.assertEqual(self.layer.dummy_param1, 4.05)
self.sgd_m_n.update_layers()
self.assertEqual(self.layer.dummy_param0, 5.39)
self.assertEqual(self.layer.dummy_param1, 2.695)
| mit |
jonasrk/volunteer_planner | blueprint/views.py | 7 | 2391 | # coding: utf-8
import json
from django.http.response import HttpResponse
from django.views.generic import TemplateView
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import user_passes_test
from dateutil.parser import parse
from scheduler.models import Location, Need
from .models import BluePrintCreator
class SuperuserRequiredMixin(object):
@method_decorator(user_passes_test(lambda u: u.is_superuser))
def dispatch(self, *args, **kwargs):
return super(SuperuserRequiredMixin, self).dispatch(*args, **kwargs)
class ExecuteBluePrintView(SuperuserRequiredMixin, TemplateView):
template_name = "blueprint_executor.html"
def get_context_data(self, **kwargs):
if 'locations' not in kwargs:
kwargs['locations'] = Location.objects.all()
return kwargs
@user_passes_test(lambda u: u.is_superuser)
def generate_blueprint(request):
if request.method == 'POST' and request.is_ajax():
locations = json.loads(request.POST.get('locations'))
for location_id in locations:
location = Location.objects.get(pk=location_id)
blueprint = BluePrintCreator.objects.get(location=location)
message = []
for need in blueprint.needs.all():
time_from = parse(
request.POST.get('date') + " " + need.from_time,
ignoretz=True, fuzzy=True)
time_to = parse(request.POST.get('date') + " " + need.to_time,
ignoretz=True, fuzzy=True)
# TODO: remove string casting dates here??
if Need.objects.filter(topic=need.topic,
location=location,
starting_time=str(time_from),
ending_time=str(time_to)).count() > 0:
                    message.append('Already exists')
else:
Need.objects.create(topic=need.topic, location=location,
starting_time=time_from,
ending_time=time_to, slots=need.slots)
                    message.append('Has been created!')
return HttpResponse(json.dumps({"data": message}),
content_type="application/json")
| agpl-3.0 |
antonve/s4-project-mooc | common/djangoapps/course_about/tests/test_api.py | 19 | 1861 | """
Tests the logical Python API layer of the Course About API.
"""
import ddt
import json
import unittest
from django.core.urlresolvers import reverse
from rest_framework.test import APITestCase
from rest_framework import status
from django.conf import settings
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, CourseAboutFactory
from student.tests.factories import UserFactory
@ddt.ddt
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
class CourseInfoTest(ModuleStoreTestCase, APITestCase):
"""
Test course information.
"""
USERNAME = "Bob"
EMAIL = "[email protected]"
PASSWORD = "edx"
def setUp(self):
""" Create a course"""
super(CourseInfoTest, self).setUp()
self.course = CourseFactory.create()
self.user = UserFactory.create(username=self.USERNAME, email=self.EMAIL, password=self.PASSWORD)
self.client.login(username=self.USERNAME, password=self.PASSWORD)
def test_get_course_details_from_cache(self):
kwargs = dict()
kwargs["course_id"] = self.course.id
kwargs["course_runtime"] = self.course.runtime
kwargs["user_id"] = self.user.id
CourseAboutFactory.create(**kwargs)
resp = self.client.get(
reverse('courseabout', kwargs={"course_id": unicode(self.course.id)})
)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
resp_data = json.loads(resp.content)
self.assertIsNotNone(resp_data)
resp = self.client.get(
reverse('courseabout', kwargs={"course_id": unicode(self.course.id)})
)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
resp_data = json.loads(resp.content)
self.assertIsNotNone(resp_data)
| agpl-3.0 |
10clouds/edx-platform | lms/djangoapps/branding/__init__.py | 22 | 3065 | """
EdX Branding package.
Provides a way to retrieve "branded" parts of the site.
This module provides functions to retrieve basic branded parts
such as the site visible courses, university name and logo.
"""
from xmodule.modulestore.django import modulestore
from xmodule.course_module import CourseDescriptor
from django.conf import settings
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from microsite_configuration import microsite
from django.contrib.staticfiles.storage import staticfiles_storage
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
def get_visible_courses(org=None, filter_=None):
"""
Return the set of CourseOverviews that should be visible in this branded
instance.
Arguments:
org (string): Optional parameter that allows case-insensitive
filtering by organization.
filter_ (dict): Optional parameter that allows custom filtering by
fields on the course.
"""
microsite_org = microsite.get_value('course_org_filter')
if org and microsite_org:
# When called in the context of a microsite, return an empty result if the org
# passed by the caller does not match the designated microsite org.
courses = CourseOverview.get_all_courses(
org=org,
filter_=filter_,
) if org == microsite_org else []
else:
# We only make it to this point if one of org or microsite_org is defined.
# If both org and microsite_org were defined, the code would have fallen into the
# first branch of the conditional above, wherein an equality check is performed.
target_org = org or microsite_org
courses = CourseOverview.get_all_courses(org=target_org, filter_=filter_)
courses = sorted(courses, key=lambda course: course.number)
# When called in the context of a microsite, filtering can stop here.
if microsite_org:
return courses
# See if we have filtered course listings in this domain
filtered_visible_ids = None
# this is legacy format which is outside of the microsite feature -- also handle dev case, which should not filter
subdomain = microsite.get_value('subdomain', 'default')
if hasattr(settings, 'COURSE_LISTINGS') and subdomain in settings.COURSE_LISTINGS and not settings.DEBUG:
filtered_visible_ids = frozenset(
[SlashSeparatedCourseKey.from_deprecated_string(c) for c in settings.COURSE_LISTINGS[subdomain]]
)
if filtered_visible_ids:
return [course for course in courses if course.id in filtered_visible_ids]
else:
# Filter out any courses belonging to a microsite, to avoid leaking these.
microsite_orgs = microsite.get_all_orgs()
return [course for course in courses if course.location.org not in microsite_orgs]
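# Example call (illustrative only; the org and filter values are hypothetical):
#
#     visible = get_visible_courses(org='SampleX',
#                                   filter_={'mobile_available': True})
#
# Outside of a microsite this typically returns the matching CourseOverviews
# sorted by course number, with courses belonging to microsite orgs filtered
# out.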
def get_university_for_request():
"""
Return the university name specified for the domain, or None
if no university was specified
"""
return microsite.get_value('university')
| agpl-3.0 |
gisce/OCB | openerp/report/printscreen/__init__.py | 77 | 1239 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import ps_list
import ps_form
#.apidoc title: Printscreen Support
""" A special report, that is automatically formatted to look like the
screen contents of Form/List Views.
"""
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
cantora/mozz | mozz/session.py | 1 | 16225 | # Copyright 2013 anthony cantor
# This file is part of mozz.
#
# mozz is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# mozz is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with mozz. If not, see <http://www.gnu.org/licenses/>.
import os
from collections import namedtuple
import struct
import mozz.err
from mozz.cb import *
import mozz.log
import mozz.abi.endian
from mozz import location
class SessionErr(mozz.err.Err):
pass
class FunctionContext(object):
'''
placeholder. still needs to be implemented
'''
pass
class Addr(object):
def value(self, inferior):
'''
convert this address into an integer value,
potentially using the inferior to resolve.
'''
raise NotImplementedError("not implemented")
class SymbolOffset(namedtuple('SymbolOffsetBase', 'name offset'), Addr):
'''
represents the numeric value of a symbol + some offset
'''
def __init__(self, name, offset):
if not isinstance(offset, (int, long)):
offset = 0
super(SymbolOffset, self).__init__(name, offset)
def value(self, inferior):
return inferior.symbol_addr(self.name) + self.offset
class NumericAddr(namedtuple('NumericAddrBase', 'addr base'), Addr):
def __init__(self, addr, base):
'''
a runtime address. @addr should be an
integer. if @base is a `str` then @base will
be looked up as a section, the runtime
segment mapping of that section shall be
determined, and @addr will be treated as
an offset into the segment in which the
section resides.
'''
if not isinstance(base, str):
base = None
super(NumericAddr, self).__init__(addr, base)
def value(self, inferior):
'''
havent implemented self.base functionality yet,
so only absolute addresses are supported now
'''
return self.addr
def addr_from_int(n):
return NumericAddr(n, None)
def convert_values_to_addrs(*args):
result = []
for x in args:
if isinstance(x, (int,long)):
newx = NumericAddr(x, None)
elif isinstance(x, str):
newx = SymbolOffset(x, 0)
elif isinstance(x, tuple) and len(x) == 2 \
and isinstance(x[0], str) \
and isinstance(x[1], (int,long)):
newx = SymbolOffset(*x)
elif isinstance(x, Addr):
newx = x
else:
raise TypeError("invalid input address %r" % x)
result.append(newx)
return tuple(result)
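# Examples of the coercions performed by convert_values_to_addrs above
# (addresses and symbol names are invented):
#
#     convert_values_to_addrs(0x400000)      # -> (NumericAddr(0x400000, None),)
#     convert_values_to_addrs("main")        # -> (SymbolOffset("main", 0),)
#     convert_values_to_addrs(("main", 8))   # -> (SymbolOffset("main", 8),)
#
# NumericAddr values resolve to themselves; SymbolOffset values are resolved
# against the inferior's symbol table when .value(inferior) is called.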
class Session(object):
def __init__(self, target, limit=1):
'''
@target: the target binary path
@limit: the maximum number of times to
run the session before stopping.
'''
self.event_cbs = {}
self.addr_cbs = {}
self.mockups = {}
self.function_cbs = {}
self.function_mockups = {}
self.skip_map = {}
self.break_counts = {}
self.n = 0
self.set_little_endian()
self._stack_grows_down = True
self.target = target
self._target_args = tuple([])
self._target_kwargs = {}
self.calling_convention = None
'''
valid keyword args:
'stdin': IOConfig instance
'stdout': IOConfig instance
'stderr': IOConfig instance
'''
self.flags = {}
self.flag_finished = False
if limit >= 0:
self.limit = limit
else:
self.limit = 1
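    # A minimal construction sketch (the target path and argument values
    # below are invented):
    #
    #     sess = Session('./target.bin', limit=3)
    #     sess.set_target_args('--input', '/tmp/case')
    #
    # `limit` caps how many times the inferior is run (0 means unlimited,
    # negative values fall back to 1); target_args/target_kwargs are passed
    # through to the inferior when the host runs the session.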
def set_calling_convention(self, cc):
self.calling_convention = cc
def set_little_endian(self):
self._endian = mozz.abi.endian.Little
def set_big_endian(self):
self._endian = mozz.abi.endian.Big
def set_stack_grows_up(self):
self._stack_grows_down = False
def endian(self):
return self._endian
def stack_grows_down(self):
return self._stack_grows_down
@property
def target_args(self):
return self._target_args
@property
def target_kwargs(self):
return self._target_kwargs
def set_target_args(self, *args):
self._target_args = args
def set_target_kwargs(self, **kwargs):
self._target_kwargs = kwargs
def iteration(self):
return self.n
def add_event_cb_fn(self, name):
def tmp(fn):
if name not in self.event_cbs:
self.event_cbs[name] = []
self.event_cbs[name].append(fn)
return fn
return tmp
def remove_event_cb_fn(self, name, fn):
if name not in self.event_cbs:
return False
if fn not in self.event_cbs[name]:
return False
self.event_cbs[name] = [
x for x in self.event_cbs[name] if x != fn
]
return True
def add_addr_cb_fn(self, addr, *args, **kwargs):
def tmp(fn):
if addr not in self.addr_cbs:
self.addr_cbs[addr] = []
self.addr_cbs[addr].append((fn, args, kwargs))
return fn
return tmp
def del_addr_cb_fn(self, addr, fn):
(addr,) = convert_values_to_addrs(addr)
if addr not in self.addr_cbs:
return False
found = False
new_list = []
for (func, args, kwargs) in self.addr_cbs[addr]:
if fn == func:
found = True
else:
new_list.append((func, args, kwargs))
if found:
self.addr_cbs[addr] = new_list
return True
else:
return False
def on_inferior_pre(self):
'''
called just after inferior object is created
and before it is run
'''
return self.add_event_cb_fn(INFERIOR_PRE)
def del_cb_inferior_pre(self, fn):
return self.remove_event_cb_fn(INFERIOR_PRE, fn)
def on_inferior_post(self):
'''
called just after inferior finishes and just
before it the inferior object is destroyed
'''
return self.add_event_cb_fn(INFERIOR_POST)
def del_cb_inferior_post(self, fn):
return self.remove_event_cb_fn(INFERIOR_POST, fn)
def at_entry(self):
'''
invoke the decorated function at the execution of
the entry point
'''
return self.add_event_cb_fn(ENTRY)
def del_cb_entry(self, fn):
return self.remove_event_cb_fn(ENTRY, fn)
def on_step(self):
'''
invoke the decorated function each time the inferior
stops in step mode. this will only be called once per
instruction per stop, i.e. once every time the inferior
steps.
'''
return self.add_event_cb_fn(STEP)
def del_cb_step(self, fn):
return self.remove_event_cb_fn(STEP, fn)
def at_function(self, addr, *args, **kwargs):
(addr,) = convert_values_to_addrs(addr)
def tmp(fn):
self.function_cbs[addr] = (fn, args, kwargs)
return fn
return tmp
def mockup_function(self, addr, *args, **kwargs):
(addr,) = convert_values_to_addrs(addr)
def tmp(fn):
self.function_mockups[addr] = (fn, args, kwargs)
return fn
return tmp
def at_addr(self, addr, *args, **kwargs):
(addr,) = convert_values_to_addrs(addr)
return self.add_addr_cb_fn(addr, *args, **kwargs)
def mockup(self, addr, jmp, *args, **kwargs):
(addr, jmp) = convert_values_to_addrs(addr, jmp)
def tmp(fn):
self.mockups[addr] = (fn, jmp, kwargs)
return fn
return tmp
def skip(self, addr, end, *args, **kwargs):
'''
skip instructions at address [@addr, @end). in otherwords,
jump to @end when we arrive at @addr
'''
(addr, end) = convert_values_to_addrs(addr, end)
self.skip_map[addr] = (end, args, kwargs)
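    # A hedged usage sketch of the hook registration methods above (all
    # addresses and symbol names are invented):
    #
    #     @sess.at_addr(0x400804)
    #     def hit(host):
    #         print("reached 0x400804")
    #
    #     @sess.mockup("check_license", ("check_license", 0x2a))
    #     def fake_check(host):
    #         pass    # runs at the address, then execution jumps to the jmp target
    #
    #     sess.skip(0x400810, 0x400820)   # jump from 0x400810 straight to 0x400820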
def on_run(self):
'''
invoke this callback when the host is ready to
run the session.
'''
return self.add_event_cb_fn(RUN)
def del_cb_run(self, fn):
return self.remove_event_cb_fn(RUN, fn)
def on_finish(self):
'''
invoke this callback when the session is finished
and about to be destroyed.
'''
return self.add_event_cb_fn(FINISH)
def del_cb_finish(self, fn):
return self.remove_event_cb_fn(FINISH, fn)
def on_signal_default(self):
return self.add_event_cb_fn(SIGNAL_DEFAULT)
def del_cb_signal_default(self, fn):
return self.remove_event_cb_fn(SIGNAL_DEFAULT, fn)
def on_signal(self, sig):
return self.add_event_cb_fn(sig)
def del_cb_signal(self, sig, fn):
return self.remove_event_cb_fn(sig, fn)
def on_signal_unknown(self):
return self.add_event_cb_fn(SIGNAL_UNKNOWN)
def del_cb_signal_unknown(self, fn):
return self.remove_event_cb_fn(SIGNAL_UNKNOWN, fn)
def on_start(self):
'''
just before an inferior starts (after being stopped)
'''
return self.add_event_cb_fn(START)
def del_cb_start(self, fn):
return self.remove_event_cb_fn(START, fn)
def on_obj_load(self):
'''
just after an object is loaded
'''
return self.add_event_cb_fn(OBJ_LOAD)
def del_cb_obj_load(self, fn):
return self.remove_event_cb_fn(OBJ_LOAD, fn)
def on_exit(self):
return self.add_event_cb_fn(EXIT)
def del_cb_exit(self, fn):
return self.remove_event_cb_fn(EXIT, fn)
def process_event(self, name, *args, **kwargs):
if name == INFERIOR_PRE:
self.n += 1
elif name == INFERIOR_POST:
if self.limit > 0 and self.n >= self.limit:
self.set_flag_finished()
def notify_event(self, name, *args, **kwargs):
self.process_event(name, *args, **kwargs)
handled = False
if not name in self.event_cbs \
or len(self.event_cbs[name]) < 1:
return False
for fn in self.event_cbs[name]:
if not callable(fn):
continue
fn(*args, **kwargs)
handled = True
return handled
def find_addrs(self, d, addr, inferior):
i = addr.value(inferior)
for (k, v) in d.items():
kval = k.value(inferior)
if kval == i:
yield (k, v)
def inc_break_count(self, addr):
if not addr in self.break_counts:
self.break_counts[addr] = 1
else:
self.break_counts[addr] += 1
def break_count(self, addr):
if not addr in self.break_counts:
return 0
else:
return self.break_counts[addr]
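# Dispatch rules for notify_addr() below: at_addr and at_function callbacks
# always run; mockups, function mockups and skips form a precedence chain
# (mockup first, then function mockup, then skip) and at most one of them
# fires per address.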
def notify_addr(self, addr, host, *args, **kwargs):
#mozz.log.debug("notify address %r" % (addr,))
handled = False
mockup_handled = False
skip_handled = False
for (_, ls) in self.find_addrs(self.addr_cbs, addr, host.inferior()):
for (fn, _, options) in ls:
if not callable(fn):
continue
extargs = self.make_addr_ext_args(host, **options)
handled = True
fn(host, *(extargs + args), **kwargs)
for (_, (fn, proto_args, options)) in self.find_addrs(self.function_cbs, addr, host.inferior()):
if not callable(fn):
continue
handled = True
self.do_function_callback(host, addr, fn, proto_args, options, *args, **kwargs)
for (_, (fn, jmp, options)) in self.find_addrs(self.mockups, addr, host.inferior()):
if not callable(fn):
continue
mockup_handled = True
self.do_mockup_callback(host, fn, jmp, options, *args, **kwargs)
break # we can only do one mockup per address
#skips and function mockups have lower precedence than mockups
if not mockup_handled:
for (_, (fn, proto_args, options)) in self.find_addrs(self.function_mockups, addr, host.inferior()):
if not callable(fn):
continue
mockup_handled = True
self.do_function_mockup(host, addr, fn, proto_args, options, *args, **kwargs)
break # only one mockup per address
if not mockup_handled:
for (_, (jmp, _, options)) in self.find_addrs(self.skip_map, addr, host.inferior()):
skip_handled = True
self.do_jmp(host, jmp, **options)
break # one skip per address
self.inc_break_count(addr)
return handled or mockup_handled or skip_handled
def make_addr_ext_args(self, host, **kwargs):
locargs = []
if 'locs' in kwargs:
for loc in kwargs['locs']:
if isinstance(loc, str):
locargs.append(host.inferior().reg(loc))
elif isinstance(loc, location.Register):
locargs.append(host.inferior().reg(loc.name()))
elif isinstance(loc, location.RegOffset):
val = loc.value(host)
locargs.append(val)
return tuple(locargs)
def do_mockup_callback(self, host, fn, jmp, options, *args, **kwargs):
extargs = self.make_addr_ext_args(host, **options)
args = extargs + args
fn(host, *args, **kwargs)
self.do_jmp(host, jmp, **options)
def get_function_arg_vals(self, host, proto_args):
cc = self.calling_convention(host)
arg_vals = []
for i in range(len(proto_args)):
arg = proto_args[i]
arg_vals.append(arg(self.endian(), *cc.arg(arg, i+1)))
return arg_vals
def do_function_callback(self, host, addr, fn, proto_args, options, *args, **kwargs):
if not self.calling_convention:
raise Exception("a calling convention must " + \
"be set to use function callbacks")
arg_vals = self.get_function_arg_vals(host, proto_args)
break_count = self.break_count(addr)
fn_ctx = FunctionContext()
fn(host, fn_ctx, break_count, *arg_vals)
def do_function_mockup(self, host, addr, fn, proto_args, options, *args, **kwargs):
if not self.calling_convention:
raise Exception("a calling convention must " + \
"be set to use function mockups")
arg_vals = self.get_function_arg_vals(host, proto_args)
break_count = self.break_count(addr)
fn_ctx = FunctionContext()
ret_val = fn(host, fn_ctx, break_count, *arg_vals)
if ret_val is None:
return # mockup cancelled: fn returned None
cc = self.calling_convention(host)
@host.with_inferior()
def tmp(host):
cc.set_return_value(ret_val)
cc.do_return()
def do_jmp(self, host, addr, **kwargs):
@host.with_inferior()
def set_pc(host):
self.do_regstate(host, addr, **kwargs)
host.inferior().reg_set_pc(addr.value(host.inferior()))
def do_regstate(self, host, addr, **kwargs):
if 'regstate' in kwargs:
for (reg, val) in kwargs['regstate'].items():
host.inferior().reg_set(reg, val)
def notify_event_run(self, host):
return self.notify_event(RUN, host)
def notify_event_finish(self, host):
return self.notify_event(FINISH, host)
def clear_flags(self):
self.flags = {}
def set_flag(self, name):
self.flags[name] = True
def set_flag_stop(self):
'''
signals that the current inferior should be
aborted and cleaned up. use this flag in a callback
to cause host.run_inferior() to return.
'''
return self.set_flag("stop")
def get_flag(self, name):
if name in self.flags \
and self.flags[name] == True:
return True
return False
def get_flag_stop(self):
return self.get_flag("stop")
def set_flag_finished(self):
'''
once set, this flag shouldn't be
reset by `clear_flags`, so we don't use
the dictionary for this flag
'''
self.flag_finished = True
def get_flag_finished(self):
return self.flag_finished
def each_break_addr(self, inferior):
for addr in self.addr_cbs.keys():
yield addr.value(inferior)
for addr in self.mockups.keys():
yield addr.value(inferior)
for addr in self.function_cbs.keys():
yield addr.value(inferior)
for addr in self.function_mockups.keys():
yield addr.value(inferior)
for addr in self.skip_map.keys():
yield addr.value(inferior)
#-------- helpers -------------
def log_at(self, addr, *args, **kwargs):
if 'locs' in kwargs:
locs_tpl = kwargs['locs']
if 'memfmt' in kwargs:
memfmt = kwargs['memfmt']
else:
memfmt = None
@self.at_addr(addr, locs=locs_tpl)
def tmp(host, *loc_vals):
if 'msg' not in kwargs:
kwargs['msg'] = "at %x" % host.inferior().reg_pc()
host.log("%s:" % kwargs['msg'])
for i in range(len(locs_tpl)):
v = loc_vals[i]
if isinstance(v, str):
if memfmt:
tpl = struct.unpack(memfmt, v)
str_val = " ".join(map(lambda x: "0x%x" % x, tpl))
else:
str_val = mozz.util.bin_to_hex(v)
else:
str_val = "0x%x" % v
loc_name = str(locs_tpl[i])
host.log(" %s = %s" % (loc_name, str_val))
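# Illustrative use of log_at() above (the address and register names are
# hypothetical): sess.log_at(0x400a2c, locs=("rdi", "rsi")) logs those
# registers every time 0x400a2c is hit.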
def quit_at(self, addr):
@self.at_addr(addr)
def tmp(host):
self.set_flag_stop()
def trace_function(self, addr, *args, **kwargs):
@self.at_function(addr, *args, **kwargs)
def trace(host, ctx, brks, *arg_vals):
if isinstance(addr, int) or isinstance(addr, long):
fn_name = "0x%x" % addr
else:
fn_name = str(addr)
host.log("at function %s:" % fn_name)
for i in range(len(arg_vals)):
val = arg_vals[i].value()
if isinstance(val, str):
if 'memfmt' in kwargs:
tpl = struct.unpack(kwargs['memfmt'], val)
str_val = " ".join(map(lambda x: "0x%x" % x, tpl))
else:
str_val = mozz.util.bin_to_hex(val)
elif isinstance(val, int) or isinstance(val, long):
str_val = "0x%x" % val
else:
str_val = repr(val)
host.log("  arg%d: %s" % (i, str_val))
| gpl-3.0 |
gustavofonseca/scielo-manager | scielomanager/journalmanager/migrations/0022_fix_nulls_in_notes.py | 3 | 33496 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
# Note: Don't use "from appname.models import ModelName".
# Use orm.ModelName to refer to models in this application,
# and orm['appname.ModelName'] for models in other applications.
orm.Journal.objects.filter(notes=None).update(notes='')
def backwards(self, orm):
"Write your backwards methods here."
orm.Journal.objects.filter(notes='').update(notes=None)
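# Note that backwards() is only a best-effort inverse: it turns every
# empty-string notes value back into NULL, including notes that were already
# empty before forwards() ran.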
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'journalmanager.aheadpressrelease': {
'Meta': {'object_name': 'AheadPressRelease', '_ormbases': ['journalmanager.PressRelease']},
'journal': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'press_releases'", 'to': "orm['journalmanager.Journal']"}),
'pressrelease_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['journalmanager.PressRelease']", 'unique': 'True', 'primary_key': 'True'})
},
'journalmanager.article': {
'Meta': {'object_name': 'Article'},
'aid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'article_type': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'articles_linkage_is_pending': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now_add': 'True', 'blank': 'True'}),
'doi': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '2048', 'db_index': 'True'}),
'domain_key': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '2048', 'db_index': 'False'}),
'es_is_dirty': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'es_updated_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_aop': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'issn_epub': ('django.db.models.fields.CharField', [], {'max_length': '9', 'db_index': 'True'}),
'issn_ppub': ('django.db.models.fields.CharField', [], {'max_length': '9', 'db_index': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'articles'", 'null': 'True', 'to': "orm['journalmanager.Issue']"}),
'journal': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'articles'", 'null': 'True', 'to': "orm['journalmanager.Journal']"}),
'journal_title': ('django.db.models.fields.CharField', [], {'max_length': '512', 'db_index': 'True'}),
'related_articles': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['journalmanager.Article']", 'null': 'True', 'through': "orm['journalmanager.ArticlesLinkage']", 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now': 'True', 'blank': 'True'}),
'xml': ('scielomanager.custom_fields.XMLSPSField', [], {}),
'xml_version': ('django.db.models.fields.CharField', [], {'max_length': '9'})
},
'journalmanager.articleslinkage': {
'Meta': {'object_name': 'ArticlesLinkage'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link_to': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'referrers'", 'to': "orm['journalmanager.Article']"}),
'link_type': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'referrer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'links_to'", 'to': "orm['journalmanager.Article']"})
},
'journalmanager.collection': {
'Meta': {'ordering': "['name']", 'object_name': 'Collection'},
'acronym': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '16', 'blank': 'True'}),
'address': ('django.db.models.fields.TextField', [], {}),
'address_complement': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'address_number': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'collection': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'user_collection'", 'to': "orm['auth.User']", 'through': "orm['journalmanager.UserCollections']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'name_slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'})
},
'journalmanager.institution': {
'Meta': {'ordering': "['name']", 'object_name': 'Institution'},
'acronym': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '16', 'blank': 'True'}),
'address': ('django.db.models.fields.TextField', [], {}),
'address_complement': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'address_number': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
'cel': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'complement': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_trashed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256', 'db_index': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'})
},
'journalmanager.issue': {
'Meta': {'ordering': "('created', 'id')", 'object_name': 'Issue'},
'cover': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'ctrl_vocabulary': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'editorial_standard': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_marked_up': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_trashed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'journal': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Journal']"}),
'label': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'number': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'blank': 'True'}),
'publication_end_month': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'publication_start_month': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'publication_year': ('django.db.models.fields.IntegerField', [], {}),
'section': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['journalmanager.Section']", 'symmetrical': 'False', 'blank': 'True'}),
'spe_text': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'suppl_text': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'total_documents': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'regular'", 'max_length': '15'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'use_license': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.UseLicense']", 'null': 'True'}),
'volume': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'})
},
'journalmanager.issuetitle': {
'Meta': {'object_name': 'IssueTitle'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Issue']"}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Language']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'journalmanager.journal': {
'Meta': {'ordering': "('title', 'id')", 'object_name': 'Journal'},
'abstract_keyword_languages': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'abstract_keyword_languages'", 'symmetrical': 'False', 'to': "orm['journalmanager.Language']"}),
'acronym': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'ccn_code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'blank': 'True'}),
'collections': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['journalmanager.Collection']", 'through': "orm['journalmanager.Membership']", 'symmetrical': 'False'}),
'copyrighter': ('django.db.models.fields.CharField', [], {'max_length': '254'}),
'cover': ('scielomanager.custom_fields.ContentTypeRestrictedFileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'enjoy_creator'", 'to': "orm['auth.User']"}),
'ctrl_vocabulary': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'current_ahead_documents': ('django.db.models.fields.IntegerField', [], {'default': '0', 'max_length': '3', 'null': 'True', 'blank': 'True'}),
'editor': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'editor_journal'", 'null': 'True', 'to': "orm['auth.User']"}),
'editor_address': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'editor_address_city': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'editor_address_country': ('scielo_extensions.modelfields.CountryField', [], {'max_length': '2'}),
'editor_address_state': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'editor_address_zip': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'editor_email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'editor_name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'editor_phone1': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'editor_phone2': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'editorial_standard': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'eletronic_issn': ('django.db.models.fields.CharField', [], {'max_length': '9', 'db_index': 'True'}),
'final_num': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'final_vol': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'final_year': ('django.db.models.fields.CharField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'}),
'frequency': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index_coverage': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'init_num': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'init_vol': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'init_year': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'is_indexed_aehci': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_indexed_scie': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_indexed_ssci': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_trashed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'languages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['journalmanager.Language']", 'symmetrical': 'False'}),
'logo': ('scielomanager.custom_fields.ContentTypeRestrictedFileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'medline_code': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'medline_title': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
'other_previous_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'previous_ahead_documents': ('django.db.models.fields.IntegerField', [], {'default': '0', 'max_length': '3', 'null': 'True', 'blank': 'True'}),
'previous_title': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'prev_title'", 'null': 'True', 'to': "orm['journalmanager.Journal']"}),
'print_issn': ('django.db.models.fields.CharField', [], {'max_length': '9', 'db_index': 'True'}),
'pub_level': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'publication_city': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'publisher_country': ('scielo_extensions.modelfields.CountryField', [], {'max_length': '2'}),
'publisher_name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'publisher_state': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'scielo_issn': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'secs_code': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'short_title': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'db_index': 'True'}),
'sponsor': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'journal_sponsor'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['journalmanager.Sponsor']"}),
'study_areas': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'journals_migration_tmp'", 'null': 'True', 'to': "orm['journalmanager.StudyArea']"}),
'subject_categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'journals'", 'null': 'True', 'to': "orm['journalmanager.SubjectCategory']"}),
'subject_descriptors': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256', 'db_index': 'True'}),
'title_iso': ('django.db.models.fields.CharField', [], {'max_length': '256', 'db_index': 'True'}),
'twitter_user': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'url_journal': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'url_online_submission': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'use_license': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.UseLicense']"})
},
'journalmanager.journalmission': {
'Meta': {'object_name': 'JournalMission'},
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'journal': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'missions'", 'to': "orm['journalmanager.Journal']"}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Language']", 'null': 'True'})
},
'journalmanager.journaltimeline': {
'Meta': {'object_name': 'JournalTimeline'},
'collection': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Collection']"}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'journal': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'statuses'", 'to': "orm['journalmanager.Journal']"}),
'reason': ('django.db.models.fields.TextField', [], {'default': "''"}),
'since': ('django.db.models.fields.DateTimeField', [], {}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '16'})
},
'journalmanager.journaltitle': {
'Meta': {'object_name': 'JournalTitle'},
'category': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'journal': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'other_titles'", 'to': "orm['journalmanager.Journal']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'journalmanager.language': {
'Meta': {'ordering': "['name']", 'object_name': 'Language'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'iso_code': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'journalmanager.membership': {
'Meta': {'unique_together': "(('journal', 'collection'),)", 'object_name': 'Membership'},
'collection': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Collection']"}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'journal': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Journal']"}),
'reason': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'since': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'inprogress'", 'max_length': '16'})
},
'journalmanager.pendedform': {
'Meta': {'object_name': 'PendedForm'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'form_hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pending_forms'", 'to': "orm['auth.User']"}),
'view_name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'journalmanager.pendedvalue': {
'Meta': {'object_name': 'PendedValue'},
'form': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'data'", 'to': "orm['journalmanager.PendedForm']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'journalmanager.pressrelease': {
'Meta': {'object_name': 'PressRelease'},
'doi': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'journalmanager.pressreleasearticle': {
'Meta': {'object_name': 'PressReleaseArticle'},
'article_pid': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'press_release': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'articles'", 'to': "orm['journalmanager.PressRelease']"})
},
'journalmanager.pressreleasetranslation': {
'Meta': {'object_name': 'PressReleaseTranslation'},
'content': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Language']"}),
'press_release': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'to': "orm['journalmanager.PressRelease']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'journalmanager.regularpressrelease': {
'Meta': {'object_name': 'RegularPressRelease', '_ormbases': ['journalmanager.PressRelease']},
'issue': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'press_releases'", 'to': "orm['journalmanager.Issue']"}),
'pressrelease_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['journalmanager.PressRelease']", 'unique': 'True', 'primary_key': 'True'})
},
'journalmanager.section': {
'Meta': {'ordering': "('id',)", 'object_name': 'Section'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '21', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_trashed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'journal': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Journal']"}),
'legacy_code': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'journalmanager.sectiontitle': {
'Meta': {'ordering': "['title']", 'object_name': 'SectionTitle'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Language']"}),
'section': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'titles'", 'to': "orm['journalmanager.Section']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'journalmanager.sponsor': {
'Meta': {'ordering': "['name']", 'object_name': 'Sponsor', '_ormbases': ['journalmanager.Institution']},
'collections': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['journalmanager.Collection']", 'symmetrical': 'False'}),
'institution_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['journalmanager.Institution']", 'unique': 'True', 'primary_key': 'True'})
},
'journalmanager.studyarea': {
'Meta': {'object_name': 'StudyArea'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'study_area': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'journalmanager.subjectcategory': {
'Meta': {'object_name': 'SubjectCategory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '256', 'db_index': 'True'})
},
'journalmanager.translateddata': {
'Meta': {'object_name': 'TranslatedData'},
'field': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'translation': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'})
},
'journalmanager.uselicense': {
'Meta': {'ordering': "['license_code']", 'object_name': 'UseLicense'},
'disclaimer': ('django.db.models.fields.TextField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'license_code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'reference_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'journalmanager.usercollections': {
'Meta': {'unique_together': "(('user', 'collection'),)", 'object_name': 'UserCollections'},
'collection': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Collection']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_manager': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'journalmanager.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'email_notifications': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tz': ('django.db.models.fields.CharField', [], {'default': "'America/Sao_Paulo'", 'max_length': '150'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['journalmanager']
symmetrical = True
| bsd-2-clause |
raggesilver/PyIDE | modules/autoBracket.py | 1 | 2199 | import gi
gi.require_version('Gtk', '3.0')
gi.require_version('Gdk', '3.0')
gi.require_version('GtkSource', '3.0')
from gi.repository import Gtk, Gdk, GtkSource
class AutoBracket:
def __init__(self, parent):
self.parent = parent
self.sview = parent.sview
self.sbuff = parent.sbuff
self.hadSelection = False  # complete() may read this before ever assigning it
self.chars = {
'parenleft': ')',
'bracketleft': ']',
'braceleft': '}',
'quotedbl': '"',
'apostrophe': '\'',
'less': '>'
}
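# Keys are the Gdk key names of the opening characters; values are the
# closing characters to insert. 'less' ('<') is special-cased in complete()
# so '>' is only auto-inserted for HTML buffers (or buffers with no
# language set).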
def do_activate(self, *args):
print('Auto Bracket module activated.')
self.sview.connect('event-after', self.complete)
def complete(self, view, event):
self.hasSelection = self.sbuff.props.has_selection
if self.hasSelection:
bounds = self.sbuff.get_selection_bounds()
self.start, self.end = bounds
self.selectionText = self.sbuff.get_text(self.start, self.end, False)
ignore = Gdk.ModifierType.CONTROL_MASK | Gdk.ModifierType.MOD1_MASK
if (event.type != Gdk.EventType.KEY_PRESS or event.state & ignore or Gdk.keyval_name(event.key.keyval) not in self.chars):
self.hadSelection = self.hasSelection
return
insert = self.get_insert()
closing = self.chars[Gdk.keyval_name(event.key.keyval)]
if Gdk.keyval_name(event.key.keyval) == 'less' and self.sbuff.get_language() is not None and self.sbuff.get_language().get_name().lower() != 'html':
return
if not self.hadSelection and not self.hasSelection:
self.sbuff.begin_user_action()
self.sbuff.insert(insert, closing)
self.sbuff.end_user_action()
insert.backward_chars(1)
self.sbuff.place_cursor(insert)
else:
self.sbuff.begin_user_action()
self.selectionText += closing
self.sbuff.insert(insert, self.selectionText)
self.sbuff.end_user_action()
insert.backward_chars(1)
self.sbuff.place_cursor(insert)
def get_insert(self, *args):
mark = self.sbuff.get_insert()
return self.sbuff.get_iter_at_mark(mark) | mit |
kurikuri99/xen_study | tools/xm-test/tests/vtpm/06_vtpm-susp_res_pcrs.py | 21 | 3565 | #!/usr/bin/python
# Copyright (C) International Business Machines Corp., 2006
# Author: Stefan Berger <[email protected]>
# Positive Test: create a domain with a virtual TPM attached at build time,
# extend a PCR and check the list of PCRs; suspend and resume the domain,
# then check the list of PCRs again and validate the extended PCR.
from XmTestLib import *
from vtpm_utils import *
import commands
import os
import os.path
import atexit
config = {"vtpm":"instance=1,backend=0"}
domain = XmTestDomain(extraConfig=config)
domName = domain.getName()
consoleHistory = ""
try:
console = domain.start()
except DomainError, e:
if verbose:
print e.extra
FAIL("Unable to create domain (%s)" % domName)
atexit.register(vtpm_cleanup, vtpm_get_uuid(domid(domName)))
try:
console.sendInput("input")
except ConsoleError, e:
saveLog(console.getHistory())
FAIL(str(e))
try:
run = console.runCmd("mknod /dev/tpm0 c 10 224")
except ConsoleError, e:
saveLog(console.getHistory())
FAIL("Error while creating /dev/tpm0")
try:
run = console.runCmd("echo -ne \"\\x00\\xc1\\x00\\x00\\x00\\x22\\x00\\x00\\x00\\x14\\x00\\x00\\x00\\x00\\x01\\x02\\x03\\x04\\x05\\x06\\x07\\x08\\x09\\x0a\\x0b\\x0c\\x0d\\x0e\\0xf\\x10\\x11\\x12\\x13\\x14\" > seq; cat seq > /dev/tpm0")
except ConsoleError, e:
saveLog(console.getHistory())
FAIL("Error while extending PCR 0")
try:
run = console.runCmd("cat /sys/devices/xen/vtpm-0/pcrs")
except ConsoleError, e:
saveLog(console.getHistory())
FAIL("No result from dumping the PCRs")
if re.search("No such file",run["output"]):
FAIL("TPM frontend support not compiled into (domU?) kernel")
if not re.search("PCR-00:",run["output"]):
saveLog(console.getHistory())
FAIL("Virtual TPM is not working correctly on /dev/vtpm on backend side: \n%s" % run["output"])
if not re.search("PCR-00: 1E A7 BD",run["output"]):
saveLog(console.getHistory())
FAIL("Extend did not lead to expected result (1E A7 BD ...): \n%s" % run["output"])
consoleHistory = console.getHistory()
domain.closeConsole()
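# Suspend (xm save) and resume (xm restore) the domain a few times, checking
# after each cycle that PCR 0 still holds the value extended above.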
loop = 0
while loop < 3:
try:
status, output = traceCommand("xm save %s %s.save" %
(domName, domName),
timeout=30)
except TimeoutError, e:
saveLog(consoleHistory)
FAIL(str(e))
if status != 0:
saveLog(consoleHistory)
FAIL("xm save did not succeed")
try:
status, output = traceCommand("xm restore %s.save" %
(domName),
timeout=30)
except TimeoutError, e:
os.remove("%s.save" % domName)
saveLog(consoleHistory)
FAIL(str(e))
os.remove("%s.save" % domName)
if status != 0:
saveLog(consoleHistory)
FAIL("xm restore did not succeed")
try:
console = domain.getConsole()
except ConsoleError, e:
FAIL(str(e))
try:
run = console.runCmd("cat /sys/devices/xen/vtpm-0/pcrs")
except ConsoleError, e:
saveLog(console.getHistory())
FAIL(str(e))
if not re.search("PCR-00:",run["output"]):
saveLog(console.getHistory())
FAIL("Virtual TPM is not working correctly on /dev/vtpm on backend side")
if not re.search("PCR-00: 1E A7 BD",run["output"]):
saveLog(console.getHistory())
FAIL("Virtual TPM lost PCR 0 value: \n%s" % run["output"])
loop += 1
domain.closeConsole()
domain.stop()
| gpl-2.0 |
astrofimov/limbo-android | jni/qemu/roms/seabios/tools/layoutrom.py | 42 | 21567 | #!/usr/bin/env python
# Script to analyze code and arrange ld sections.
#
# Copyright (C) 2008-2010 Kevin O'Connor <[email protected]>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
import sys
# LD script headers/trailers
COMMONHEADER = """
/* DO NOT EDIT! This is an autogenerated file. See tools/layoutrom.py. */
OUTPUT_FORMAT("elf32-i386")
OUTPUT_ARCH("i386")
SECTIONS
{
"""
COMMONTRAILER = """
/* Discard regular data sections to force a link error if
* code attempts to access data not marked with VAR16 (or other
* appropriate macro)
*/
/DISCARD/ : {
*(.text*) *(.data*) *(.bss*) *(.rodata*)
*(COMMON) *(.discard*) *(.eh_frame)
}
}
"""
######################################################################
# Determine section locations
######################################################################
# Round 'pos' up to the next multiple of 'alignbytes'
def alignpos(pos, alignbytes):
mask = alignbytes - 1
return (pos + mask) & ~mask
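# For example (illustrative values): alignpos(0x1234, 16) == 0x1240 and
# alignpos(0x1240, 16) == 0x1240.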
# Determine the final addresses for a list of sections that end at an
# address.
def setSectionsStart(sections, endaddr, minalign=1):
totspace = 0
for section in sections:
if section.align > minalign:
minalign = section.align
totspace = alignpos(totspace, section.align) + section.size
startaddr = (endaddr - totspace) / minalign * minalign
curaddr = startaddr
for section in sections:
curaddr = alignpos(curaddr, section.align)
section.finalloc = curaddr
curaddr += section.size
return startaddr
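# Rough illustration (made-up sizes): two 16-byte-aligned sections of 0x30
# and 0x18 bytes that must end by 0x1000 need 0x48 bytes, so the block
# starts at (0x1000 - 0x48) rounded down to 16 bytes, i.e. 0xfb0.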
# The 16bit code can't exceed 64K of space.
BUILD_BIOS_ADDR = 0xf0000
BUILD_BIOS_SIZE = 0x10000
# Layout the 16bit code. This ensures sections with fixed offset
# requirements are placed in the correct location. It also places the
# 16bit code as high as possible in the f-segment.
def fitSections(sections, fillsections):
# fixedsections = [(addr, section), ...]
fixedsections = []
for section in sections:
if section.name.startswith('.fixedaddr.'):
addr = int(section.name[11:], 16)
section.finalloc = addr
fixedsections.append((addr, section))
if section.align != 1:
print "Error: Fixed section %s has non-default alignment (%d)" % (
section.name, section.align)
sys.exit(1)
fixedsections.sort()
firstfixed = fixedsections[0][0]
# Find freespace in fixed address area
# fixedAddr = [(freespace, section), ...]
fixedAddr = []
for i in range(len(fixedsections)):
fixedsectioninfo = fixedsections[i]
addr, section = fixedsectioninfo
if i == len(fixedsections) - 1:
nextaddr = BUILD_BIOS_SIZE
else:
nextaddr = fixedsections[i+1][0]
avail = nextaddr - addr - section.size
fixedAddr.append((avail, section))
fixedAddr.sort()
# Attempt to fit other sections into fixed area
canrelocate = [(section.size, section.align, section.name, section)
for section in fillsections]
canrelocate.sort()
canrelocate = [section for size, align, name, section in canrelocate]
totalused = 0
for freespace, fixedsection in fixedAddr:
addpos = fixedsection.finalloc + fixedsection.size
totalused += fixedsection.size
nextfixedaddr = addpos + freespace
# print "Filling section %x uses %d, next=%x, available=%d" % (
# fixedsection.finalloc, fixedsection.size, nextfixedaddr, freespace)
while 1:
canfit = None
for fitsection in canrelocate:
if addpos + fitsection.size > nextfixedaddr:
# Can't fit and nothing else will fit.
break
fitnextaddr = alignpos(addpos, fitsection.align) + fitsection.size
# print "Test %s - %x vs %x" % (
# fitsection.name, fitnextaddr, nextfixedaddr)
if fitnextaddr > nextfixedaddr:
# This item can't fit.
continue
canfit = (fitnextaddr, fitsection)
if canfit is None:
break
# Found a section that can fit.
fitnextaddr, fitsection = canfit
canrelocate.remove(fitsection)
fitsection.finalloc = addpos
addpos = fitnextaddr
totalused += fitsection.size
# print "  Adding %s (size %d align %d) pos=%x avail=%d" % (
#    fitsection.name, fitsection.size, fitsection.align
#    , fitnextaddr, nextfixedaddr - fitnextaddr)
# Report stats
total = BUILD_BIOS_SIZE-firstfixed
slack = total - totalused
print ("Fixed space: 0x%x-0x%x total: %d slack: %d"
" Percent slack: %.1f%%" % (
firstfixed, BUILD_BIOS_SIZE, total, slack,
(float(slack) / total) * 100.0))
return firstfixed
# Return the subset of sections with a given name prefix
def getSectionsPrefix(sections, category, prefix):
return [section for section in sections
if section.category == category and section.name.startswith(prefix)]
def doLayout(sections):
# Determine 16bit positions
textsections = getSectionsPrefix(sections, '16', '.text.')
rodatasections = (
getSectionsPrefix(sections, '16', '.rodata.str1.1')
+ getSectionsPrefix(sections, '16', '.rodata.__func__.')
+ getSectionsPrefix(sections, '16', '.rodata.__PRETTY_FUNCTION__.'))
datasections = getSectionsPrefix(sections, '16', '.data16.')
fixedsections = getSectionsPrefix(sections, '16', '.fixedaddr.')
firstfixed = fitSections(fixedsections, textsections)
remsections = [s for s in textsections+rodatasections+datasections
if s.finalloc is None]
code16_start = setSectionsStart(remsections, firstfixed)
# Determine 32seg positions
textsections = getSectionsPrefix(sections, '32seg', '.text.')
rodatasections = (
getSectionsPrefix(sections, '32seg', '.rodata.str1.1')
+ getSectionsPrefix(sections, '32seg', '.rodata.__func__.')
+ getSectionsPrefix(sections, '32seg', '.rodata.__PRETTY_FUNCTION__.'))
datasections = getSectionsPrefix(sections, '32seg', '.data32seg.')
code32seg_start = setSectionsStart(
textsections + rodatasections + datasections, code16_start)
# Determine 32flat runtime positions
textsections = getSectionsPrefix(sections, '32flat', '.text.')
rodatasections = getSectionsPrefix(sections, '32flat', '.rodata')
datasections = getSectionsPrefix(sections, '32flat', '.data.')
bsssections = getSectionsPrefix(sections, '32flat', '.bss.')
code32flat_start = setSectionsStart(
textsections + rodatasections + datasections + bsssections
, code32seg_start + BUILD_BIOS_ADDR, 16)
# Determine 32flat init positions
textsections = getSectionsPrefix(sections, '32init', '.text.')
rodatasections = getSectionsPrefix(sections, '32init', '.rodata')
datasections = getSectionsPrefix(sections, '32init', '.data.')
bsssections = getSectionsPrefix(sections, '32init', '.bss.')
code32init_start = setSectionsStart(
textsections + rodatasections + datasections + bsssections
, code32flat_start, 16)
# Print statistics
size16 = BUILD_BIOS_SIZE - code16_start
size32seg = code16_start - code32seg_start
size32flat = code32seg_start + BUILD_BIOS_ADDR - code32flat_start
size32init = code32flat_start - code32init_start
print "16bit size: %d" % size16
print "32bit segmented size: %d" % size32seg
print "32bit flat size: %d" % size32flat
print "32bit flat init size: %d" % size32init
######################################################################
# Linker script output
######################################################################
# Write LD script includes for the given cross references
def outXRefs(sections):
xrefs = {}
out = ""
for section in sections:
for reloc in section.relocs:
symbol = reloc.symbol
if (symbol.section is None
or (symbol.section.fileid == section.fileid
and symbol.name == reloc.symbolname)
or reloc.symbolname in xrefs):
continue
xrefs[reloc.symbolname] = 1
addr = symbol.section.finalloc + symbol.offset
if (section.fileid == '32flat'
and symbol.section.fileid in ('16', '32seg')):
addr += BUILD_BIOS_ADDR
out += "%s = 0x%x ;\n" % (reloc.symbolname, addr)
return out
# Write LD script includes for the given sections using relative offsets
def outRelSections(sections, startsym):
out = ""
for section in sections:
out += ". = ( 0x%x - %s ) ;\n" % (section.finalloc, startsym)
if section.name == '.rodata.str1.1':
out += "_rodata = . ;\n"
out += "*(%s)\n" % (section.name,)
return out
def getSectionsFile(sections, fileid, defaddr=0):
sections = [(section.finalloc, section)
for section in sections if section.fileid == fileid]
sections.sort()
sections = [section for addr, section in sections]
pos = defaddr
if sections:
pos = sections[0].finalloc
return sections, pos
# Write the linker scripts for the 16bit, 32bit-segmented and 32bit-flat code.
def writeLinkerScripts(sections, entrysym, genreloc, out16, out32seg, out32flat):
# Write 16bit linker script
sections16, code16_start = getSectionsFile(sections, '16')
output = open(out16, 'wb')
output.write(COMMONHEADER + outXRefs(sections16) + """
code16_start = 0x%x ;
.text16 code16_start : {
""" % (code16_start)
+ outRelSections(sections16, 'code16_start')
+ """
}
"""
+ COMMONTRAILER)
output.close()
# Write 32seg linker script
sections32seg, code32seg_start = getSectionsFile(
sections, '32seg', code16_start)
output = open(out32seg, 'wb')
output.write(COMMONHEADER + outXRefs(sections32seg) + """
code32seg_start = 0x%x ;
.text32seg code32seg_start : {
""" % (code32seg_start)
+ outRelSections(sections32seg, 'code32seg_start')
+ """
}
"""
+ COMMONTRAILER)
output.close()
# Write 32flat linker script
sections32flat, code32flat_start = getSectionsFile(
sections, '32flat', code32seg_start)
relocstr = ""
relocminalign = 0
if genreloc:
# Generate relocations
relocstr, size, relocminalign = genRelocs(sections)
code32flat_start -= size
output = open(out32flat, 'wb')
output.write(COMMONHEADER
+ outXRefs(sections32flat) + """
%s = 0x%x ;
_reloc_min_align = 0x%x ;
code32flat_start = 0x%x ;
.text code32flat_start : {
""" % (entrysym.name,
entrysym.section.finalloc + entrysym.offset + BUILD_BIOS_ADDR,
relocminalign, code32flat_start)
+ relocstr
+ """
code32init_start = ABSOLUTE(.) ;
"""
+ outRelSections(getSectionsPrefix(sections32flat, '32init', '')
, 'code32flat_start')
+ """
code32init_end = ABSOLUTE(.) ;
"""
+ outRelSections(getSectionsPrefix(sections32flat, '32flat', '')
, 'code32flat_start')
+ """
. = ( 0x%x - code32flat_start ) ;
*(.text32seg)
. = ( 0x%x - code32flat_start ) ;
*(.text16)
code32flat_end = ABSOLUTE(.) ;
} :text
""" % (code32seg_start + BUILD_BIOS_ADDR, code16_start + BUILD_BIOS_ADDR)
+ COMMONTRAILER
+ """
ENTRY(%s)
PHDRS
{
text PT_LOAD AT ( code32flat_start ) ;
}
""" % (entrysym.name,))
output.close()
######################################################################
# Detection of init code
######################################################################
# Determine init section relocations
def genRelocs(sections):
absrelocs = []
relrelocs = []
initrelocs = []
minalign = 16
for section in sections:
if section.category == '32init' and section.align > minalign:
minalign = section.align
for reloc in section.relocs:
symbol = reloc.symbol
if symbol.section is None:
continue
relocpos = section.finalloc + reloc.offset
if (reloc.type == 'R_386_32' and section.category == '32init'
and symbol.section.category == '32init'):
# Absolute relocation
absrelocs.append(relocpos)
elif (reloc.type == 'R_386_PC32' and section.category == '32init'
and symbol.section.category != '32init'):
# Relative relocation
relrelocs.append(relocpos)
elif (section.category != '32init'
and symbol.section.category == '32init'):
# Relocation to the init section
if section.fileid in ('16', '32seg'):
relocpos += BUILD_BIOS_ADDR
initrelocs.append(relocpos)
absrelocs.sort()
relrelocs.sort()
initrelocs.sort()
out = (" _reloc_abs_start = ABSOLUTE(.) ;\n"
+ "".join(["LONG(0x%x - code32init_start)\n" % (pos,)
for pos in absrelocs])
+ " _reloc_abs_end = ABSOLUTE(.) ;\n"
+ " _reloc_rel_start = ABSOLUTE(.) ;\n"
+ "".join(["LONG(0x%x - code32init_start)\n" % (pos,)
for pos in relrelocs])
+ " _reloc_rel_end = ABSOLUTE(.) ;\n"
+ " _reloc_init_start = ABSOLUTE(.) ;\n"
+ "".join(["LONG(0x%x - code32flat_start)\n" % (pos,)
for pos in initrelocs])
+ " _reloc_init_end = ABSOLUTE(.) ;\n")
return out, len(absrelocs + relrelocs + initrelocs) * 4, minalign
def markRuntime(section, sections):
if (section is None or not section.keep or section.category is not None
or '.init.' in section.name or section.fileid != '32flat'):
return
section.category = '32flat'
# Recursively mark all sections this section points to
for reloc in section.relocs:
markRuntime(reloc.symbol.section, sections)
def findInit(sections):
# Recursively find and mark all "runtime" sections.
for section in sections:
if '.runtime.' in section.name or '.export.' in section.name:
markRuntime(section, sections)
for section in sections:
if section.category is not None:
continue
if section.fileid == '32flat':
section.category = '32init'
else:
section.category = section.fileid
######################################################################
# Section garbage collection
######################################################################
CFUNCPREFIX = [('_cfunc16_', 0), ('_cfunc32seg_', 1), ('_cfunc32flat_', 2)]
# Find and keep the section associated with a symbol (if available).
def keepsymbol(reloc, infos, pos, isxref):
symbolname = reloc.symbolname
mustbecfunc = 0
for symprefix, needpos in CFUNCPREFIX:
if symbolname.startswith(symprefix):
if needpos != pos:
return -1
symbolname = symbolname[len(symprefix):]
mustbecfunc = 1
break
symbol = infos[pos][1].get(symbolname)
if (symbol is None or symbol.section is None
or symbol.section.name.startswith('.discard.')):
return -1
isdestcfunc = (symbol.section.name.startswith('.text.')
and not symbol.section.name.startswith('.text.asm.'))
if ((mustbecfunc and not isdestcfunc)
or (not mustbecfunc and isdestcfunc and isxref)):
return -1
reloc.symbol = symbol
keepsection(symbol.section, infos, pos)
return 0
# Note required section, and recursively set all referenced sections
# as required.
def keepsection(section, infos, pos=0):
if section.keep:
# Already kept - nothing to do.
return
section.keep = 1
# Keep all sections that this section points to
for reloc in section.relocs:
ret = keepsymbol(reloc, infos, pos, 0)
if not ret:
continue
# Not in primary sections - it may be a cross 16/32 reference
ret = keepsymbol(reloc, infos, (pos+1)%3, 1)
if not ret:
continue
ret = keepsymbol(reloc, infos, (pos+2)%3, 1)
if not ret:
continue
# Determine which sections are actually referenced and need to be
# placed into the output file.
def gc(info16, info32seg, info32flat):
# infos = ((sections16, symbols16), (sect32seg, sym32seg)
# , (sect32flat, sym32flat))
infos = (info16, info32seg, info32flat)
# Start by keeping sections that are globally visible.
for section in info16[0]:
if section.name.startswith('.fixedaddr.') or '.export.' in section.name:
keepsection(section, infos)
return [section for section in info16[0]+info32seg[0]+info32flat[0]
if section.keep]
######################################################################
# Startup and input parsing
######################################################################
class Section:
name = size = align = fileid = relocs = None
finalloc = category = keep = None
class Reloc:
offset = type = symbolname = symbol = None
class Symbol:
name = offset = section = None
# Read in output from objdump
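# The dump is expected to contain the "Sections:", "SYMBOL TABLE:" and
# "RELOCATION RECORDS FOR [...]" blocks, i.e. roughly what
# "objdump -h -t -r" produces (the exact invocation is not part of this
# script).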
def parseObjDump(file, fileid):
# sections = [section, ...]
sections = []
sectionmap = {}
# symbols[symbolname] = symbol
symbols = {}
state = None
for line in file.readlines():
line = line.rstrip()
if line == 'Sections:':
state = 'section'
continue
if line == 'SYMBOL TABLE:':
state = 'symbol'
continue
if line.startswith('RELOCATION RECORDS FOR ['):
sectionname = line[24:-2]
if sectionname.startswith('.debug_'):
# Skip debugging sections (to reduce parsing time)
state = None
continue
state = 'reloc'
relocsection = sectionmap[sectionname]
continue
if state == 'section':
try:
idx, name, size, vma, lma, fileoff, align = line.split()
if align[:3] != '2**':
continue
section = Section()
section.name = name
section.size = int(size, 16)
section.align = 2**int(align[3:])
section.fileid = fileid
section.relocs = []
sections.append(section)
sectionmap[name] = section
except ValueError:
pass
continue
if state == 'symbol':
try:
sectionname, size, name = line[17:].split()
symbol = Symbol()
symbol.size = int(size, 16)
symbol.offset = int(line[:8], 16)
symbol.name = name
symbol.section = sectionmap.get(sectionname)
symbols[name] = symbol
except ValueError:
pass
continue
if state == 'reloc':
try:
off, type, symbolname = line.split()
reloc = Reloc()
reloc.offset = int(off, 16)
reloc.type = type
reloc.symbolname = symbolname
reloc.symbol = symbols.get(symbolname)
if reloc.symbol is None:
# Some binutils (2.20.1) give section name instead
# of a symbol - create a dummy symbol.
reloc.symbol = symbol = Symbol()
symbol.size = 0
symbol.offset = 0
symbol.name = symbolname
symbol.section = sectionmap.get(symbolname)
symbols[symbolname] = symbol
relocsection.relocs.append(reloc)
except ValueError:
pass
return sections, symbols
def main():
    # Get input and output file names
in16, in32seg, in32flat, out16, out32seg, out32flat = sys.argv[1:]
# Read in the objdump information
infile16 = open(in16, 'rb')
infile32seg = open(in32seg, 'rb')
infile32flat = open(in32flat, 'rb')
# infoX = (sections, symbols)
info16 = parseObjDump(infile16, '16')
info32seg = parseObjDump(infile32seg, '32seg')
info32flat = parseObjDump(infile32flat, '32flat')
# Figure out which sections to keep.
sections = gc(info16, info32seg, info32flat)
# Separate 32bit flat into runtime and init parts
findInit(sections)
# Determine the final memory locations of each kept section.
doLayout(sections)
# Write out linker script files.
entrysym = info16[1]['entry_elf']
genreloc = '_reloc_abs_start' in info32flat[1]
writeLinkerScripts(sections, entrysym, genreloc, out16, out32seg, out32flat)
if __name__ == '__main__':
main()
| gpl-2.0 |
timhuanggithub/MyPOX | pox/openflow/libopenflow_01.py | 1 | 131719 | # Copyright 2011,2012 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file was originally based on pyopenflow.py from NOX, which was
# autogenerated from openflow.h via a program by KK Yap. It has been
# substantially altered since then.
from __future__ import print_function
import struct
import operator
from itertools import chain, repeat
import sys
from pox.lib.packet.packet_base import packet_base
from pox.lib.packet.ethernet import ethernet
from pox.lib.packet.vlan import vlan
from pox.lib.packet.llc import llc
from pox.lib.packet.ipv4 import ipv4
from pox.lib.packet.udp import udp
from pox.lib.packet.tcp import tcp
from pox.lib.packet.icmp import icmp
from pox.lib.packet.arp import arp
from pox.lib.addresses import *
from pox.lib.util import assert_type
from pox.lib.util import initHelper
from pox.lib.util import hexdump
from pox.lib.util import is_listlike
EMPTY_ETH = EthAddr(None)
# ----------------------------------------------------------------------
# Logging
# ----------------------------------------------------------------------
_logger = None
def _log (debug=None, info=None, warn=None, error=None):
if not _logger: return
if debug: _logger.debug(debug)
if info: _logger.info(info)
if warn: _logger.warn(warn)
if error: _logger.error(error)
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# XID Management
# ----------------------------------------------------------------------
MAX_XID = 0x7fFFffFF
def XIDGenerator (start = 1, stop = MAX_XID):
i = start
while True:
yield i
i += 1
if i > stop:
i = start
def xid_generator (start = 1, stop = MAX_XID):
return XIDGenerator(start, stop).next
def user_xid_generator ():
return xid_generator(0x80000000, 0xffFFffFF)
generate_xid = xid_generator()
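# e.g. generate_xid() returns 1, 2, 3, ... and wraps back to 1 after MAX_XID.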
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# Packing / Unpacking
# ----------------------------------------------------------------------
_PAD = b'\x00'
_PAD2 = _PAD*2
_PAD3 = _PAD*3
_PAD4 = _PAD*4
_PAD6 = _PAD*6
class UnderrunError (RuntimeError):
"""
Raised when one tries to unpack more data than is available
"""
pass
def _read (data, offset, length):
if (len(data)-offset) < length:
raise UnderrunError("wanted %s bytes but only have %s"
% (length, len(data)-offset))
return (offset+length, data[offset:offset+length])
def _unpack (fmt, data, offset):
size = struct.calcsize(fmt)
if (len(data)-offset) < size: raise UnderrunError()
return (offset+size, struct.unpack_from(fmt, data, offset))
def _skip (data, offset, num):
offset += num
if offset > len(data): raise UnderrunError()
return offset
def _unpad (data, offset, num):
(offset, o) = _read(data, offset, num)
assert len(o.replace(b"\x00", b"")) == 0
return offset
def _readzs (data, offset, length):
(offset, d) = _read(data, offset, length)
d = d.split(b"\x00", 1)
#if len(d[1].replace(b"\x00", b"")) > 0:
# raise RuntimeError("Non-zero string padding")
assert True if (len(d) == 1) else (len(d[1].replace(b"\x00", b"")) == 0)
return (offset, d[0])
def _readether (data, offset):
(offset, d) = _read(data, offset, 6)
return (offset, EthAddr(d))
def _readip (data, offset, networkOrder = True):
(offset, d) = _read(data, offset, 4)
return (offset, IPAddr(d, networkOrder = networkOrder))
# ----------------------------------------------------------------------
def _format_body (body, prefix):
if hasattr(body, 'show'):
#TODO: Check this (spacing may well be wrong)
return body.show(prefix + ' ')
else:
return prefix + hexdump(body).replace("\n", "\n" + prefix)
TABLE_ALL = 0xff
TABLE_EMERGENCY = 0xfe
class _ofp_meta (type):
"""
Metaclass for ofp messages/structures
This takes care of making len() work as desired.
"""
def __len__ (cls):
try:
return cls.__len__()
except:
return cls._MIN_LENGTH
class ofp_base (object):
"""
Base class for OpenFlow messages/structures
You should implement a __len__ method. If your length is fixed, it
should be a static method. If your length is not fixed, you should
implement a __len__ instance method and set a class level _MIN_LENGTH
attribute to your minimum length.
"""
__metaclass__ = _ofp_meta
def _assert (self):
r = self._validate()
if r is not None:
raise RuntimeError(r)
return False # Never reached
return True
def _validate (self):
return None
def __ne__ (self, other):
return not self.__eq__(other)
@classmethod
def unpack_new (cls, raw, offset=0):
"""
Unpacks wire format into the appropriate message object.
Returns newoffset,object
"""
o = cls()
r,length = o.unpack(raw, offset)
assert (r-offset) == length, o
return (r, o)
def clone (self):
# Works for any packable+unpackable ofp_base subclass.
# Can override if you have a better implementation
return type(self).unpack_new(self.pack())[1]
# ----------------------------------------------------------------------
# Class decorators
# ----------------------------------------------------------------------
_message_type_to_class = {}
_message_class_to_types = {} # Do we need this?
#_message_type_to_name = {}
#_message_name_to_type = {}
ofp_type_rev_map = {}
ofp_type_map = {}
def openflow_message (ofp_type, type_val, reply_to=None,
request_for=None, switch=False, controller=False):
#TODO: Reply stuff, switch/controller stuff
#_message_name_to_type[ofp_type] = type_val
#_message_type_to_name[type_val] = ofp_type
ofp_type_rev_map[ofp_type] = type_val
ofp_type_map[type_val] = ofp_type
def f (c):
c.header_type = type_val
c._from_switch = switch
c._from_controller = controller
_message_type_to_class[type_val] = c
_message_class_to_types.setdefault(c, set()).add(type_val)
return c
return f
def openflow_sc_message (*args, **kw):
return openflow_message(switch=True, controller=True, *args, **kw)
def openflow_c_message (*args, **kw):
return openflow_message(controller=True, *args, **kw)
def openflow_s_message (*args, **kw):
return openflow_message(switch=True, *args, **kw)
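# These decorators register a message class for its wire type, e.g.:
#   @openflow_s_message("OFPT_FEATURES_REPLY", 6)
#   class ofp_features_reply (ofp_header): ...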
_queue_prop_type_to_class = {}
_queue_prop_class_to_types = {} # Do we need this?
ofp_queue_prop_type_rev_map = {}
ofp_queue_prop_type_map = {}
def openflow_queue_prop (queue_prop_type, type_val):
ofp_queue_prop_type_rev_map[queue_prop_type] = type_val
ofp_queue_prop_type_map[type_val] = queue_prop_type
def f (c):
c.property = type_val
_queue_prop_type_to_class[type_val] = c
_queue_prop_class_to_types.setdefault(c, set()).add(type_val)
return c
return f
_action_type_to_class = {}
_action_class_to_types = {} # Do we need this?
ofp_action_type_rev_map = {}
ofp_action_type_map = {}
def openflow_action (action_type, type_val):
ofp_action_type_rev_map[action_type] = type_val
ofp_action_type_map[type_val] = action_type
def f (c):
c.type = type_val
_action_type_to_class[type_val] = c
_action_class_to_types.setdefault(c, set()).add(type_val)
return c
return f
class _StatsClassInfo (object):
__slots__ = 'request reply reply_is_list'.split()
def __init__ (self, **kw):
self.request = None
self.reply = None
self.reply_is_list = False
initHelper(self, kw)
def __str__ (self):
r = str(self.reply)
if self.reply_is_list: r = "[%s]" % (r,)
return "request:%s reply:%s" % (self.request, r)
_stats_type_to_class_info = {}
_stats_class_to_type = {}
ofp_stats_type_rev_map = {}
ofp_stats_type_map = {}
def openflow_stats_request (stats_type, type_val=None, is_list=None,
is_reply = False):
if type_val is not None:
ofp_stats_type_rev_map[stats_type] = type_val
ofp_stats_type_map[type_val] = stats_type
else:
type_val = ofp_stats_type_rev_map.get(stats_type)
def f (c):
if type_val is not None:
ti = _stats_type_to_class_info.get(stats_type)
if ti is not None:
_stats_type_to_class_info[type_val] = ti
del _stats_type_to_class_info[stats_type]
else:
ti = _stats_type_to_class_info.setdefault(type_val,
_StatsClassInfo())
_stats_class_to_type[c] = type_val
else:
ti = _stats_type_to_class_info.setdefault(stats_type,
_StatsClassInfo())
if is_list is not None:
ti.reply_is_list = is_list
if is_reply:
ti.reply = c
else:
ti.request = c
if type_val is not None:
yes = False
if ti.reply is not None and issubclass(ti.reply,ofp_stats_body_base):
ti.reply._type = type_val
yes = True
if ti.request is not None and issubclass(ti.request,ofp_stats_body_base):
ti.request._type = type_val
yes = True
assert yes, "Type not set for " + str(stats_type)
return c
return f
def openflow_stats_reply (stats_type, type_val=None, is_list=None,
is_reply = True):
return openflow_stats_request(stats_type, type_val, is_list, is_reply)
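# These decorators pair stats request/reply body classes with a stats type.
# They are used further down the file roughly like this (illustrative only):
#   @openflow_stats_reply('OFPST_DESC', 0)
#   class ofp_desc_stats (ofp_stats_body_base): ...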
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# Constants, etc.
# ----------------------------------------------------------------------
ofp_error_type_rev_map = {
'OFPET_HELLO_FAILED' : 0,
'OFPET_BAD_REQUEST' : 1,
'OFPET_BAD_ACTION' : 2,
'OFPET_FLOW_MOD_FAILED' : 3,
'OFPET_PORT_MOD_FAILED' : 4,
'OFPET_QUEUE_OP_FAILED' : 5,
}
ofp_hello_failed_code_rev_map = {
'OFPHFC_INCOMPATIBLE' : 0,
'OFPHFC_EPERM' : 1,
}
ofp_bad_request_code_rev_map = {
'OFPBRC_BAD_VERSION' : 0,
'OFPBRC_BAD_TYPE' : 1,
'OFPBRC_BAD_STAT' : 2,
'OFPBRC_BAD_VENDOR' : 3,
'OFPBRC_BAD_SUBTYPE' : 4,
'OFPBRC_EPERM' : 5,
'OFPBRC_BAD_LEN' : 6,
'OFPBRC_BUFFER_EMPTY' : 7,
'OFPBRC_BUFFER_UNKNOWN' : 8,
}
ofp_bad_action_code_rev_map = {
'OFPBAC_BAD_TYPE' : 0,
'OFPBAC_BAD_LEN' : 1,
'OFPBAC_BAD_VENDOR' : 2,
'OFPBAC_BAD_VENDOR_TYPE' : 3,
'OFPBAC_BAD_OUT_PORT' : 4,
'OFPBAC_BAD_ARGUMENT' : 5,
'OFPBAC_EPERM' : 6,
'OFPBAC_TOO_MANY' : 7,
'OFPBAC_BAD_QUEUE' : 8,
}
ofp_flow_mod_failed_code_rev_map = {
'OFPFMFC_ALL_TABLES_FULL' : 0,
'OFPFMFC_OVERLAP' : 1,
'OFPFMFC_EPERM' : 2,
'OFPFMFC_BAD_EMERG_TIMEOUT' : 3,
'OFPFMFC_BAD_COMMAND' : 4,
'OFPFMFC_UNSUPPORTED' : 5,
}
ofp_port_mod_failed_code_rev_map = {
'OFPPMFC_BAD_PORT' : 0,
'OFPPMFC_BAD_HW_ADDR' : 1,
}
ofp_queue_op_failed_code_rev_map = {
'OFPQOFC_BAD_PORT' : 0,
'OFPQOFC_BAD_QUEUE' : 1,
'OFPQOFC_EPERM' : 2,
}
ofp_port_config_rev_map = {
'OFPPC_PORT_DOWN' : 1,
'OFPPC_NO_STP' : 2,
'OFPPC_NO_RECV' : 4,
'OFPPC_NO_RECV_STP' : 8,
'OFPPC_NO_FLOOD' : 16,
'OFPPC_NO_FWD' : 32,
'OFPPC_NO_PACKET_IN' : 64,
}
ofp_port_state_rev_map = {
'OFPPS_STP_LISTEN' : 0,
'OFPPS_LINK_DOWN' : 1,
'OFPPS_STP_LEARN' : 256,
'OFPPS_STP_FORWARD' : 512,
'OFPPS_STP_BLOCK' : 768,
}
OFPPS_STP_MASK = 768
ofp_port_features_rev_map = {
'OFPPF_10MB_HD' : 1,
'OFPPF_10MB_FD' : 2,
'OFPPF_100MB_HD' : 4,
'OFPPF_100MB_FD' : 8,
'OFPPF_1GB_HD' : 16,
'OFPPF_1GB_FD' : 32,
'OFPPF_10GB_FD' : 64,
'OFPPF_COPPER' : 128,
'OFPPF_FIBER' : 256,
'OFPPF_AUTONEG' : 512,
'OFPPF_PAUSE' : 1024,
'OFPPF_PAUSE_ASYM' : 2048,
}
ofp_queue_properties_rev_map = {
'OFPQT_MIN_RATE' : 0,
}
OFPQT_NONE = 0
ofp_capabilities_rev_map = {
'OFPC_FLOW_STATS' : 1,
'OFPC_TABLE_STATS' : 2,
'OFPC_PORT_STATS' : 4,
'OFPC_STP' : 8,
'OFPC_RESERVED' : 16,
'OFPC_IP_REASM' : 32,
'OFPC_QUEUE_STATS' : 64,
'OFPC_ARP_MATCH_IP' : 128,
}
ofp_config_flags_rev_map = {
'OFPC_FRAG_NORMAL' : 0,
'OFPC_FRAG_DROP' : 1,
'OFPC_FRAG_REASM' : 2,
'OFPC_FRAG_MASK' : 3,
}
ofp_flow_mod_command_rev_map = {
'OFPFC_ADD' : 0,
'OFPFC_MODIFY' : 1,
'OFPFC_MODIFY_STRICT' : 2,
'OFPFC_DELETE' : 3,
'OFPFC_DELETE_STRICT' : 4,
}
ofp_flow_mod_flags_rev_map = {
'OFPFF_SEND_FLOW_REM' : 1,
'OFPFF_CHECK_OVERLAP' : 2,
'OFPFF_EMERG' : 4,
}
ofp_stats_reply_flags_rev_map = {
'OFPSF_REPLY_MORE' : 1,
}
ofp_packet_in_reason_rev_map = {
'OFPR_NO_MATCH' : 0,
'OFPR_ACTION' : 1,
}
ofp_flow_removed_reason_rev_map = {
'OFPRR_IDLE_TIMEOUT' : 0,
'OFPRR_HARD_TIMEOUT' : 1,
'OFPRR_DELETE' : 2,
}
ofp_port_reason_rev_map = {
'OFPPR_ADD' : 0,
'OFPPR_DELETE' : 1,
'OFPPR_MODIFY' : 2,
}
ofp_port_rev_map = {
'OFPP_MAX' : 65280,
'OFPP_IN_PORT' : 65528,
'OFPP_TABLE' : 65529,
'OFPP_NORMAL' : 65530,
'OFPP_FLOOD' : 65531,
'OFPP_ALL' : 65532,
'OFPP_CONTROLLER' : 65533,
'OFPP_LOCAL' : 65534,
'OFPP_NONE' : 65535,
}
ofp_flow_wildcards_rev_map = {
'OFPFW_IN_PORT' : 1,
'OFPFW_DL_VLAN' : 2,
'OFPFW_DL_SRC' : 4,
'OFPFW_DL_DST' : 8,
'OFPFW_DL_TYPE' : 16,
'OFPFW_NW_PROTO' : 32,
'OFPFW_TP_SRC' : 64,
'OFPFW_TP_DST' : 128,
'OFPFW_DL_VLAN_PCP' : 1048576,
'OFPFW_NW_TOS' : 1<<21,
}
OFPFW_NW_DST_BITS = 6
OFPFW_NW_SRC_BITS = 6
OFPFW_NW_SRC_SHIFT = 8
OFPFW_NW_DST_SHIFT = 14
OFPFW_NW_SRC_ALL = 8192
OFPFW_NW_SRC_MASK = 16128
OFPFW_NW_DST_ALL = 524288
OFPFW_NW_DST_MASK = 1032192
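# The nw_src/nw_dst wildcard fields are 6-bit counts of how many low-order
# address bits are wildcarded, stored at the shifts above; a count of 32 or
# more wildcards the entire address.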
# Note: all flags set in this glob-all mask need to be handled in the
# packet handling methods (esp. ofp_match.from_packet); otherwise,
# packets will not be matched as they should be.
OFPFW_ALL = ((1 << 22) - 1)
NO_BUFFER = 4294967295
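# (= 0xffffffff; the "no buffer" sentinel used for buffer_id fields)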
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# Structure definitions
# ----------------------------------------------------------------------
#1. Openflow Header
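# The common 8-byte header: version, type, length, and xid (packed "!BBHL").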
class ofp_header (ofp_base):
_MIN_LENGTH = 8
def __init__ (self, **kw):
self.version = OFP_VERSION
#self.header_type = None # Set via class decorator
self._xid = None
if 'header_type' in kw:
self.header_type = kw.pop('header_type')
initHelper(self, kw)
@property
def xid (self):
if self._xid is None:
self._xid = generate_xid()
return self._xid
@xid.setter
def xid (self, val):
self._xid = val
def _validate (self):
if self.header_type not in ofp_type_map:
return "type is not a known message type"
return None
def pack (self):
assert self._assert()
packed = b""
packed += struct.pack("!BBHL", self.version, self.header_type,
len(self), self.xid)
return packed
def unpack (self, raw, offset=0):
offset,length = self._unpack_header(raw, offset)
return offset,length
def _unpack_header (self, raw, offset):
offset,(self.version, self.header_type, length, self.xid) = \
_unpack("!BBHL", raw, offset)
return offset,length
def __eq__ (self, other):
if type(self) != type(other): return False
if self.version != other.version: return False
if self.header_type != other.header_type: return False
if len(self) != len(other): return False
if self.xid != other.xid: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'version: ' + str(self.version) + '\n'
outstr += prefix + 'type: ' + str(self.header_type)# + '\n'
outstr += " (" + ofp_type_map.get(self.header_type, "Unknown") + ")\n"
try:
outstr += prefix + 'length: ' + str(len(self)) + '\n'
except:
pass
outstr += prefix + 'xid: ' + str(self.xid) + '\n'
return outstr
def __str__ (self):
return self.__class__.__name__ + "\n " + self.show(' ').strip()
class ofp_stats_body_base (ofp_base):
"""
Base class for stats bodies
"""
# Stats bodies don't actually have a type field in OpenFlow --
# the type information is in the request or reply. It's really
# convenient, though, so we add it. Note that you generally
# don't need to set this yourself -- the openflow_stats_XXX
# decorator will do it for you.
_type = None
"""
def unpack (self, data, offset=0, avail=None):
"""
class ofp_action_base (ofp_base):
"""
Base class for actions
This is sort of the equivalent of ofp_action_header in the spec.
However, ofp_action_header as the spec defines it is not super
useful for us, as it has the padding in it.
"""
type = None
@classmethod
def unpack_new (cls, raw, offset=0):
"""
Unpacks wire format into the appropriate action object.
Returns newoffset,object
"""
o = cls()
r = o.unpack(raw, offset)
assert (r-offset) == len(o), o
return (r, o)
class ofp_queue_prop_base (ofp_base):
"""
Base class for queue properties
This is sort of the equivalent of ofp_queue_prop_header in the spec.
However, ofp_queue_prop_header as the spec defines it is not super
useful for us, as it has the padding in it.
"""
property = None
#2. Common Structures
##2.1 Port Structures
class ofp_phy_port (ofp_base):
def __init__ (self, **kw):
self.port_no = 0
self.hw_addr = EMPTY_ETH
self.name = ""
self.config = 0
self.state = 0
self.curr = 0
self.advertised = 0
self.supported = 0
self.peer = 0
initHelper(self, kw)
def enable_config (self, mask):
"""
Turn on selected config bits
"""
return self.set_config(0xffFFffFF, mask)
def disable_config (self, mask):
"""
Turn off selected config bits
"""
return self.set_config(0, mask)
def set_config (self, config, mask):
"""
Updates the specified config bits
Returns which bits were changed
"""
old = self.config
self.config &= ~mask
    self.config |= config & mask
return old ^ self.config
def __str__ (self):
return "%s:%i" % (self.name, self.port_no)
def _validate (self):
if isinstance(self.hw_addr, bytes) and len(self.hw_addr) == 6:
pass
elif not isinstance(self.hw_addr, EthAddr):
return "hw_addr is not a valid format"
if len(self.name) > OFP_MAX_PORT_NAME_LEN:
return "name is too long"
return None
def pack (self):
assert self._assert()
packed = b""
packed += struct.pack("!H", self.port_no)
packed += (self.hw_addr if isinstance(self.hw_addr, bytes) else
self.hw_addr.toRaw())
packed += self.name.ljust(OFP_MAX_PORT_NAME_LEN,'\0')
packed += struct.pack("!LLLLLL", self.config, self.state, self.curr,
self.advertised, self.supported, self.peer)
return packed
def unpack (self, raw, offset=0):
_offset = offset
offset,(self.port_no,) = _unpack("!H", raw, offset)
offset,self.hw_addr = _readether(raw, offset)
offset,self.name = _readzs(raw, offset, OFP_MAX_PORT_NAME_LEN)
offset,(self.config, self.state, self.curr, self.advertised,
self.supported, self.peer) = _unpack("!LLLLLL", raw, offset)
assert offset - _offset == len(self)
return offset
@staticmethod
def __len__ ():
return 48
def __eq__ (self, other):
if type(self) != type(other): return False
if self.port_no != other.port_no: return False
if self.hw_addr != other.hw_addr: return False
if self.name != other.name: return False
if self.config != other.config: return False
if self.state != other.state: return False
if self.curr != other.curr: return False
if self.advertised != other.advertised: return False
if self.supported != other.supported: return False
if self.peer != other.peer: return False
return True
def __cmp__ (self, other):
if type(other) != type(self): return id(self)-id(other)
if self.port_no < other.port_no: return -1
if self.port_no > other.port_no: return 1
if self == other: return 0
return id(self)-id(other)
def __hash__(self, *args, **kwargs):
return hash(self.port_no) ^ hash(self.hw_addr) ^ \
hash(self.name) ^ hash(self.config) ^ \
hash(self.state) ^ hash(self.curr) ^ \
hash(self.advertised) ^ hash(self.supported) + \
hash(self.peer)
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'port_no: ' + str(self.port_no) + '\n'
outstr += prefix + 'hw_addr: ' + str(EthAddr(self.hw_addr)) + '\n'
outstr += prefix + 'name: ' + str(self.name) + '\n'
outstr += prefix + 'config: ' + str(self.config) + '\n'
outstr += prefix + 'state: ' + str(self.state) + '\n'
outstr += prefix + 'curr: ' + str(self.curr) + '\n'
outstr += prefix + 'advertised: ' + str(self.advertised) + '\n'
outstr += prefix + 'supported: ' + str(self.supported) + '\n'
outstr += prefix + 'peer: ' + str(self.peer) + '\n'
return outstr
def __repr__(self):
return self.show()
##2.2 Queue Structures
class ofp_packet_queue (ofp_base):
_MIN_LENGTH = 8
def __init__ (self, **kw):
self.queue_id = 0
self.properties = []
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += struct.pack("!LH", self.queue_id, len(self))
packed += _PAD2 # Pad
for i in self.properties:
packed += i.pack()
return packed
def unpack (self, raw, offset=0):
_offset = offset
offset,(self.queue_id, length) = _unpack("!LH", raw, offset)
offset = _skip(raw, offset, 2)
length -= (4 + 2 + 2)
offset,self.properties = _unpack_queue_props(raw, length, offset)
assert offset - _offset == len(self)
return offset
def __len__ (self):
l = 8
for i in self.properties:
l += len(i)
return l
def __eq__ (self, other):
if type(self) != type(other): return False
if self.queue_id != other.queue_id: return False
if len(self) != len(other): return False
if self.properties != other.properties: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'queue_id: ' + str(self.queue_id) + '\n'
outstr += prefix + 'len: ' + str(len(self)) + '\n'
outstr += prefix + 'properties: \n'
for obj in self.properties:
outstr += obj.show(prefix + ' ')
return outstr
class ofp_queue_prop_generic (ofp_queue_prop_base):
_MIN_LENGTH = 8
def __init__ (self, **kw):
self.property = None # Purposely bad
self.data = _PAD4
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += struct.pack("!HH", self.property, len(self))
packed += self.data
return packed
def unpack (self, raw, offset=0):
_offset = offset
offset,(self.property, length) = _unpack("!HH", raw, offset)
offset,self.data = _read(raw, offset, length-4)
assert offset - _offset == len(self)
return offset
  def __len__ (self):
    return 4 + len(self.data)
def __eq__ (self, other):
if type(self) != type(other): return False
if self.property != other.property: return False
if len(self) != len(other): return False
if self.data != other.data: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'property: ' + str(self.property) + '\n'
outstr += prefix + 'len: ' + str(len(self)) + '\n'
return outstr
@openflow_queue_prop('OFPQT_NONE', 0)
class ofp_queue_prop_none (ofp_queue_prop_generic):
pass
@openflow_queue_prop('OFPQT_MIN_RATE', 1)
class ofp_queue_prop_min_rate (ofp_base):
def __init__ (self, **kw):
self.rate = 0
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += struct.pack("!HH", self.property, len(self))
packed += _PAD4
packed += struct.pack("!H", self.rate)
packed += _PAD6
return packed
def unpack (self, raw, offset=0):
_offset = offset
offset,(self.property, length, pad) = \
_unpack("!HHL", raw, offset)
offset,(self.rate,) = _unpack("!H", raw, offset)
offset = _skip(raw, offset, 6)
assert offset - _offset == len(self)
return offset
@staticmethod
def __len__ ():
return 16
def __eq__ (self, other):
if type(self) != type(other): return False
if self.property != other.property: return False
if self.rate != other.rate: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'property: ' + str(self.property) + '\n'
outstr += prefix + 'len: ' + str(len(self)) + '\n'
outstr += prefix + 'rate: ' + str(self.rate) + '\n'
return outstr
##2.3 Flow Match Structures
class ofp_match (ofp_base):
adjust_wildcards = True # Set to true to "fix" outgoing wildcards
@classmethod
def from_packet (cls, packet, in_port = None, spec_frags = False):
"""
Constructs an exact match for the given packet
@param in_port The switch port the packet arrived on if you want
the resulting match to have its in_port set.
If "packet" is a packet_in, this is ignored.
@param packet A pox.packet.ethernet instance or a packet_in
@param spec_frags Handle IP fragments as specified in the spec.
"""
if isinstance(packet, ofp_packet_in):
in_port = packet.in_port
packet = ethernet(packet.data)
assert assert_type("packet", packet, ethernet, none_ok=False)
match = cls()
if in_port is not None:
match.in_port = in_port
match.dl_src = packet.src
match.dl_dst = packet.dst
match.dl_type = packet.type
p = packet.next
# Is this in the spec?
if packet.type < 1536:
match.dl_type = OFP_DL_TYPE_NOT_ETH_TYPE
# LLC then VLAN? VLAN then LLC?
if isinstance(p, llc):
if p.has_snap and p.oui == '\0\0\0':
match.dl_type = p.eth_type
p = p.next
if isinstance(p, vlan):
match.dl_type = p.eth_type
match.dl_vlan = p.id
match.dl_vlan_pcp = p.pcp
p = p.next
else:
match.dl_vlan = OFP_VLAN_NONE
match.dl_vlan_pcp = 0
if isinstance(p, ipv4):
match.nw_src = p.srcip
match.nw_dst = p.dstip
match.nw_proto = p.protocol
match.nw_tos = p.tos
if spec_frags and ((p.flags & p.MF_FLAG) or p.frag != 0):
# This seems a bit strange, but see page 9 of the spec.
match.tp_src = 0
match.tp_dst = 0
return match
p = p.next
if isinstance(p, udp) or isinstance(p, tcp):
match.tp_src = p.srcport
match.tp_dst = p.dstport
elif isinstance(p, icmp):
match.tp_src = p.type
match.tp_dst = p.code
elif isinstance(p, arp):
if p.opcode <= 255:
match.nw_proto = p.opcode
match.nw_src = p.protosrc
match.nw_dst = p.protodst
return match
def clone (self):
n = ofp_match()
for k,v in ofp_match_data.iteritems():
setattr(n, '_' + k, getattr(self, '_' + k))
n.wildcards = self.wildcards
return n
def flip (self, in_port = True):
"""
Return version of this match with src and dst fields swapped
in_port can be:
True : Include same in_port in new match
Other : Set Other as in_port in new match
"""
reversed = self.clone()
for field in ('dl','nw','tp'):
setattr(reversed, field + '_src', getattr(self, field + '_dst'))
setattr(reversed, field + '_dst', getattr(self, field + '_src'))
if in_port is not True:
reversed.in_port = in_port
return reversed
def __init__ (self, **kw):
self._locked = False
for k,v in ofp_match_data.iteritems():
setattr(self, '_' + k, v[0])
self.wildcards = self._normalize_wildcards(OFPFW_ALL)
# This is basically initHelper(), but tweaked slightly since this
# class does some magic of its own.
for k,v in kw.iteritems():
if not hasattr(self, '_'+k):
raise TypeError(self.__class__.__name__ + " constructor got "
+ "unexpected keyword argument '" + k + "'")
setattr(self, k, v)
def get_nw_dst (self):
if (self.wildcards & OFPFW_NW_DST_ALL) == OFPFW_NW_DST_ALL:
return (None, 0)
w = (self.wildcards & OFPFW_NW_DST_MASK) >> OFPFW_NW_DST_SHIFT
return (self._nw_dst,32-w if w <= 32 else 0)
def get_nw_src (self):
if (self.wildcards & OFPFW_NW_SRC_ALL) == OFPFW_NW_SRC_ALL:
return (None, 0)
w = (self.wildcards & OFPFW_NW_SRC_MASK) >> OFPFW_NW_SRC_SHIFT
return (self._nw_src,32-w if w <= 32 else 0)
def set_nw_dst (self, *args, **kw):
a = self._make_addr(*args, **kw)
if a is None:
self._nw_dst = ofp_match_data['nw_dst'][0]
self.wildcards &= ~OFPFW_NW_DST_MASK
self.wildcards |= ofp_match_data['nw_dst'][1]
return
self._nw_dst = a[0]
self.wildcards &= ~OFPFW_NW_DST_MASK
self.wildcards |= ((32-a[1]) << OFPFW_NW_DST_SHIFT)
def set_nw_src (self, *args, **kw):
a = self._make_addr(*args, **kw)
if a is None:
self._nw_src = ofp_match_data['nw_src'][0]
self.wildcards &= ~OFPFW_NW_SRC_MASK
self.wildcards |= ofp_match_data['nw_src'][1]
return
self._nw_src = a[0]
self.wildcards &= ~OFPFW_NW_SRC_MASK
self.wildcards |= ((32-a[1]) << OFPFW_NW_SRC_SHIFT)
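  # nw_src/nw_dst accept CIDR-style values; e.g. match.nw_src = "10.0.0.0/8"
  # stores the address and records 24 wildcarded low-order bits in
  # self.wildcards via the OFPFW_NW_SRC field.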
def _make_addr (self, ipOrIPAndBits, bits=None):
if ipOrIPAndBits is None: return None
b = None
if type(ipOrIPAndBits) is tuple:
ip = ipOrIPAndBits[0]
b = ipOrIPAndBits[1]
b = 32 if b is None else int(b)
elif (type(ipOrIPAndBits) is str) and (len(ipOrIPAndBits) != 4):
if ipOrIPAndBits.find('/') != -1:
s = parse_cidr(ipOrIPAndBits, infer=False)
ip = s[0]
b = int(s[1]) if b is None else b
else:
ip = ipOrIPAndBits
b = 32 if b is None else b
else:
ip = ipOrIPAndBits
b = 32 if b is None else b
if type(ip) is str:
ip = IPAddr(ip)
if bits != None: b = bits
if b > 32: b = 32
elif b < 0: b = 0
return (ip, b)
def __setattr__ (self, name, value):
if name == '_locked':
super(ofp_match,self).__setattr__(name, value)
return
if self._locked:
raise AttributeError('match object is locked')
if name not in ofp_match_data:
self.__dict__[name] = value
return
if name == 'nw_dst' or name == 'nw_src':
# Special handling
getattr(self, 'set_' + name)(value)
return value
if value is None:
setattr(self, '_' + name, ofp_match_data[name][0])
self.wildcards |= ofp_match_data[name][1]
else:
setattr(self, '_' + name, value)
self.wildcards = self.wildcards & ~ofp_match_data[name][1]
return value
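  # Setting a known match field to None wildcards it; setting a value clears
  # the corresponding wildcard bit, and reading a wildcarded field returns None.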
def __getattr__ (self, name):
if name in ofp_match_data:
if ( (self.wildcards & ofp_match_data[name][1])
== ofp_match_data[name][1] ):
# It's wildcarded -- always return None
return None
if name == 'nw_dst' or name == 'nw_src':
# Special handling
return getattr(self, 'get_' + name)()[0]
return self.__dict__['_' + name]
raise AttributeError("attribute not found: "+name)
def _validate (self):
# TODO
return None
def _prereq_warning (self):
# Only checked when assertions are on
if not _logger: return True
om = self.clone()
om.fix()
if om == self: return True
msg = "Fields ignored due to unspecified prerequisites: "
wcs = []
for name in ofp_match_data.keys():
if getattr(self,name) is None: continue
if getattr(om,name) is not None: continue
wcs.append(name)
msg = msg + " ".join(wcs)
_log(warn = msg)
_log(debug = "Problematic match: " + str(self))
return True # Always; we don't actually want an assertion error
def pack (self, flow_mod=False):
assert self._assert()
packed = b""
if self.adjust_wildcards and flow_mod:
wc = self._wire_wildcards(self.wildcards)
assert self._prereq_warning()
else:
wc = self.wildcards
packed += struct.pack("!LH", wc, self.in_port or 0)
if self.dl_src is None:
packed += EMPTY_ETH.toRaw()
elif type(self.dl_src) is bytes:
packed += self.dl_src
else:
packed += self.dl_src.toRaw()
if self.dl_dst is None:
packed += EMPTY_ETH.toRaw()
elif type(self.dl_dst) is bytes:
packed += self.dl_dst
else:
packed += self.dl_dst.toRaw()
def check_ip(val):
return (val or 0) if self.dl_type == 0x0800 else 0
def check_ip_or_arp(val):
return (val or 0) if self.dl_type == 0x0800 \
or self.dl_type == 0x0806 else 0
def check_tp(val):
return (val or 0) if self.dl_type == 0x0800 \
and self.nw_proto in (1,6,17) else 0
packed += struct.pack("!HB", self.dl_vlan or 0, self.dl_vlan_pcp or 0)
packed += _PAD # Hardcode padding
packed += struct.pack("!HBB", self.dl_type or 0,
check_ip(self.nw_tos), check_ip_or_arp(self.nw_proto))
packed += _PAD2 # Hardcode padding
def fix (addr):
if addr is None: return 0
if type(addr) is int: return addr & 0xffFFffFF
if type(addr) is long: return addr & 0xffFFffFF
return addr.toUnsigned()
packed += struct.pack("!LLHH", check_ip_or_arp(fix(self.nw_src)),
check_ip_or_arp(fix(self.nw_dst)),
check_tp(self.tp_src), check_tp(self.tp_dst))
return packed
def _normalize_wildcards (self, wildcards):
"""
nw_src and nw_dst values greater than 32 mean the same thing as 32.
We normalize them here just to be clean and so that comparisons act
as you'd want them to.
"""
if ((wildcards & OFPFW_NW_SRC_MASK) >> OFPFW_NW_SRC_SHIFT) > 32:
wildcards &= ~OFPFW_NW_SRC_MASK
wildcards |= (32 << OFPFW_NW_SRC_SHIFT)
if ((wildcards & OFPFW_NW_DST_MASK) >> OFPFW_NW_DST_SHIFT) > 32:
wildcards &= ~OFPFW_NW_DST_MASK
wildcards |= (32 << OFPFW_NW_DST_SHIFT)
return wildcards
def _wire_wildcards (self, wildcards):
"""
Normalize the wildcard bits
Note the following from the OpenFlow 1.1 spec:
Protocol-specific fields within ofp_match will be ignored within
a single table when the corresponding protocol is not specified in the
match. The IP header and transport header fields
will be ignored unless the Ethertype is specified as either IPv4 or
ARP. The tp_src and tp_dst fields will be ignored unless the network
protocol specified is as TCP, UDP or SCTP. Fields that are ignored
don't need to be wildcarded and should be set to 0.
OpenFlow 1.0.1 Section 3.4 actually has an improved version of the above,
but we won't quote it here because it seems to have a restrictive license.
"""
#TODO: Set the masked fields to 0.
if self.dl_type == 0x0800:
# IP
if self.nw_proto not in (1,6,17):
# not TCP/UDP/ICMP -> Clear TP wildcards for the wire
return wildcards & ~(OFPFW_TP_SRC | OFPFW_TP_DST)
else:
return wildcards
elif self.dl_type == 0x0806:
# ARP: clear NW_TOS / TP wildcards for the wire
return wildcards & ~( OFPFW_NW_TOS | OFPFW_TP_SRC | OFPFW_TP_DST)
else:
# not even IP. Clear NW/TP wildcards for the wire
return wildcards & ~( OFPFW_NW_TOS | OFPFW_NW_PROTO
| OFPFW_NW_SRC_MASK | OFPFW_NW_DST_MASK
| OFPFW_TP_SRC | OFPFW_TP_DST)
def fix (self):
"""
Removes unmatchable fields
The logic in this should exactly match that in _wire_wildcards()
"""
if self.dl_type == 0x0800:
# IP
if self.nw_proto not in (1,6,17):
# not TCP/UDP/ICMP -> Clear TP wildcards for the wire
self.tp_src = None
self.tp_dst = None
return
elif self.dl_type == 0x0806:
# ARP: clear NW_TOS / TP wildcards for the wire
self.tp_src = None
self.tp_dst = None
self.nw_tos = None
return
else:
# not even IP. Clear NW/TP wildcards for the wire
self.nw_tos = None
self.nw_proto = None
self.nw_src = None
self.nw_dst = None
self.tp_src = None
self.tp_dst = None
return
def _unwire_wildcards (self, wildcards):
"""
Normalize the wildcard bits from the openflow wire representation.
Note this atrocity from the OF1.1 spec:
Protocol-specific fields within ofp_match will be ignored within
a single table when the corresponding protocol is not specified in the
match. The IP header and transport header fields
will be ignored unless the Ethertype is specified as either IPv4 or
ARP. The tp_src and tp_dst fields will be ignored unless the network
protocol specified is as TCP, UDP or SCTP. Fields that are ignored
don't need to be wildcarded and should be set to 0.
"""
if self._dl_type == 0x0800:
# IP
if self._nw_proto not in (1,6,17):
# not TCP/UDP/ICMP -> Set TP wildcards for the object
return wildcards | (OFPFW_TP_SRC | OFPFW_TP_DST)
else:
return wildcards
elif self._dl_type == 0x0806:
# ARP: Set NW_TOS / TP wildcards for the object
return wildcards | ( OFPFW_NW_TOS | OFPFW_TP_SRC | OFPFW_TP_DST)
else:
# not even IP. Set NW/TP wildcards for the object
return wildcards | ( OFPFW_NW_TOS | OFPFW_NW_PROTO
| OFPFW_NW_SRC_MASK | OFPFW_NW_DST_MASK
| OFPFW_TP_SRC | OFPFW_TP_DST)
@property
def is_wildcarded (self):
return self.wildcards & OFPFW_ALL != 0
@property
def is_exact (self):
return not self.is_wildcarded
def unpack (self, raw, offset=0, flow_mod=False):
_offset = offset
offset,(wildcards, self._in_port) = _unpack("!LH",raw, offset)
offset,self._dl_src = _readether(raw, offset)
offset,self._dl_dst = _readether(raw, offset)
offset,(self._dl_vlan, self._dl_vlan_pcp) = \
_unpack("!HB", raw, offset)
offset = _skip(raw, offset, 1)
offset,(self._dl_type, self._nw_tos, self._nw_proto) = \
_unpack("!HBB", raw, offset)
offset = _skip(raw, offset, 2)
offset,self._nw_src = _readip(raw, offset)
offset,self._nw_dst = _readip(raw, offset)
offset,(self._tp_src, self._tp_dst) = _unpack("!HH", raw, offset)
# Only unwire wildcards for flow_mod
self.wildcards = self._normalize_wildcards(
self._unwire_wildcards(wildcards) if flow_mod else wildcards)
assert offset - _offset == len(self)
return offset
@staticmethod
def __len__ ():
return 40
def hash_code (self):
"""
generate a hash value for this match
This generates a hash code which might be useful, but without locking
the match object.
"""
h = self.wildcards
for f in ofp_match_data:
v = getattr(self, f)
if type(v) is int:
h ^= v
elif type(v) is long:
h ^= v
else:
h ^= hash(v)
return int(h & 0x7fFFffFF)
def __hash__ (self):
self._locked = True
return self.hash_code()
def matches_with_wildcards (self, other, consider_other_wildcards=True):
"""
Test whether /this/ match completely encompasses the other match.
if consider_other_wildcards, then the *other* match must also have
no more wildcards than we do (it must be no wider than we are)
Important for non-strict modify flow_mods etc.
"""
assert assert_type("other", other, ofp_match, none_ok=False)
# shortcut for equal matches
if self == other: return True
if consider_other_wildcards:
# Check that other doesn't have more wildcards than we do -- it
# must be narrower (or equal) to us.
self_bits = self.wildcards&~(OFPFW_NW_SRC_MASK|OFPFW_NW_DST_MASK)
other_bits = other.wildcards&~(OFPFW_NW_SRC_MASK|OFPFW_NW_DST_MASK)
if (self_bits | other_bits) != self_bits: return False
def match_fail (mine, others):
if mine is None: return False # Wildcarded
return mine != others
if match_fail(self.in_port, other.in_port): return False
if match_fail(self.dl_vlan, other.dl_vlan): return False
if match_fail(self.dl_src, other.dl_src): return False
if match_fail(self.dl_dst, other.dl_dst): return False
if match_fail(self.dl_type, other.dl_type): return False
if match_fail(self.nw_proto, other.nw_proto): return False
if match_fail(self.tp_src, other.tp_src): return False
if match_fail(self.tp_dst, other.tp_dst): return False
if match_fail(self.dl_vlan_pcp, other.dl_vlan_pcp): return False
if match_fail(self.nw_tos, other.nw_tos): return False
#FIXME: The two ??? checks below look like they compare other
# wildcards always -- even when consider_other_wildcards=False.
# Is this intentional? (I think it might be subtly wrong and
# we actually may need to mask off some bits and do the
# inNetwork check or something...)
self_nw_src = self.get_nw_src()
if self_nw_src[0] is not None:
other_nw_src = other.get_nw_src()
if self_nw_src[1] > other_nw_src[1]: return False #???
if not IPAddr(other_nw_src[0]).inNetwork(
(self_nw_src[0], self_nw_src[1])): return False
self_nw_dst = self.get_nw_dst()
if self_nw_dst[0] is not None:
other_nw_dst = other.get_nw_dst()
if self_nw_dst[1] > other_nw_dst[1]: return False #???
if not IPAddr(other_nw_dst[0]).inNetwork(
(self_nw_dst[0], self_nw_dst[1])): return False
return True
def __eq__ (self, other):
if type(self) != type(other): return False
if self.wildcards != other.wildcards: return False
if self.in_port != other.in_port: return False
if self.dl_src != other.dl_src: return False
if self.dl_dst != other.dl_dst: return False
if self.dl_vlan != other.dl_vlan: return False
if self.dl_vlan_pcp != other.dl_vlan_pcp: return False
if self.dl_type != other.dl_type: return False
if self.nw_tos != other.nw_tos: return False
if self.nw_proto != other.nw_proto: return False
if self.nw_src != other.nw_src: return False
if self.nw_dst != other.nw_dst: return False
if self.tp_src != other.tp_src: return False
if self.tp_dst != other.tp_dst: return False
return True
def __str__ (self):
return self.__class__.__name__ + "\n " + self.show(' ').strip()
def show (self, prefix=''):
def binstr (n):
s = ''
while True:
s = ('1' if n & 1 else '0') + s
n >>= 1
if n == 0: break
return s
def safehex(n):
if n is None:
return "(None)"
else:
return hex(n)
def show_wildcards(w):
parts = [ k.lower()[len("OFPFW_"):]
for (k,v) in ofp_flow_wildcards_rev_map.iteritems()
if v & w == v ]
nw_src_bits = (w & OFPFW_NW_SRC_MASK) >> OFPFW_NW_SRC_SHIFT
if nw_src_bits > 0:
parts.append("nw_src(/%d)" % (32 - nw_src_bits))
nw_dst_bits = (w & OFPFW_NW_DST_MASK) >> OFPFW_NW_DST_SHIFT
if nw_dst_bits > 0:
parts.append("nw_dst(/%d)" % (32 - nw_dst_bits))
return "|".join(parts)
outstr = ''
outstr += prefix + 'wildcards: '
outstr += show_wildcards(self.wildcards)
outstr += ' (%s = %x)\n' % (binstr(self.wildcards), self.wildcards)
def append (f, formatter=str):
v = self.__getattr__(f)
if v is None: return ''
return prefix + f + ": " + formatter(v) + "\n"
outstr += append('in_port')
outstr += append('dl_src')
outstr += append('dl_dst')
outstr += append('dl_vlan')
outstr += append('dl_vlan_pcp')
outstr += append('dl_type', safehex)
outstr += append('nw_tos')
outstr += append('nw_proto')
outstr += append('nw_src')
outstr += append('nw_dst')
outstr += append('tp_src')
outstr += append('tp_dst')
return outstr
class ofp_action_generic (ofp_action_base):
_MIN_LENGTH = 8
def __init__ (self, **kw):
self.type = None # Purposely bad
self.data = _PAD4
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += struct.pack("!HH", self.type, len(self))
packed += self.data
return packed
def unpack (self, raw, offset=0):
_offset = offset
offset,(self.type, length) = _unpack("!HH", raw, offset)
offset,self.data = _read(raw, offset, length-4)
assert offset - _offset == len(self)
return offset
def __len__ (self):
return 4 + len(self.data)
def __eq__ (self, other):
if type(self) != type(other): return False
if self.type != other.type: return False
if self.data != other.data: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'type: ' + str(self.type) + '\n'
outstr += prefix + 'len: ' + str(len(self)) + '\n'
return outstr
@openflow_action('OFPAT_OUTPUT', 0)
class ofp_action_output (ofp_action_base):
def __init__ (self, **kw):
self.port = None # Purposely bad -- require specification
self.max_len = 0xffFF
initHelper(self, kw)
def pack (self):
if self.port != OFPP_CONTROLLER:
self.max_len = 0
assert self._assert()
packed = b""
packed += struct.pack("!HHHH", self.type, len(self), self.port,
self.max_len)
return packed
def unpack (self, raw, offset=0):
_offset = offset
offset,(self.type, length, self.port, self.max_len) = \
_unpack("!HHHH", raw, offset)
assert offset - _offset == len(self)
return offset
@staticmethod
def __len__ ():
return 8
def __eq__ (self, other):
if type(self) != type(other): return False
if self.type != other.type: return False
if len(self) != len(other): return False
if self.port != other.port: return False
if self.max_len != other.max_len: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'type: ' + str(self.type) + '\n'
outstr += prefix + 'len: ' + str(len(self)) + '\n'
outstr += prefix + 'port: ' + str(self.port) + '\n'
outstr += prefix + 'max_len: ' + str(self.max_len) + '\n'
return outstr
@openflow_action('OFPAT_ENQUEUE', 11)
class ofp_action_enqueue (ofp_action_base):
def __init__ (self, **kw):
self.port = None # Require user to set
self.queue_id = 0
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += struct.pack("!HHH", self.type, len(self), self.port)
packed += _PAD6 # Pad
packed += struct.pack("!L", self.queue_id)
return packed
def unpack (self, raw, offset=0):
_offset = offset
offset,(self.type, length, self.port) = _unpack("!HHH", raw, offset)
offset = _skip(raw, offset, 6)
offset,(self.queue_id,) = _unpack("!L", raw, offset)
assert offset - _offset == len(self)
return offset
@staticmethod
def __len__ ():
return 16
def __eq__ (self, other):
if type(self) != type(other): return False
if self.type != other.type: return False
if len(self) != len(other): return False
if self.port != other.port: return False
if self.queue_id != other.queue_id: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'type: ' + str(self.type) + '\n'
outstr += prefix + 'len: ' + str(len(self)) + '\n'
outstr += prefix + 'port: ' + str(self.port) + '\n'
outstr += prefix + 'queue_id: ' + str(self.queue_id) + '\n'
return outstr
@openflow_action('OFPAT_STRIP_VLAN', 3)
class ofp_action_strip_vlan (ofp_action_base):
def __init__ (self):
pass
def pack (self):
packed = struct.pack("!HHi", self.type, len(self), 0)
return packed
def unpack (self, raw, offset=0):
_offset = offset
offset,(self.type, length) = _unpack("!HH", raw, offset)
offset = _skip(raw, offset, 4)
assert offset - _offset == len(self)
return offset
@staticmethod
def __len__ ():
return 8
def __eq__ (self, other):
if type(self) != type(other): return False
if self.type != other.type: return False
if len(self) != len(other): return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'type: ' + str(self.type) + '\n'
outstr += prefix + 'len: ' + str(len(self)) + '\n'
return outstr
@openflow_action('OFPAT_SET_VLAN_VID', 1)
class ofp_action_vlan_vid (ofp_action_base):
def __init__ (self, **kw):
self.vlan_vid = 0
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += struct.pack("!HHH", self.type, len(self), self.vlan_vid)
packed += _PAD2 # Pad
return packed
def unpack (self, raw, offset=0):
_offset = offset
offset,(self.type, length, self.vlan_vid) = \
_unpack("!HHH", raw, offset)
offset = _skip(raw, offset, 2)
#TODO: check length for this and other actions
assert offset - _offset == len(self)
return offset
@staticmethod
def __len__ ():
return 8
def __eq__ (self, other):
if type(self) != type(other): return False
if self.type != other.type: return False
if len(self) != len(other): return False
if self.vlan_vid != other.vlan_vid: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'type: ' + str(self.type) + '\n'
outstr += prefix + 'len: ' + str(len(self)) + '\n'
outstr += prefix + 'vlan_vid: ' + str(self.vlan_vid) + '\n'
return outstr
ofp_action_set_vlan_vid = ofp_action_vlan_vid
@openflow_action('OFPAT_SET_VLAN_PCP', 2)
class ofp_action_vlan_pcp (ofp_action_base):
def __init__ (self, **kw):
self.vlan_pcp = 0
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += struct.pack("!HHB", self.type, len(self), self.vlan_pcp)
packed += _PAD3 # Pad
return packed
def unpack (self, raw, offset=0):
_offset = offset
offset,(self.type, length, self.vlan_pcp) = \
_unpack("!HHB", raw, offset)
offset = _skip(raw, offset, 3)
assert offset - _offset == len(self)
return offset
@staticmethod
def __len__ ():
return 8
def __eq__ (self, other):
if type(self) != type(other): return False
if self.type != other.type: return False
if len(self) != len(other): return False
if self.vlan_pcp != other.vlan_pcp: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'type: ' + str(self.type) + '\n'
outstr += prefix + 'len: ' + str(len(self)) + '\n'
outstr += prefix + 'vlan_pcp: ' + str(self.vlan_pcp) + '\n'
return outstr
ofp_action_set_vlan_pcp = ofp_action_vlan_pcp
@openflow_action('OFPAT_SET_DL_DST', 5)
@openflow_action('OFPAT_SET_DL_SRC', 4)
class ofp_action_dl_addr (ofp_action_base):
@classmethod
def set_dst (cls, dl_addr = None):
return cls(OFPAT_SET_DL_DST, dl_addr)
@classmethod
def set_src (cls, dl_addr = None):
return cls(OFPAT_SET_DL_SRC, dl_addr)
def __init__ (self, type = None, dl_addr = None):
"""
'type' should be OFPAT_SET_DL_SRC or OFPAT_SET_DL_DST.
"""
self.type = type
self.dl_addr = EMPTY_ETH
if dl_addr is not None:
self.dl_addr = EthAddr(dl_addr)
def _validate (self):
if (not isinstance(self.dl_addr, EthAddr)
and not isinstance(self.dl_addr, bytes)):
return "dl_addr is not string or EthAddr"
if isinstance(self.dl_addr, bytes) and len(self.dl_addr) != 6:
return "dl_addr is not of size 6"
return None
def pack (self):
assert self._assert()
packed = b""
packed += struct.pack("!HH", self.type, len(self))
if isinstance(self.dl_addr, EthAddr):
packed += self.dl_addr.toRaw()
else:
packed += self.dl_addr
packed += _PAD6
return packed
def unpack (self, raw, offset=0):
_offset = offset
offset,(self.type, length) = _unpack("!HH", raw, offset)
offset,self.dl_addr = _readether(raw, offset)
offset = _skip(raw, offset, 6)
assert offset - _offset == len(self)
return offset
@staticmethod
def __len__ ():
return 16
def __eq__ (self, other):
if type(self) != type(other): return False
if self.type != other.type: return False
if len(self) != len(other): return False
if self.dl_addr != other.dl_addr: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'type: ' + str(self.type) + '\n'
outstr += prefix + 'len: ' + str(len(self)) + '\n'
outstr += prefix + 'dl_addr: ' + str(self.dl_addr) + '\n'
return outstr
@openflow_action('OFPAT_SET_NW_DST', 7)
@openflow_action('OFPAT_SET_NW_SRC', 6)
class ofp_action_nw_addr (ofp_action_base):
@classmethod
def set_dst (cls, nw_addr = None):
return cls(OFPAT_SET_NW_DST, nw_addr)
@classmethod
def set_src (cls, nw_addr = None):
return cls(OFPAT_SET_NW_SRC, nw_addr)
def __init__ (self, type = None, nw_addr = None):
"""
'type' should be OFPAT_SET_NW_SRC or OFPAT_SET_NW_DST
"""
self.type = type
if nw_addr is not None:
self.nw_addr = IPAddr(nw_addr)
else:
self.nw_addr = IPAddr(0)
def pack (self):
assert self._assert()
packed = b""
packed += struct.pack("!HHl", self.type, len(self),
self.nw_addr.toSigned())
return packed
def unpack (self, raw, offset=0):
_offset = offset
offset,(self.type, length) = _unpack("!HH", raw, offset)
offset,self.nw_addr = _readip(raw, offset)
assert offset - _offset == len(self)
return offset
@staticmethod
def __len__ ():
return 8
def __eq__ (self, other):
if type(self) != type(other): return False
if self.type != other.type: return False
if len(self) != len(other): return False
if self.nw_addr != other.nw_addr: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'type: ' + str(self.type) + '\n'
outstr += prefix + 'len: ' + str(len(self)) + '\n'
outstr += prefix + 'nw_addr: ' + str(self.nw_addr) + '\n'
return outstr
@openflow_action('OFPAT_SET_NW_TOS', 8)
class ofp_action_nw_tos (ofp_action_base):
def __init__ (self, nw_tos = 0):
self.nw_tos = nw_tos
def pack (self):
assert self._assert()
packed = b""
packed += struct.pack("!HHB", self.type, len(self), self.nw_tos)
packed += _PAD3
return packed
def unpack (self, raw, offset=0):
_offset = offset
offset,(self.type, length, self.nw_tos) = _unpack("!HHB", raw, offset)
offset = _skip(raw, offset, 3)
assert offset - _offset == len(self)
return offset
@staticmethod
def __len__ ():
return 8
def __eq__ (self, other):
if type(self) != type(other): return False
if self.type != other.type: return False
if len(self) != len(other): return False
if self.nw_tos != other.nw_tos: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'type: ' + str(self.type) + '\n'
outstr += prefix + 'len: ' + str(len(self)) + '\n'
outstr += prefix + 'nw_tos: ' + str(self.nw_tos) + '\n'
return outstr
@openflow_action('OFPAT_SET_TP_DST', 10)
@openflow_action('OFPAT_SET_TP_SRC', 9)
class ofp_action_tp_port (ofp_action_base):
@classmethod
def set_dst (cls, tp_port = None):
return cls(OFPAT_SET_TP_DST, tp_port)
@classmethod
def set_src (cls, tp_port = None):
return cls(OFPAT_SET_TP_SRC, tp_port)
def __init__ (self, type=None, tp_port = 0):
"""
'type' is OFPAT_SET_TP_SRC/DST
"""
self.type = type
self.tp_port = tp_port
def pack (self):
assert self._assert()
packed = b""
packed += struct.pack("!HHH", self.type, len(self), self.tp_port)
packed += _PAD2
return packed
def unpack (self, raw, offset=0):
_offset = offset
offset,(self.type, length, self.tp_port) = \
_unpack("!HHH", raw, offset)
offset = _skip(raw, offset, 2)
assert offset - _offset == len(self)
return offset
@staticmethod
def __len__ ():
return 8
def __eq__ (self, other):
if type(self) != type(other): return False
if self.type != other.type: return False
if len(self) != len(other): return False
if self.tp_port != other.tp_port: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'type: ' + str(self.type) + '\n'
outstr += prefix + 'len: ' + str(len(self)) + '\n'
outstr += prefix + 'tp_port: ' + str(self.tp_port) + '\n'
return outstr
class ofp_action_vendor_base (ofp_action_base):
"""
Base class for vendor actions
"""
type = 65535 # OFPAT_VENDOR
def _eq (self, other):
"""
Return True if equal
    Override this.
"""
return True
def _init (self, kw):
"""
Initialize fields
    Override this.
"""
pass
def _pack_body (self):
"""
Pack body.
"""
return b""
def _unpack_body (self, raw, offset, avail):
"""
Unpack body in raw starting at offset.
Return new offset
"""
return offset
def _body_length (self):
"""
Return length of body.
This should include everything after the length field.
Optionally override this.
"""
return len(self._pack_body())
def _show (self, prefix):
"""
Format additional fields as text
"""
return ""
def __init__ (self, **kw):
self._init(kw)
assert hasattr(self, 'vendor')
#self.vendor = 0
initHelper(self, kw)
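  # Note: this second _pack_body definition shadows the default one above and
  # assumes the subclass provides a 'body' attribute (as ofp_action_vendor_generic
  # does below).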
def _pack_body (self):
if hasattr(self.body, 'pack'):
return self.body.pack()
else:
return bytes(self.body)
def pack (self):
assert self._assert()
body = self._pack_body()
packed = b""
packed += struct.pack("!HHL", self.type, 8 + len(body), self.vendor)
packed += body
assert (len(packed) % 8) == 0, "Vendor action length not multiple of 8"
return packed
def unpack (self, raw, offset=0):
_offset = offset
offset,(self.type, length, self.vendor) = _unpack("!HHL", raw, offset)
offset = self._unpack_body(raw, offset, length - 8)
assert offset - _offset == len(self)
return offset
def __len__ (self):
return 8 + self._body_length()
def __eq__ (self, other):
if type(self) != type(other): return False
if self.type != other.type: return False
if len(self) != len(other): return False
if self.vendor != other.vendor: return False
return self._eq(other)
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'type: ' + str(self.type) + '\n'
outstr += prefix + 'len: ' + str(len(self)) + '\n'
outstr += prefix + 'vendor: ' + str(self.vendor) + '\n'
outstr += self._show(prefix)
return outstr
@openflow_action('OFPAT_VENDOR', 65535)
class ofp_action_vendor_generic (ofp_action_base):
def __init__ (self, **kw):
self.vendor = 0
self.body = b""
initHelper(self, kw)
def _pack_body (self):
if hasattr(self.body, 'pack'):
return self.body.pack()
else:
return bytes(self.body)
def pack (self):
assert self._assert()
body = self._pack_body()
packed = b""
packed += struct.pack("!HHL", self.type, 8 + len(body), self.vendor)
packed += body
return packed
def unpack (self, raw, offset=0):
_offset = offset
offset,(self.type, length, self.vendor) = _unpack("!HHL", raw, offset)
offset,self.body = _read(raw, offset, length - 8)
assert offset - _offset == len(self)
return offset
def __len__ (self):
return 8 + len(self._pack_body())
def __eq__ (self, other):
if type(self) != type(other): return False
if self.type != other.type: return False
if len(self) != len(other): return False
if self.vendor != other.vendor: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'type: ' + str(self.type) + '\n'
outstr += prefix + 'len: ' + str(len(self)) + '\n'
outstr += prefix + 'vendor: ' + str(self.vendor) + '\n'
return outstr
#3. Controller-to-Switch Messages
##3.1 Handshake
@openflow_s_message("OFPT_FEATURES_REPLY", 6,
reply_to="ofp_features_request")
class ofp_features_reply (ofp_header):
_MIN_LENGTH = 32
def __init__ (self, **kw):
ofp_header.__init__(self)
self.datapath_id = 0
self.n_buffers = 0
self.n_tables = 0
self.capabilities = 0
self.actions = 0
self.ports = []
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
packed += struct.pack("!QLB", self.datapath_id, self.n_buffers,
self.n_tables)
packed += _PAD3
packed += struct.pack("!LL", self.capabilities, self.actions)
for i in self.ports:
packed += i.pack()
return packed
def unpack (self, raw, offset=0):
offset,length = self._unpack_header(raw, offset)
offset,(self.datapath_id, self.n_buffers, self.n_tables) = \
_unpack("!QLB", raw, offset)
offset = _skip(raw, offset, 3)
offset,(self.capabilities, self.actions) = _unpack("!LL", raw, offset)
portCount = (length - 32) // len(ofp_phy_port)
self.ports = []
for i in xrange(0, portCount):
p = ofp_phy_port()
offset = p.unpack(raw, offset)
self.ports.append(p)
assert length == len(self)
return offset,length
def __len__ (self):
return 32 + len(self.ports) * len(ofp_phy_port)
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
if self.datapath_id != other.datapath_id: return False
if self.n_buffers != other.n_buffers: return False
if self.n_tables != other.n_tables: return False
if self.capabilities != other.capabilities: return False
if self.actions != other.actions: return False
if self.ports != other.ports: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
outstr += prefix + 'datapath_id: ' + str(self.datapath_id) + '\n'
outstr += prefix + 'n_buffers: ' + str(self.n_buffers) + '\n'
outstr += prefix + 'n_tables: ' + str(self.n_tables) + '\n'
outstr += prefix + 'capabilities: ' + str(self.capabilities) + '\n'
outstr += prefix + 'actions: ' + str(self.actions) + '\n'
outstr += prefix + 'ports: \n'
for obj in self.ports:
outstr += obj.show(prefix + ' ')
return outstr
ofp_switch_features = ofp_features_reply
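# Illustrative sketch (not part of the original module): reading the port
# list out of a features reply.  'raw' is assumed to hold one complete
# wire-format OFPT_FEATURES_REPLY message; the port_no and name attributes
# are assumed to come from ofp_phy_port as defined earlier in this file.
def _example_list_ports (raw):
  reply = ofp_features_reply()
  reply.unpack(raw)
  return [(p.port_no, p.name) for p in reply.ports]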
##3.2 Switch Configuration
@openflow_c_message("OFPT_SET_CONFIG", 9)
class ofp_set_config (ofp_header): # uses ofp_switch_config
def __init__ (self, **kw):
ofp_header.__init__(self)
self.flags = 0
self.miss_send_len = OFP_DEFAULT_MISS_SEND_LEN
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
packed += struct.pack("!HH", self.flags, self.miss_send_len)
return packed
def unpack (self, raw, offset=0):
offset,length = self._unpack_header(raw, offset)
offset,(self.flags, self.miss_send_len) = _unpack("!HH", raw, offset)
assert length == len(self)
return offset,length
@staticmethod
def __len__ ():
return 12
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
if self.flags != other.flags: return False
if self.miss_send_len != other.miss_send_len: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
outstr += prefix + 'flags: ' + str(self.flags) + '\n'
outstr += prefix + 'miss_send_len: ' + str(self.miss_send_len) + '\n'
return outstr
##3.3 Modify State Messages
@openflow_c_message("OFPT_FLOW_MOD", 14)
class ofp_flow_mod (ofp_header):
_MIN_LENGTH = 72
def __init__ (self, **kw):
ofp_header.__init__(self)
if 'match' in kw:
      self.match = None # Will be overwritten by initHelper() below
else:
self.match = ofp_match()
self.cookie = 0
self.command = OFPFC_ADD
self.idle_timeout = 0
self.hard_timeout = 0
self.priority = OFP_DEFAULT_PRIORITY
self._buffer_id = NO_BUFFER
self.out_port = OFPP_NONE
self.flags = 0
self.actions = []
self.data = None # Not in the spec! Special magic! Can be packet_in.
# ofp_flow_mod/ofp_packet_out do some special handling of 'actions'...
# Allow "action" as a synonym for "actions"
if 'action' in kw and 'actions' not in kw:
kw['actions'] = kw['action']
del kw['action']
initHelper(self, kw)
# Allow use of actions=<a single action> for kw args.
if not hasattr(self.actions, '__getitem__'):
self.actions = [self.actions]
@property
def buffer_id (self):
if self._buffer_id == NO_BUFFER: return None
return self._buffer_id
@buffer_id.setter
def buffer_id (self, val):
if val is None: val = NO_BUFFER
self._buffer_id = val
def _validate (self):
if not isinstance(self.match, ofp_match):
return "match is not class ofp_match"
return None
def pack (self):
"""
Packs this object into its wire format.
May normalize fields.
NOTE: If "data" has been specified, this method may actually return
*more than just a single ofp_flow_mod* in packed form.
Specifically, it may also have a barrier and an ofp_packet_out.
"""
po = None
buffer_id = self.buffer_id
if self.data:
if not self.data.is_complete:
_log(warn="flow_mod is trying to include incomplete data")
else:
self.buffer_id = self.data.buffer_id # Hacky
if self.buffer_id is None:
po = ofp_packet_out(data=self.data)
po.in_port = self.data.in_port
po.actions.append(ofp_action_output(port = OFPP_TABLE))
#FIXME: Should maybe check that packet hits the new entry...
# Or just duplicate the actions? (I think that's the best idea)
buffer_id = self.buffer_id
self.buffer_id = None
if buffer_id is None:
buffer_id = NO_BUFFER
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
packed += self.match.pack(flow_mod=True)
packed += struct.pack("!QHHHHLHH", self.cookie, self.command,
self.idle_timeout, self.hard_timeout,
self.priority, buffer_id, self.out_port,
self.flags)
for i in self.actions:
packed += i.pack()
if po:
packed += ofp_barrier_request().pack()
packed += po.pack()
return packed
def unpack (self, raw, offset=0):
offset,length = self._unpack_header(raw, offset)
offset = self.match.unpack(raw, offset, flow_mod=True)
offset,(self.cookie, self.command, self.idle_timeout,
self.hard_timeout, self.priority, self._buffer_id,
self.out_port, self.flags) = \
_unpack("!QHHHHLHH", raw, offset)
offset,self.actions = _unpack_actions(raw,
length-(32 + len(self.match)), offset)
assert length == len(self)
return offset,length
def __len__ (self):
l = 32 + len(self.match)
for i in self.actions:
l += len(i)
return l
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
if self.match != other.match: return False
if self.cookie != other.cookie: return False
if self.command != other.command: return False
if self.idle_timeout != other.idle_timeout: return False
if self.hard_timeout != other.hard_timeout: return False
if self.priority != other.priority: return False
if self.buffer_id != other.buffer_id: return False
if self.out_port != other.out_port: return False
if self.flags != other.flags: return False
if self.actions != other.actions: return False
if self.data != other.data: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
outstr += prefix + 'match: \n'
outstr += self.match.show(prefix + ' ')
outstr += prefix + 'cookie: ' + str(self.cookie) + '\n'
outstr += prefix + 'command: ' + str(self.command) + '\n'
outstr += prefix + 'idle_timeout: ' + str(self.idle_timeout) + '\n'
outstr += prefix + 'hard_timeout: ' + str(self.hard_timeout) + '\n'
outstr += prefix + 'priority: ' + str(self.priority) + '\n'
outstr += prefix + 'buffer_id: ' + str(self.buffer_id) + '\n'
outstr += prefix + 'out_port: ' + str(self.out_port) + '\n'
outstr += prefix + 'flags: ' + str(self.flags) + '\n'
outstr += prefix + 'actions: \n'
for obj in self.actions:
outstr += obj.show(prefix + ' ')
return outstr
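# Illustrative sketch (not part of the original module): building a flow_mod
# that sends HTTP traffic to the controller.  "action" is accepted as a
# synonym for "actions" in the constructor, and a single action need not be
# wrapped in a list (see __init__ above).
def _example_flow_mod_http_to_controller ():
  fm = ofp_flow_mod(action = ofp_action_output(port = OFPP_CONTROLLER))
  fm.match.dl_type = 0x0800   # IPv4
  fm.match.nw_proto = 6       # TCP
  fm.match.tp_dst = 80        # HTTP
  fm.idle_timeout = 60
  return fm.pack()            # wire-format bytes, ready to send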
@openflow_c_message("OFPT_PORT_MOD", 15)
class ofp_port_mod (ofp_header):
def __init__ (self, **kw):
ofp_header.__init__(self)
self.port_no = 0
self.hw_addr = EMPTY_ETH
self.config = 0
self.mask = 0
self.advertise = 0
initHelper(self, kw)
def _validate (self):
if (not isinstance(self.hw_addr, bytes)
and not isinstance(self.hw_addr, EthAddr)):
return "hw_addr is not bytes or EthAddr"
if len(self.hw_addr) != 6:
return "hw_addr is not of size 6"
return None
def pack (self):
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
packed += struct.pack("!H", self.port_no)
if isinstance(self.hw_addr, bytes):
packed += self.hw_addr
else:
packed += self.hw_addr.toRaw()
packed += struct.pack("!LLL", self.config, self.mask, self.advertise)
packed += _PAD4
return packed
def unpack (self, raw, offset=0):
offset,length = self._unpack_header(raw, offset)
offset,(self.port_no,) = _unpack("!H", raw, offset)
offset,self.hw_addr = _readether(raw, offset)
offset,(self.config, self.mask, self.advertise) = \
_unpack("!LLL", raw, offset)
offset = _skip(raw, offset, 4)
assert length == len(self)
return offset,length
@staticmethod
def __len__ ():
return 32
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
if self.port_no != other.port_no: return False
if self.hw_addr != other.hw_addr: return False
if self.config != other.config: return False
if self.mask != other.mask: return False
if self.advertise != other.advertise: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
outstr += prefix + 'port_no: ' + str(self.port_no) + '\n'
outstr += prefix + 'hw_addr: ' + str(EthAddr(self.hw_addr)) + '\n'
outstr += prefix + 'config: ' + str(self.config) + '\n'
outstr += prefix + 'mask: ' + str(self.mask) + '\n'
outstr += prefix + 'advertise: ' + str(self.advertise) + '\n'
return outstr
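# Illustrative sketch (not part of the original module): administratively
# disabling a port.  OFPPC_PORT_DOWN is assumed to be among the port-config
# constants generated at the bottom of this file; 'hw_addr' must be the
# port's current hardware address (6 raw bytes or an EthAddr).
def _example_port_down (port_no, hw_addr):
  pm = ofp_port_mod(port_no = port_no, hw_addr = hw_addr)
  pm.config = OFPPC_PORT_DOWN   # desired bit value
  pm.mask = OFPPC_PORT_DOWN     # which bits to change
  return pm.pack()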
##3.4 Queue Configuration Messages
@openflow_c_message("OFPT_QUEUE_GET_CONFIG_REQUEST", 20)
class ofp_queue_get_config_request (ofp_header):
def __init__ (self, **kw):
ofp_header.__init__(self)
self.port = 0
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
packed += struct.pack("!H", self.port)
packed += _PAD2
return packed
def unpack (self, raw, offset=0):
offset,length = self._unpack_header(raw, offset)
offset,(self.port,) = _unpack("!H", raw, offset)
offset = _skip(raw, offset, 2)
assert length == len(self)
return offset,length
@staticmethod
def __len__ ():
return 12
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
if self.port != other.port: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
outstr += prefix + 'port: ' + str(self.port) + '\n'
return outstr
@openflow_s_message("OFPT_QUEUE_GET_CONFIG_REPLY", 21)
class ofp_queue_get_config_reply (ofp_header):
_MIN_LENGTH = 16
def __init__ (self, **kw):
ofp_header.__init__(self)
self.port = 0
self.queues = []
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
packed += struct.pack("!H", self.port)
packed += _PAD6
for i in self.queues:
packed += i.pack()
return packed
def unpack (self, raw, offset=0):
offset,length = self._unpack_header(raw, offset)
offset,(self.port,) = _unpack("!H", raw, offset)
offset = _skip(raw, offset, 6)
remaining = length - 6 - 2 - len(ofp_header)
del self.queues[:]
# Not tested; probably buggy
while remaining > 0:
q = ofp_packet_queue()
_offset = q.unpack(raw, offset)
l = _offset - offset
offset = _offset
if l < 1: raise RuntimeError("Can't parse")
remaining -= l
self.queues.append(q)
assert length == len(self)
return offset,length
def __len__ (self):
l = 16
for i in self.queues:
l += len(i)
return l
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
if self.port != other.port: return False
if self.queues != other.queues: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
outstr += prefix + 'port: ' + str(self.port) + '\n'
outstr += prefix + 'queues: \n'
for obj in self.queues:
outstr += obj.show(prefix + ' ')
return outstr
@openflow_c_message("OFPT_STATS_REQUEST", 16)
class ofp_stats_request (ofp_header):
_MIN_LENGTH = 12
def __init__ (self, **kw):
ofp_header.__init__(self)
self.type = None # Try to guess
self.flags = 0
self._body = b''
self._body_packed = None # Cache
initHelper(self, kw)
def pack (self):
if self.type is None:
if isinstance(self.body, ofp_stats_body_base):
self.type = self.body._type
else:
raise RuntimeError("Can't determine body type; specify it "
+ "explicitly")
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
packed += struct.pack("!HH", self.type, self.flags)
packed += self._pack_body()
return packed
def _pack_body (self):
if self._body_packed is None:
if hasattr(self.body, 'pack'):
self._body_packed = self._body.pack()
else:
self._body_packed = self._body
return self._body_packed
@property
def body (self):
return self._body
@body.setter
def body (self, data):
self._body = data
    self._body_packed = None # Invalidate cached packed body
def unpack (self, raw, offset=0):
offset,length = self._unpack_header(raw, offset)
offset,(self.type, self.flags) = _unpack("!HH", raw, offset)
offset,body = _read(raw, offset, length - 12)
si = _stats_type_to_class_info.get(self.type)
if si is None:
self.body = ofp_generic_stats_body()
self.body.unpack(body, 0, len(body))
else:
if si.request is None:
raise RuntimeError("No request for " + str(si))
self.body = si.request()
self.body.unpack(body, 0, len(body))
#TODO: assert entire body is unpacked
assert length == len(self)
return offset,length
def __len__ (self):
return 12 + len(self._pack_body())
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
if self.type != other.type: return False
if self.flags != other.flags: return False
if self._pack_body() != other._pack_body(): return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
outstr += prefix + 'type: ' + str(self.type) + '\n'
outstr += prefix + 'flags: ' + str(self.flags) + '\n'
outstr += prefix + 'body:\n'
outstr += _format_body(self.body, prefix + ' ') + '\n'
return outstr
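# Illustrative sketch (not part of the original module): requesting flow
# statistics.  Because the body is an ofp_flow_stats_request (defined below),
# pack() can infer the stats type automatically, as described above.
def _example_flow_stats_request ():
  sr = ofp_stats_request(body = ofp_flow_stats_request())
  return sr.pack()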
@openflow_s_message("OFPT_STATS_REPLY", 17,
reply_to="ofp_stats_request")
class ofp_stats_reply (ofp_header):
_MIN_LENGTH = 12
def __init__ (self, **kw):
ofp_header.__init__(self)
self.type = None # Guess
self.flags = 0
self.body = b''
self._body_data = (None, None)
initHelper(self, kw)
@property
def is_last_reply (self):
return (self.flags & 1) == 0
@is_last_reply.setter
def is_last_reply (self, value):
self.flags = self.flags & 0xfffe
if not value:
self.flags |= 1
@property
def body_data (self):
if self._body_data[0] is not self.body:
def _pack(b):
return b.pack() if hasattr(b, 'pack') else b
data = b''
if is_listlike(self.body):
for b in self.body:
data += _pack(b)
else:
data = _pack(self.body)
self._body_data = (self.body, data)
return self._body_data[1]
def pack (self):
if self.type is None:
if is_listlike(self.body):
if len(self.body):
b = self.body[0]
else:
b = None # Will fail below
else:
b = self.body
if isinstance(b, ofp_stats_body_base):
self.type = b._type
else:
raise RuntimeError("Can't determine body type; specify it "
+ "explicitly")
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
packed += struct.pack("!HH", self.type, self.flags)
packed += self.body_data
return packed
def unpack (self, raw, offset=0):
offset,length = self._unpack_header(raw, offset)
offset,(self.type, self.flags) = _unpack("!HH", raw, offset)
offset,packed = _read(raw, offset, length - 12)
t = _stats_type_to_class_info.get(self.type)
if t is None:
#FIXME: Put in a generic container?
self.body = packed
else:
if t.reply is None:
#FIXME: Put in a generic container?
self.body = packed
else:
if not t.reply_is_list:
self.body = t.reply()
self.body.unpack(packed, 0, len(packed))
else:
prev_len = len(packed)
self.body = []
while len(packed):
part = t.reply()
off = part.unpack(packed, 0, len(packed))
packed = packed[off:]
assert len(packed) != prev_len
prev_len = len(packed)
self.body.append(part)
assert length == len(self)
return offset,length
def __len__ (self):
if isinstance(self.body, list):
return 12 + sum(len(part) for part in self.body)
return 12 + len(self.body)
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
if self.type != other.type: return False
if self.flags != other.flags: return False
if self.body != other.body: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
outstr += prefix + 'type: ' + str(self.type) + '\n'
outstr += prefix + 'flags: ' + str(self.flags) + '\n'
outstr += prefix + 'body:\n'
body = self.body
if not is_listlike(body):
body = [body]
for b in body:
outstr += _format_body(b, prefix + ' ') + '\n'
return outstr
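# Illustrative sketch (not part of the original module): gathering the bodies
# of a (possibly multipart) stats reply.  A switch may split one reply across
# several OFPT_STATS_REPLY messages; the last one has is_last_reply == True.
# 'replies' is assumed to be unpacked ofp_stats_reply objects in arrival
# order.
def _example_collect_stats_bodies (replies):
  bodies = []
  for r in replies:
    if is_listlike(r.body):
      bodies.extend(r.body)
    else:
      bodies.append(r.body)
    if r.is_last_reply:
      break
  return bodies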
@openflow_stats_reply("OFPST_DESC", 0)
class ofp_desc_stats (ofp_stats_body_base):
def __init__ (self, **kw):
self.mfr_desc = ""
self.hw_desc = ""
self.sw_desc = ""
self.serial_num = ""
self.dp_desc = ""
initHelper(self, kw)
def _validate (self):
if not isinstance(self.mfr_desc, str):
return "mfr_desc is not string"
    if len(self.mfr_desc) > DESC_STR_LEN:
      return "mfr_desc is too long (max 256)"
    if not isinstance(self.hw_desc, str):
      return "hw_desc is not string"
    if len(self.hw_desc) > DESC_STR_LEN:
      return "hw_desc is too long (max 256)"
    if not isinstance(self.sw_desc, str):
      return "sw_desc is not string"
    if len(self.sw_desc) > DESC_STR_LEN:
      return "sw_desc is too long (max 256)"
    if not isinstance(self.serial_num, str):
      return "serial_num is not string"
    if len(self.serial_num) > SERIAL_NUM_LEN:
      return "serial_num is too long (max 32)"
    if not isinstance(self.dp_desc, str):
      return "dp_desc is not string"
    if len(self.dp_desc) > DESC_STR_LEN:
      return "dp_desc is too long (max 256)"
return None
def pack (self):
assert self._assert()
packed = b""
packed += self.mfr_desc.ljust(DESC_STR_LEN,'\0')
packed += self.hw_desc.ljust(DESC_STR_LEN,'\0')
packed += self.sw_desc.ljust(DESC_STR_LEN,'\0')
packed += self.serial_num.ljust(SERIAL_NUM_LEN,'\0')
packed += self.dp_desc.ljust(DESC_STR_LEN,'\0')
return packed
def unpack (self, raw, offset, avail):
_offset = offset
offset,self.mfr_desc = _readzs(raw, offset, DESC_STR_LEN)
offset,self.hw_desc = _readzs(raw, offset, DESC_STR_LEN)
offset,self.sw_desc = _readzs(raw, offset, DESC_STR_LEN)
offset,self.serial_num = _readzs(raw, offset, SERIAL_NUM_LEN)
offset,self.dp_desc = _readzs(raw, offset, DESC_STR_LEN)
assert offset - _offset == len(self)
return offset
@staticmethod
def __len__ ():
return 1056
def __eq__ (self, other):
if type(self) != type(other): return False
if self.mfr_desc != other.mfr_desc: return False
if self.hw_desc != other.hw_desc: return False
if self.sw_desc != other.sw_desc: return False
if self.serial_num != other.serial_num: return False
if self.dp_desc != other.dp_desc: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'mfr_desc: ' + str(self.mfr_desc) + '\n'
outstr += prefix + 'hw_desc: ' + str(self.hw_desc) + '\n'
outstr += prefix + 'sw_desc: ' + str(self.sw_desc) + '\n'
outstr += prefix + 'serial_num: ' + str(self.serial_num) + '\n'
outstr += prefix + 'dp_desc: ' + str(self.dp_desc) + '\n'
return outstr
ofp_desc_stats_reply = ofp_desc_stats
class _empty_stats_request_body (ofp_stats_body_base):
"""
  Superclass for stats requests with empty bodies
OFPST_DESC and OFPST_TABLE have empty request bodies. In order
to make type guessing and unpacking consistent, we define
classes for them anyway.
"""
def __init__ (self, **kw):
pass
def pack (self):
return b""
def unpack (self, raw, offset, avail):
if avail != 0:
raise RuntimeError("Expected empty body")
return offset
@staticmethod
def __len__ ():
return 0
def __eq__ (self, other):
if type(self) != type(other): return False
return True
def show (self, prefix=''):
return "<empty>"
@openflow_stats_request('OFPST_DESC', 0)
class ofp_desc_stats_request (_empty_stats_request_body):
"""
See _empty_stats_request_body superclass documentation
"""
pass
@openflow_stats_request('OFPST_TABLE', 3)
class ofp_table_stats_request (_empty_stats_request_body):
"""
See _empty_stats_request_body superclass documentation
"""
pass
@openflow_stats_request('OFPST_FLOW', 1)
class ofp_flow_stats_request (ofp_stats_body_base):
def __init__ (self, **kw):
self.match = ofp_match()
self.table_id = TABLE_ALL
self.out_port = OFPP_NONE
initHelper(self, kw)
def _validate (self):
if not isinstance(self.match, ofp_match):
return "match is not class ofp_match"
return None
def pack (self):
assert self._assert()
packed = b""
packed += self.match.pack()
packed += struct.pack("!BBH", self.table_id, 0, self.out_port)
return packed
def unpack (self, raw, offset, avail):
_offset = offset
offset = self.match.unpack(raw, offset)
offset,(self.table_id, pad, self.out_port) = \
_unpack("!BBH", raw, offset)
assert pad == 0
assert offset - _offset == len(self)
return offset
@staticmethod
def __len__ ():
return 4 + len(ofp_match)
def __eq__ (self, other):
if type(self) != type(other): return False
if self.match != other.match: return False
if self.table_id != other.table_id: return False
if self.out_port != other.out_port: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'match: \n'
outstr += self.match.show(prefix + ' ')
outstr += prefix + 'table_id: ' + str(self.table_id) + '\n'
outstr += prefix + 'out_port: ' + str(self.out_port) + '\n'
return outstr
@openflow_stats_reply('OFPST_FLOW', is_list = True)
class ofp_flow_stats (ofp_stats_body_base):
_MIN_LENGTH = 88
def __init__ (self, **kw):
self.table_id = 0
self.match = ofp_match()
self.duration_sec = 0
self.duration_nsec = 0
self.priority = OFP_DEFAULT_PRIORITY
self.idle_timeout = 0
self.hard_timeout = 0
self.cookie = 0
self.packet_count = 0
self.byte_count = 0
self.actions = []
initHelper(self, kw)
def _validate (self):
if not isinstance(self.match, ofp_match):
return "match is not class ofp_match"
return None
def pack (self):
assert self._assert()
packed = b""
packed += struct.pack("!HBB", len(self), self.table_id, 0)
packed += self.match.pack()
packed += struct.pack("!LLHHH", self.duration_sec,
self.duration_nsec, self.priority,
self.idle_timeout, self.hard_timeout)
packed += _PAD6 # Pad
packed += struct.pack("!QQQ", self.cookie, self.packet_count,
self.byte_count)
for i in self.actions:
packed += i.pack()
return packed
def unpack (self, raw, offset, avail):
_offset = offset
offset,(length, self.table_id, pad) = _unpack("!HBB", raw, offset)
assert pad == 0
offset = self.match.unpack(raw, offset)
offset,(self.duration_sec, self.duration_nsec, self.priority,
self.idle_timeout, self.hard_timeout) = \
_unpack("!LLHHH", raw, offset)
offset = _skip(raw, offset, 6)
offset,(self.cookie, self.packet_count, self.byte_count) = \
_unpack("!QQQ", raw, offset)
assert (offset - _offset) == 48 + len(self.match)
offset,self.actions = _unpack_actions(raw,
length - (48 + len(self.match)), offset)
assert offset - _offset == len(self)
return offset
def __len__ (self):
l = 48 + len(self.match)
for i in self.actions:
l += len(i)
return l
def __eq__ (self, other):
if type(self) != type(other): return False
if len(self) != len(other): return False
if self.table_id != other.table_id: return False
if self.match != other.match: return False
if self.duration_sec != other.duration_sec: return False
if self.duration_nsec != other.duration_nsec: return False
if self.priority != other.priority: return False
if self.idle_timeout != other.idle_timeout: return False
if self.hard_timeout != other.hard_timeout: return False
if self.cookie != other.cookie: return False
if self.packet_count != other.packet_count: return False
if self.byte_count != other.byte_count: return False
if self.actions != other.actions: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'length: ' + str(len(self)) + '\n'
outstr += prefix + 'table_id: ' + str(self.table_id) + '\n'
outstr += prefix + 'match: \n'
outstr += self.match.show(prefix + ' ')
outstr += prefix + 'duration_sec: ' + str(self.duration_sec) + '\n'
outstr += prefix + 'duration_nsec: ' + str(self.duration_nsec) + '\n'
outstr += prefix + 'priority: ' + str(self.priority) + '\n'
outstr += prefix + 'idle_timeout: ' + str(self.idle_timeout) + '\n'
outstr += prefix + 'hard_timeout: ' + str(self.hard_timeout) + '\n'
outstr += prefix + 'cookie: ' + str(self.cookie) + '\n'
outstr += prefix + 'packet_count: ' + str(self.packet_count) + '\n'
outstr += prefix + 'byte_count: ' + str(self.byte_count) + '\n'
outstr += prefix + 'actions: \n'
for obj in self.actions:
outstr += obj.show(prefix + ' ')
return outstr
ofp_flow_stats_reply = ofp_flow_stats
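# Illustrative sketch (not part of the original module): totaling byte counts
# per table from OFPST_FLOW reply entries.  'flow_entries' is assumed to be a
# list of ofp_flow_stats objects (e.g. the body of an unpacked
# ofp_stats_reply).
def _example_bytes_per_table (flow_entries):
  totals = {}
  for e in flow_entries:
    totals[e.table_id] = totals.get(e.table_id, 0) + e.byte_count
  return totals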
@openflow_stats_request('OFPST_AGGREGATE', 2)
class ofp_aggregate_stats_request (ofp_stats_body_base):
def __init__ (self, **kw):
self.match = ofp_match()
self.table_id = TABLE_ALL
self.out_port = OFPP_NONE
initHelper(self, kw)
def _validate (self):
if not isinstance(self.match, ofp_match):
return "match is not class ofp_match"
return None
def pack (self):
assert self._assert()
packed = b""
packed += self.match.pack()
packed += struct.pack("!BBH", self.table_id, 0, self.out_port)
return packed
def unpack (self, raw, offset, avail):
_offset = offset
offset = self.match.unpack(raw, offset)
offset,(self.table_id, pad, self.out_port) = \
_unpack("!BBH", raw, offset)
assert pad == 0
assert offset - _offset == len(self)
return offset
@staticmethod
def __len__ ():
return 44
def __eq__ (self, other):
if type(self) != type(other): return False
if self.match != other.match: return False
if self.table_id != other.table_id: return False
if self.out_port != other.out_port: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'match: \n'
outstr += self.match.show(prefix + ' ')
outstr += prefix + 'table_id: ' + str(self.table_id) + '\n'
outstr += prefix + 'out_port: ' + str(self.out_port) + '\n'
return outstr
@openflow_stats_reply('OFPST_AGGREGATE')
class ofp_aggregate_stats (ofp_stats_body_base):
def __init__ (self, **kw):
self.packet_count = 0
self.byte_count = 0
self.flow_count = 0
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += struct.pack("!QQL", self.packet_count, self.byte_count,
self.flow_count)
packed += _PAD4 # Pad
return packed
def unpack (self, raw, offset, avail):
_offset = offset
offset,(self.packet_count, self.byte_count, self.flow_count) = \
_unpack("!QQL", raw, offset)
offset = _skip(raw, offset, 4)
assert offset - _offset == len(self)
return offset
@staticmethod
def __len__ ():
return 24
def __eq__ (self, other):
if type(self) != type(other): return False
if self.packet_count != other.packet_count: return False
if self.byte_count != other.byte_count: return False
if self.flow_count != other.flow_count: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'packet_count: ' + str(self.packet_count) + '\n'
outstr += prefix + 'byte_count: ' + str(self.byte_count) + '\n'
outstr += prefix + 'flow_count: ' + str(self.flow_count) + '\n'
return outstr
ofp_aggregate_stats_reply = ofp_aggregate_stats
@openflow_stats_reply('OFPST_TABLE', 3, is_list = True)
class ofp_table_stats (ofp_stats_body_base):
def __init__ (self, **kw):
self.table_id = 0
self.name = ""
self.wildcards = 0
self.max_entries = 0
self.active_count = 0
self.lookup_count = 0
self.matched_count = 0
initHelper(self, kw)
def _validate (self):
if not isinstance(self.name, str):
return "name is not string"
if len(self.name) > OFP_MAX_TABLE_NAME_LEN:
return "name is too long"
return None
def pack (self):
assert self._assert()
packed = b""
packed += struct.pack("!B", self.table_id)
packed += _PAD3
packed += self.name.ljust(OFP_MAX_TABLE_NAME_LEN,'\0')
packed += struct.pack("!LLLQQ", self.wildcards, self.max_entries,
self.active_count, self.lookup_count,
self.matched_count)
return packed
def unpack (self, raw, offset, avail):
_offset = offset
offset,(self.table_id,) = _unpack("!B", raw, offset)
offset = _skip(raw, offset, 3)
offset,self.name = _readzs(raw, offset, OFP_MAX_TABLE_NAME_LEN)
offset,(self.wildcards, self.max_entries, self.active_count,
self.lookup_count, self.matched_count) = \
_unpack("!LLLQQ", raw, offset)
assert offset - _offset == len(self)
return offset
@staticmethod
def __len__ ():
return 64
def __eq__ (self, other):
if type(self) != type(other): return False
if self.table_id != other.table_id: return False
if self.name != other.name: return False
if self.wildcards != other.wildcards: return False
if self.max_entries != other.max_entries: return False
if self.active_count != other.active_count: return False
if self.lookup_count != other.lookup_count: return False
if self.matched_count != other.matched_count: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'table_id: ' + str(self.table_id) + '\n'
outstr += prefix + 'name: ' + str(self.name) + '\n'
outstr += prefix + 'wildcards: ' + str(self.wildcards) + '\n'
outstr += prefix + 'max_entries: ' + str(self.max_entries) + '\n'
outstr += prefix + 'active_count: ' + str(self.active_count) + '\n'
outstr += prefix + 'lookup_count: ' + str(self.lookup_count) + '\n'
outstr += prefix + 'matched_count: ' + str(self.matched_count) + '\n'
return outstr
ofp_table_stats_reply = ofp_table_stats
@openflow_stats_request("OFPST_PORT", 4)
class ofp_port_stats_request (ofp_stats_body_base):
def __init__ (self, **kw):
self.port_no = OFPP_NONE
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += struct.pack("!H", self.port_no)
packed += _PAD6
return packed
def unpack (self, raw, offset, avail):
_offset = offset
offset,(self.port_no,) = _unpack("!H", raw, offset)
offset = _skip(raw, offset, 6)
assert offset - _offset == len(self)
return offset
@staticmethod
def __len__ ():
return 8
def __eq__ (self, other):
if type(self) != type(other): return False
if self.port_no != other.port_no: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'port_no: ' + str(self.port_no) + '\n'
return outstr
@openflow_stats_reply("OFPST_PORT", is_list = True)
class ofp_port_stats (ofp_stats_body_base):
def __init__ (self, **kw):
self.port_no = OFPP_NONE
self.rx_packets = 0
self.tx_packets = 0
self.rx_bytes = 0
self.tx_bytes = 0
self.rx_dropped = 0
self.tx_dropped = 0
self.rx_errors = 0
self.tx_errors = 0
self.rx_frame_err = 0
self.rx_over_err = 0
self.rx_crc_err = 0
self.collisions = 0
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += struct.pack("!H", self.port_no)
packed += _PAD6
packed += struct.pack("!QQQQQQQQQQQQ", self.rx_packets,
self.tx_packets, self.rx_bytes, self.tx_bytes,
self.rx_dropped, self.tx_dropped,
self.rx_errors, self.tx_errors,
self.rx_frame_err, self.rx_over_err,
self.rx_crc_err, self.collisions)
return packed
def unpack (self, raw, offset, avail):
_offset = offset
offset,(self.port_no,) = _unpack("!H", raw, offset)
offset = _skip(raw, offset, 6)
offset,(self.rx_packets, self.tx_packets, self.rx_bytes,
self.tx_bytes, self.rx_dropped, self.tx_dropped,
self.rx_errors, self.tx_errors, self.rx_frame_err,
self.rx_over_err, self.rx_crc_err, self.collisions) = \
_unpack("!QQQQQQQQQQQQ", raw, offset)
assert offset - _offset == len(self)
return offset
@staticmethod
def __len__ ():
return 104
def __eq__ (self, other):
if type(self) != type(other): return False
if self.port_no != other.port_no: return False
if self.rx_packets != other.rx_packets: return False
if self.tx_packets != other.tx_packets: return False
if self.rx_bytes != other.rx_bytes: return False
if self.tx_bytes != other.tx_bytes: return False
if self.rx_dropped != other.rx_dropped: return False
if self.tx_dropped != other.tx_dropped: return False
if self.rx_errors != other.rx_errors: return False
if self.tx_errors != other.tx_errors: return False
if self.rx_frame_err != other.rx_frame_err: return False
if self.rx_over_err != other.rx_over_err: return False
if self.rx_crc_err != other.rx_crc_err: return False
if self.collisions != other.collisions: return False
return True
def __add__(self, other):
    if type(self) != type(other): return NotImplemented
port_no = OFPP_NONE
if self.port_no == other.port_no:
port_no = self.port_no
return ofp_port_stats(
port_no=port_no,
rx_packets = self.rx_packets + other.rx_packets,
tx_packets = self.tx_packets + other.tx_packets,
rx_bytes = self.rx_bytes + other.rx_bytes,
tx_bytes = self.tx_bytes + other.tx_bytes,
rx_dropped = self.rx_dropped + other.rx_dropped,
tx_dropped = self.tx_dropped + other.tx_dropped,
rx_errors = self.rx_errors + other.rx_errors,
tx_errors = self.tx_errors + other.tx_errors,
rx_frame_err = self.rx_frame_err + other.rx_frame_err,
rx_over_err = self.rx_over_err + other.rx_over_err,
rx_crc_err = self.rx_crc_err + other.rx_crc_err,
collisions = self.collisions + other.collisions)
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'port_no: ' + str(self.port_no) + '\n'
outstr += prefix + 'rx_packets: ' + str(self.rx_packets) + '\n'
outstr += prefix + 'tx_packets: ' + str(self.tx_packets) + '\n'
outstr += prefix + 'rx_bytes: ' + str(self.rx_bytes) + '\n'
outstr += prefix + 'tx_bytes: ' + str(self.tx_bytes) + '\n'
outstr += prefix + 'rx_dropped: ' + str(self.rx_dropped) + '\n'
outstr += prefix + 'tx_dropped: ' + str(self.tx_dropped) + '\n'
outstr += prefix + 'rx_errors: ' + str(self.rx_errors) + '\n'
outstr += prefix + 'tx_errors: ' + str(self.tx_errors) + '\n'
outstr += prefix + 'rx_frame_err: ' + str(self.rx_frame_err) + '\n'
outstr += prefix + 'rx_over_err: ' + str(self.rx_over_err) + '\n'
outstr += prefix + 'rx_crc_err: ' + str(self.rx_crc_err) + '\n'
outstr += prefix + 'collisions: ' + str(self.collisions) + '\n'
return outstr
ofp_port_stats_reply = ofp_port_stats
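# Illustrative sketch (not part of the original module): summing the counters
# of every port using the __add__ defined above.  'port_entries' is assumed
# to be a non-empty list of ofp_port_stats objects; the result's port_no is
# OFPP_NONE unless all entries refer to the same port.
def _example_total_port_stats (port_entries):
  total = port_entries[0]
  for e in port_entries[1:]:
    total = total + e
  return total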
@openflow_stats_request("OFPST_QUEUE", 5)
class ofp_queue_stats_request (ofp_stats_body_base):
def __init__ (self, **kw):
self.port_no = OFPP_ALL
self.queue_id = OFPQ_ALL
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += struct.pack("!H", self.port_no)
packed += _PAD2
packed += struct.pack("!L", self.queue_id)
return packed
def unpack (self, raw, offset, avail):
_offset = offset
offset,(self.port_no,pad,self.queue_id) = _unpack("!HHL", raw, offset)
assert pad == 0
assert offset - _offset == len(self)
return offset
@staticmethod
def __len__ ():
return 8
def __eq__ (self, other):
if type(self) != type(other): return False
if self.port_no != other.port_no: return False
if self.queue_id != other.queue_id: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'port_no: ' + str(self.port_no) + '\n'
outstr += prefix + 'queue_id: ' + str(self.queue_id) + '\n'
return outstr
@openflow_stats_reply("OFPST_QUEUE", is_list = True)
class ofp_queue_stats (ofp_stats_body_base):
def __init__ (self, **kw):
self.port_no = 0
self.queue_id = 0
self.tx_bytes = 0
self.tx_packets = 0
self.tx_errors = 0
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += struct.pack("!H", self.port_no)
packed += _PAD2
packed += struct.pack("!LQQQ", self.queue_id, self.tx_bytes,
self.tx_packets, self.tx_errors)
return packed
def unpack (self, raw, offset, avail):
_offset = offset
offset,(self.port_no, pad, self.queue_id, self.tx_bytes,
self.tx_packets, self.tx_errors) = \
_unpack("!HHLQQQ", raw, offset)
assert offset - _offset == len(self)
return offset
@staticmethod
def __len__ ():
return 32
def __eq__ (self, other):
if type(self) != type(other): return False
if self.port_no != other.port_no: return False
if self.queue_id != other.queue_id: return False
if self.tx_bytes != other.tx_bytes: return False
if self.tx_packets != other.tx_packets: return False
if self.tx_errors != other.tx_errors: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'port_no: ' + str(self.port_no) + '\n'
outstr += prefix + 'queue_id: ' + str(self.queue_id) + '\n'
outstr += prefix + 'tx_bytes: ' + str(self.tx_bytes) + '\n'
outstr += prefix + 'tx_packets: ' + str(self.tx_packets) + '\n'
outstr += prefix + 'tx_errors: ' + str(self.tx_errors) + '\n'
return outstr
ofp_queue_stats_reply = ofp_queue_stats
@openflow_stats_request("OFPST_VENDOR", 65535, is_list = False)
@openflow_stats_reply("OFPST_VENDOR", 65535, is_list = False)
class ofp_vendor_stats_generic (ofp_stats_body_base):
_MIN_LENGTH = 4
def __init__ (self, **kw):
self.vendor = None
self.data = b""
initHelper(self, kw)
def _pack_body (self):
if hasattr(self.data, "pack"):
return self.data.pack()
else:
return self.data
def pack (self):
assert self._assert()
packed = struct.pack("!L", self.vendor)
packed += self._pack_body()
return packed
def unpack (self, raw, offset, avail):
    if avail is None: raise RuntimeError("Requires length")
_offset = offset
offset,(self.vendor,) = _unpack("!L", raw, offset)
offset,self.data = _read(raw, offset, avail-4)
return offset
  def __len__ (self):
    return 4 + len(self._pack_body())
def __eq__ (self, other):
if type(self) != type(other): return False
if self.vendor != other.vendor: return False
if self.data != other.data: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'vendor id: ' + str(self.vendor) + '\n'
outstr += prefix + 'data len: ' + str(len(self.data)) + '\n'
return outstr
class ofp_generic_stats_body (ofp_stats_body_base):
_MIN_LENGTH = 0
def __init__ (self, **kw):
self.data = b""
initHelper(self, kw)
def _pack_body (self):
if hasattr(self.data, "pack"):
return self.data.pack()
else:
return self.data
  def pack (self):
    assert self._assert()
    packed = b""
    packed += self._pack_body()
    return packed
def unpack (self, raw, offset, avail):
    if avail is None: raise RuntimeError("Requires length")
_offset = offset
offset,self.data = _read(raw, offset, avail)
return offset
  def __len__ (self):
    return len(self._pack_body())
def __eq__ (self, other):
if type(self) != type(other): return False
if self.data != other.data: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'data len: ' + str(len(self.data)) + '\n'
return outstr
@openflow_c_message("OFPT_PACKET_OUT", 13)
class ofp_packet_out (ofp_header):
_MIN_LENGTH = 16
def __init__ (self, **kw):
ofp_header.__init__(self)
self._buffer_id = NO_BUFFER
self.in_port = OFPP_NONE
self.actions = []
self._data = b''
# ofp_flow_mod & ofp_packet_out do some special handling of 'actions'
# Allow "action" as a synonym for "actions"
if 'action' in kw and 'actions' not in kw:
kw['actions'] = kw['action']
del kw['action']
initHelper(self, kw)
# Allow use of actions=<a single action> for kw args.
if not hasattr(self.actions, '__getitem__'):
self.actions = [self.actions]
@property
def buffer_id (self):
if self._buffer_id == NO_BUFFER: return None
return self._buffer_id
@buffer_id.setter
def buffer_id (self, val):
if val is None: val = NO_BUFFER
self._buffer_id = val
@property
def data (self):
return self._data
@data.setter
def data (self, data):
if data is None:
self._data = b''
elif isinstance(data, packet_base):
self._data = data.pack()
elif isinstance(data, ofp_packet_in):
# Enable you to easily resend a packet
self._data = b''
self.buffer_id = data.buffer_id
if self.buffer_id is None:
#TODO: It'd be nice to log and then ignore if data is incomplete
# Unfortunately, we currently have no logging in here, so we
        # assert instead, which is either too drastic or too quiet.
assert data.is_complete
self._data = data._data
self.in_port = data.in_port
elif isinstance(data, bytes):
self._data = data
assert assert_type("data", self._data, (bytes,))
def _validate (self):
if self.buffer_id is not None and self.data != b'':
return "can not have both buffer_id and data set"
return None
def pack (self):
assert self._assert()
actions = b''.join((i.pack() for i in self.actions))
actions_len = len(actions)
if self.data is not None:
return b''.join((ofp_header.pack(self),
struct.pack("!LHH", self._buffer_id, self.in_port, actions_len),
actions, self.data))
else:
return b''.join((ofp_header.pack(self),
struct.pack("!LHH", self._buffer_id, self.in_port, actions_len),
actions))
def unpack (self, raw, offset=0):
_offset = offset
offset,length = self._unpack_header(raw, offset)
offset,(self._buffer_id, self.in_port, actions_len) = \
_unpack("!LHH", raw, offset)
offset,self.actions = _unpack_actions(raw, actions_len, offset)
remaining = length - (offset - _offset)
if remaining <= 0:
self.data = None
else:
offset,self.data = _read(raw, offset, remaining)
assert length == len(self)
return offset,length
def __len__ (self):
return 16 + reduce(operator.add, (len(a) for a in self.actions),
0) + (len(self.data) if self.data else 0)
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
if self.buffer_id != other.buffer_id: return False
if self.in_port != other.in_port: return False
if self.actions != other.actions: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
outstr += prefix + 'buffer_id: ' + str(self.buffer_id) + '\n'
outstr += prefix + 'in_port: ' + str(self.in_port) + '\n'
outstr += prefix + 'actions_len: ' + str(len(self.actions)) + '\n'
outstr += prefix + 'actions: \n'
for obj in self.actions:
if obj is None:
raise RuntimeError("An element of self.actions was None! "
+ "Bad formatting...")
outstr += obj.show(prefix + ' ')
return outstr
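# Illustrative sketch (not part of the original module): flooding a packet
# that arrived as a packet_in.  Assigning the packet_in to 'data' (see the
# setter above) reuses its buffer_id or raw bytes and its in_port, so the
# switch resends the very same packet.  'pi' is assumed to be an unpacked
# ofp_packet_in.
def _example_flood_packet_in (pi):
  po = ofp_packet_out(data = pi)
  po.actions.append(ofp_action_output(port = OFPP_FLOOD))
  return po.pack()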
##3.7 Barrier Message
@openflow_s_message("OFPT_BARRIER_REPLY", 19,
reply_to="ofp_barrier_request")
class ofp_barrier_reply (ofp_header):
def __init__ (self, **kw):
ofp_header.__init__(self)
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
return packed
#def unpack (self, raw, offset=0):
# offset,length = self._unpack_header(raw, offset)
# assert length == len(self)
# return offset,length
@staticmethod
def __len__ ():
return 8
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
return outstr
@openflow_c_message("OFPT_BARRIER_REQUEST", 18,
request_for="ofp_barrier_reply")
class ofp_barrier_request (ofp_header):
def __init__ (self, **kw):
ofp_header.__init__(self)
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
return packed
#def unpack (self, raw, offset=0):
# offset,length = self._unpack_header(raw, offset)
# assert length == len(self)
# return offset,length
@staticmethod
def __len__ ():
return 8
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
return outstr
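# Illustrative sketch (not part of the original module): following a batch of
# messages with a barrier so the sender can tell (from the matching
# OFPT_BARRIER_REPLY) when the switch has finished processing them.
# 'messages' is assumed to be a list of already-constructed ofp_header
# subclasses (e.g. ofp_flow_mod instances).
def _example_batch_with_barrier (messages):
  data = b"".join(m.pack() for m in messages)
  return data + ofp_barrier_request().pack()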
#4 Asynchronous Messages
@openflow_s_message("OFPT_PACKET_IN", 10)
class ofp_packet_in (ofp_header):
_MIN_LENGTH = 18
def __init__ (self, **kw):
ofp_header.__init__(self)
self.in_port = OFPP_NONE
self._buffer_id = NO_BUFFER
self.reason = 0
self.data = None
self._total_len = None
if 'total_len' in kw:
self._total_len = kw.pop('total_len')
initHelper(self, kw)
  def _validate (self):
    if self.data and (self.total_len < len(self.data)):
      return "total len less than data len"
    return None
@property
def total_len (self):
if self._total_len is None:
return len(self.data) if self.data else 0
return self._total_len
@total_len.setter
def total_len (self, value):
self._total_len = value
@property
def buffer_id (self):
if self._buffer_id == NO_BUFFER: return None
return self._buffer_id
@buffer_id.setter
def buffer_id (self, val):
if val is None: val = NO_BUFFER
self._buffer_id = val
@property
def data (self):
return self._data
@data.setter
def data (self, data):
assert assert_type("data", data, (packet_base, bytes))
if data is None:
      self._data = b''
elif isinstance(data, packet_base):
self._data = data.pack()
else:
self._data = data
def pack (self):
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
packed += struct.pack("!LHHBB", self._buffer_id, self.total_len,
self.in_port, self.reason, 0)
packed += self.data
#TODO: Padding? See __len__
return packed
@property
def is_complete (self):
if self.buffer_id is not None: return True
return len(self.data) == self.total_len
def unpack (self, raw, offset=0):
offset,length = self._unpack_header(raw, offset)
offset,(self._buffer_id, self._total_len, self.in_port, self.reason,
pad) = _unpack("!LHHBB", raw, offset)
offset,self.data = _read(raw, offset, length-18)
assert length == len(self)
return offset,length
def __len__ (self):
#FIXME: This is probably wrong, but it's not clear from the
# spec what's supposed to be going on here.
#if len(self.data) < 2:
# return 20 + len(self.data)
return 18 + len(self.data)
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
if self.buffer_id != other.buffer_id: return False
if self.total_len != other.total_len: return False
if self.in_port != other.in_port: return False
if self.reason != other.reason: return False
if self.data != other.data: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
outstr += prefix + 'buffer_id: ' + str(self.buffer_id) + '\n'
outstr += prefix + 'total_len: ' + str(self._total_len) + '\n'
outstr += prefix + 'in_port: ' + str(self.in_port) + '\n'
outstr += prefix + 'reason: ' + str(self.reason) + '\n'
outstr += prefix + 'data: ' + str(self.data) + '\n'
return outstr
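# Illustrative sketch (not part of the original module): summarizing a
# packet_in.  When the switch buffered the packet, only a prefix of it may be
# included and is_complete can be False; buffer_id is None for unbuffered
# packets.  'pi' is assumed to be an unpacked ofp_packet_in.
def _example_describe_packet_in (pi):
  return {
    'in_port'   : pi.in_port,
    'buffered'  : pi.buffer_id is not None,
    'complete'  : pi.is_complete,
    'total_len' : pi.total_len,
  }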
@openflow_s_message("OFPT_FLOW_REMOVED", 11)
class ofp_flow_removed (ofp_header):
def __init__ (self, **kw):
ofp_header.__init__(self)
self.match = ofp_match()
self.cookie = 0
self.priority = 0
self.reason = 0
self.duration_sec = 0
self.duration_nsec = 0
self.idle_timeout = 0
self.packet_count = 0
self.byte_count = 0
initHelper(self, kw)
def _validate (self):
if not isinstance(self.match, ofp_match):
return "match is not class ofp_match"
return None
def pack (self):
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
packed += self.match.pack()
packed += struct.pack("!QHB", self.cookie, self.priority, self.reason)
packed += _PAD
packed += struct.pack("!LLH", self.duration_sec, self.duration_nsec,
self.idle_timeout)
packed += _PAD2
packed += struct.pack("!QQ", self.packet_count, self.byte_count)
return packed
def unpack (self, raw, offset=0):
offset,length = self._unpack_header(raw, offset)
offset = self.match.unpack(raw, offset)
offset,(self.cookie, self.priority, self.reason) = \
_unpack("!QHB", raw, offset)
offset = _skip(raw, offset, 1)
offset,(self.duration_sec, self.duration_nsec, self.idle_timeout) = \
_unpack("!LLH", raw, offset)
offset = _skip(raw, offset, 2)
offset,(self.packet_count, self.byte_count) = \
_unpack("!QQ", raw, offset)
assert length == len(self)
return offset,length
@staticmethod
def __len__ ():
return 48 + len(ofp_match)
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
if self.match != other.match: return False
if self.cookie != other.cookie: return False
if self.priority != other.priority: return False
if self.reason != other.reason: return False
if self.duration_sec != other.duration_sec: return False
if self.duration_nsec != other.duration_nsec: return False
if self.idle_timeout != other.idle_timeout: return False
if self.packet_count != other.packet_count: return False
if self.byte_count != other.byte_count: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
outstr += prefix + 'match: \n'
outstr += self.match.show(prefix + ' ')
outstr += prefix + 'cookie: ' + str(self.cookie) + '\n'
outstr += prefix + 'priority: ' + str(self.priority) + '\n'
outstr += prefix + 'reason: ' + str(self.reason) + '\n'
outstr += prefix + 'duration_sec: ' + str(self.duration_sec) + '\n'
outstr += prefix + 'duration_nsec: ' + str(self.duration_nsec) + '\n'
outstr += prefix + 'idle_timeout: ' + str(self.idle_timeout) + '\n'
outstr += prefix + 'packet_count: ' + str(self.packet_count) + '\n'
outstr += prefix + 'byte_count: ' + str(self.byte_count) + '\n'
return outstr
@openflow_s_message("OFPT_PORT_STATUS", 12)
class ofp_port_status (ofp_header):
def __init__ (self, **kw):
ofp_header.__init__(self)
self.reason = 0
self.desc = ofp_phy_port()
initHelper(self, kw)
def _validate (self):
if not isinstance(self.desc, ofp_phy_port):
return "desc is not class ofp_phy_port"
return None
def pack (self):
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
packed += struct.pack("!B", self.reason)
packed += _PAD * 7 # Pad
packed += self.desc.pack()
return packed
def unpack (self, raw, offset=0):
offset,length = self._unpack_header(raw, offset)
offset,(self.reason,) = _unpack("!B", raw, offset)
offset = _skip(raw, offset, 7)
offset = self.desc.unpack(raw, offset)
assert length == len(self)
return offset,length
@staticmethod
def __len__ ():
return 64
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
if self.reason != other.reason: return False
if self.desc != other.desc: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
outstr += prefix + 'reason: ' + str(self.reason) + '\n'
outstr += prefix + 'desc: \n'
outstr += self.desc.show(prefix + ' ')
return outstr
@openflow_s_message("OFPT_PORT_STATS", 22)
class ofpt_port_stats(ofp_header):
def __init__ (self, **kw):
ofp_header.__init__(self)
self.port_no = 0
self.tx_congestion = 0
self.tx_bytes = 0
self.rx_bytes = 0
self.hw_addr = EMPTY_ETH
initHelper(self,kw)
  def pack(self):
    assert self._assert()
    packed = b""
    packed += ofp_header.pack(self)
    packed += struct.pack("!H", self.port_no)
    # Pack in the same layout unpack() expects: hw_addr, 7 pad bytes,
    # tx_congestion, then the two 64-bit counters (40 bytes total).
    if isinstance(self.hw_addr, bytes):
      packed += self.hw_addr
    else:
      packed += self.hw_addr.toRaw()
    packed += _PAD * 7
    packed += struct.pack("!B", self.tx_congestion)
    packed += struct.pack("!QQ", self.tx_bytes, self.rx_bytes)
    return packed
def unpack(self, raw, offset=0):
offset,length = self._unpack_header(raw, offset)
offset,(self.port_no,) = _unpack("!H", raw, offset)
offset,self.hw_addr = _readether(raw, offset)
offset = _skip(raw, offset, 7)
offset,(self.tx_congestion,) = _unpack("!B",raw,offset)
offset,(self.tx_bytes,) = _unpack("!Q", raw, offset)
offset,(self.rx_bytes,) = _unpack("!Q", raw, offset)
assert length == len(self)
return offset,length
@staticmethod
def __len__():
return 40
@openflow_s_message("OFPT_ERROR", 1)
class ofp_error (ofp_header):
_MIN_LENGTH = 12
def __init__ (self, **kw):
ofp_header.__init__(self)
self.type = 0
self.code = 0
self.data = b''
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
packed += struct.pack("!HH", self.type, self.code)
packed += self.data
return packed
def unpack (self, raw, offset=0):
offset,length = self._unpack_header(raw, offset)
offset,(self.type, self.code) = _unpack("!HH", raw, offset)
offset,self.data = _read(raw, offset, length - 12)
assert length == len(self)
return offset,length
def __len__ (self):
return 12 + len(self.data)
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
if self.type != other.type: return False
if self.code != other.code: return False
if self.data != other.data: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
t = self.type
c = self.code
if t < len(ofp_error_type):
n = ofp_error_type_map[t]
t = "%s (%i)" % (n, t)
n = 'ofp' + n.lower()[5:] + '_code_map'
if n in sys.modules[__name__].__dict__:
if c in sys.modules[__name__].__dict__[n]:
c = "%s (%i)" % (sys.modules[__name__].__dict__[n][c], c)
outstr += prefix + 'type: ' + str(t) + '\n'
outstr += prefix + 'code: ' + str(c) + '\n'
if len(self.data):
outstr += prefix + 'datalen: %s\n' % (len(self.data),)
outstr += prefix + hexdump(self.data).replace("\n", "\n" + prefix)
return outstr.strip()
#5. Symmetric Messages
@openflow_sc_message("OFPT_HELLO", 0)
class ofp_hello (ofp_header):
def __init__ (self, **kw):
ofp_header.__init__(self)
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
return packed
#def unpack (self, raw, offset=0):
# offset,length = self._unpack_header(raw, offset)
# assert length == len(self)
# return offset,length
@staticmethod
def __len__ ():
return 8
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
return outstr
@openflow_sc_message("OFPT_ECHO_REQUEST", 2,
request_for="ofp_echo_reply")
class ofp_echo_request (ofp_header):
_MIN_LENGTH = 8
def __init__ (self, **kw):
ofp_header.__init__(self)
self.body = b''
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
packed += self.body
return packed
def unpack (self, raw, offset=0):
offset,length = self._unpack_header(raw, offset)
offset,self.body = _read(raw, offset, length - 8)
assert length == len(self)
return offset,length
def __len__ (self):
return 8 + len(self.body)
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
if self.body != other.body: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
outstr += prefix + 'body:\n'
outstr += _format_body(self.body, prefix + ' ') + '\n'
return outstr
@openflow_sc_message("OFPT_ECHO_REPLY", 3,
reply_to="ofp_echo_request")
class ofp_echo_reply (ofp_header):
_MIN_LENGTH = 8
def __init__ (self, **kw):
ofp_header.__init__(self)
self.body = b''
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
packed += self.body
return packed
def unpack (self, raw, offset=0):
offset,length = self._unpack_header(raw, offset)
offset,self.body = _read(raw, offset, length - 8)
assert length == len(self)
return offset,length
def __len__ (self):
return 8 + len(self.body)
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
if self.body != other.body: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
outstr += prefix + 'body:\n'
outstr += _format_body(self.body, prefix + ' ') + '\n'
return outstr
class ofp_vendor_base (ofp_header):
  """
  Base class for vendor messages
  """
  header_type = 4 # OFPT_VENDOR
@openflow_sc_message("OFPT_VENDOR", 4)
class ofp_vendor_generic (ofp_vendor_base):
_MIN_LENGTH = 12
_collect_raw = False
def __init__ (self, **kw):
ofp_header.__init__(self)
self.vendor = 0
self.data = b''
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
packed += struct.pack("!L", self.vendor)
if hasattr(self.data, "pack"):
packed += self.data.pack()
else:
packed += self.data
return packed
def unpack (self, raw, offset=0):
_offset = offset
offset,length = self._unpack_header(raw, offset)
offset,(self.vendor,) = _unpack("!L", raw, offset)
offset,self.data = _read(raw, offset, length-12)
if self._collect_raw:
      self.raw = raw[_offset:_offset+length]
return offset,length
def __len__ (self):
return 12 + len(self.data)
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
if self.vendor != other.vendor: return False
if self.data != other.data: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
outstr += prefix + 'vendor: ' + str(self.vendor) + '\n'
outstr += prefix + 'datalen: ' + str(len(self.data)) + '\n'
#outstr += prefix + hexdump(self.data).replace("\n", "\n" + prefix)
return outstr
@openflow_c_message("OFPT_FEATURES_REQUEST", 5,
request_for="ofp_features_reply")
class ofp_features_request (ofp_header):
def __init__ (self, **kw):
ofp_header.__init__(self)
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
return packed
def unpack (self, raw, offset=0):
offset,length = self._unpack_header(raw, offset)
assert length == len(self)
return offset,length
@staticmethod
def __len__ ():
return 8
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
return outstr
@openflow_c_message("OFPT_GET_CONFIG_REQUEST", 7,
request_for="ofp_get_config_reply")
class ofp_get_config_request (ofp_header):
def __init__ (self, **kw):
ofp_header.__init__(self)
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
return packed
#def unpack (self, raw, offset=0):
# offset,length = self._unpack_header(raw, offset)
# assert length == len(self)
# return offset,length
@staticmethod
def __len__ ():
return 8
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
return outstr
@openflow_s_message("OFPT_GET_CONFIG_REPLY", 8,
reply_to="ofp_get_config_request")
class ofp_get_config_reply (ofp_header): # uses ofp_switch_config
def __init__ (self, **kw):
ofp_header.__init__(self)
self.flags = 0
self.miss_send_len = OFP_DEFAULT_MISS_SEND_LEN
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
packed += struct.pack("!HH", self.flags, self.miss_send_len)
return packed
def unpack (self, raw, offset=0):
offset,length = self._unpack_header(raw, offset)
offset,(self.flags, self.miss_send_len) = \
_unpack("!HH", raw, offset)
assert length == len(self)
return offset,length
@staticmethod
def __len__ ():
return 12
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_header.__eq__(self, other): return False
if self.flags != other.flags: return False
if self.miss_send_len != other.miss_send_len: return False
return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
outstr += prefix + 'flags: ' + str(self.flags) + '\n'
outstr += prefix + 'miss_send_len: ' + str(self.miss_send_len) + '\n'
return outstr
def _unpack_queue_props (b, length, offset=0):
"""
Parses queue props from a buffer
b is a buffer (bytes)
offset, if specified, is where in b to start decoding
  returns (next_offset, [Props])
"""
if (len(b) - offset) < length: raise UnderrunError
props = []
end = length + offset
while offset < end:
(t,l) = struct.unpack_from("!HH", b, offset)
if (len(b) - offset) < l: raise UnderrunError
a = _queue_prop_type_to_class.get(t)
if a is None:
# Use generic prop header for unknown type
a = ofp_queue_prop_generic()
else:
a = a()
a.unpack(b[offset:offset+l])
assert len(a) == l
props.append(a)
offset += l
return (offset, props)
def _unpack_actions (b, length, offset=0):
"""
Parses actions from a buffer
b is a buffer (bytes)
offset, if specified, is where in b to start decoding
returns (next_offset, [Actions])
"""
if (len(b) - offset) < length: raise UnderrunError
actions = []
end = length + offset
while offset < end:
(t,l) = struct.unpack_from("!HH", b, offset)
if (len(b) - offset) < l: raise UnderrunError
a = _action_type_to_class.get(t)
if a is None:
# Use generic action header for unknown type
a = ofp_action_generic()
else:
a = a()
a.unpack(b[offset:offset+l])
assert len(a) == l
actions.append(a)
offset += l
return (offset, actions)
def _init ():
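  # For every ofp_*_rev_map dict defined above, build the corresponding
  # forward map (value -> name), an ordered value list where possible, and
  # a module-level constant for each name.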
def formatMap (name, m):
o = name + " = {\n"
vk = sorted([(v,k) for k,v in m.iteritems()])
maxlen = 2 + len(reduce(lambda a,b: a if len(a)>len(b) else b,
(v for k,v in vk)))
fstr = " %-" + str(maxlen) + "s : %s,\n"
for v,k in vk:
o += fstr % ("'" + k + "'",v)
o += "}"
return o
"""
maps = []
for k,v in globals().iteritems():
if k.startswith("ofp_") and k.endswith("_map") and type(v) == dict:
maps.append((k,v))
for name,m in maps:
rev = {}
name = name[:-4]
names = globals()[name]
for n in names:
rev[n] = globals()[n]
globals()[name + '_rev_map'] = rev
print(formatMap(name + "_rev_map", rev))
return
"""
maps = []
for k,v in globals().iteritems():
if (k.startswith("ofp_") and k.endswith("_rev_map")
and type(v) == dict):
maps.append((k[:-8],v))
for name,m in maps:
# Try to generate forward maps
forward = dict(((v,k) for k,v in m.iteritems()))
if len(forward) == len(m):
if name + "_map" not in globals():
globals()[name + "_map"] = forward
else:
print(name + "_rev_map is not a map")
# Try to generate lists
v = m.values()
v.sort()
if v[-1] != len(v)-1:
# Allow ones where the last value is a special value (e.g., VENDOR)
del v[-1]
if len(v) > 0 and v[0] == 0 and v[-1] == len(v)-1:
globals()[name] = v
# Generate gobals
for k,v in m.iteritems():
globals()[k] = v
_init()
# Values from macro definitions
OFP_FLOW_PERMANENT = 0
OFP_DL_TYPE_ETH2_CUTOFF = 0x0600
DESC_STR_LEN = 256
OFPFW_ICMP_CODE = OFPFW_TP_DST
OFPQ_MIN_RATE_UNCFG = 0xffff
OFP_VERSION = 0x01
OFP_MAX_TABLE_NAME_LEN = 32
OFP_DL_TYPE_NOT_ETH_TYPE = 0x05ff
OFP_DEFAULT_MISS_SEND_LEN = 128
OFP_MAX_PORT_NAME_LEN = 16
OFP_SSL_PORT = 6633
OFPFW_ICMP_TYPE = OFPFW_TP_SRC
OFP_TCP_PORT = 6633
SERIAL_NUM_LEN = 32
OFP_DEFAULT_PRIORITY = 0x8000
OFP_HIGH_PRIORITY = 0x8001
OFP_VLAN_NONE = 0xffff
OFPQ_ALL = 0xffffffff
ofp_match_data = {
'in_port' : (0, OFPFW_IN_PORT),
'dl_src' : (EMPTY_ETH, OFPFW_DL_SRC),
'dl_dst' : (EMPTY_ETH, OFPFW_DL_DST),
'dl_vlan' : (0, OFPFW_DL_VLAN),
'dl_vlan_pcp' : (0, OFPFW_DL_VLAN_PCP),
'dl_type' : (0, OFPFW_DL_TYPE),
'nw_tos' : (0, OFPFW_NW_TOS),
'nw_proto' : (0, OFPFW_NW_PROTO),
'nw_src' : (0, OFPFW_NW_SRC_ALL),
'nw_dst' : (0, OFPFW_NW_DST_ALL),
'tp_src' : (0, OFPFW_TP_SRC),
'tp_dst' : (0, OFPFW_TP_DST),
}
| apache-2.0 |
codinguser/gnucash | src/python/pycons/ishell.py | 14 | 4561 | #! /usr/bin/env python
#
# Adapted from:
#
# Backend to the console plugin.
# @author: Eitan Isaacson
# @organization: IBM Corporation
# @copyright: Copyright (c) 2007 IBM Corporation
# @license: BSD
#
# All rights reserved. This program and the accompanying materials are made
# available under the terms of the BSD which accompanies this distribution, and
# is available at U{http://www.opensource.org/licenses/bsd-license.php}
#
import os
import sys
import re
from StringIO import StringIO
try:
import IPython
from IPython import ipapi
except Exception,e:
raise "Error importing IPython (%s)" % str(e)
# ------------------------------------------------------------------ class Shell
class Shell:
""" """
def __init__(self,argv=None,user_ns=None,user_global_ns=None,
cin=None, cout=None,cerr=None, input_func=None):
""" """
if input_func:
IPython.iplib.raw_input_original = input_func
if cin:
IPython.Shell.Term.cin = cin
if cout:
IPython.Shell.Term.cout = cout
if cerr:
IPython.Shell.Term.cerr = cerr
if argv is None:
argv=[]
IPython.iplib.raw_input = lambda x: None
self.term = IPython.genutils.IOTerm(cin=cin, cout=cout, cerr=cerr)
os.environ['TERM'] = 'dumb'
excepthook = sys.excepthook
self.IP = IPython.Shell.make_IPython(argv,
user_ns=user_ns,
user_global_ns=user_global_ns,
embedded=True,
shell_class=IPython.Shell.InteractiveShell)
self.IP.system = lambda cmd: self.shell(self.IP.var_expand(cmd),
header='IPython system call: ',
verbose=self.IP.rc.system_verbose)
# Get a hold of the public IPython API object and use it
self.ip = ipapi.get()
self.ip.magic('colors LightBG')
sys.excepthook = excepthook
self.iter_more = 0
self.complete_sep = re.compile('[\s\{\}\[\]\(\)]')
def namespace(self):
return self.IP.user_ns
def eval(self, console):
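        # Run one console line through IPython: read the line, push it to the
        # interpreter, then flush any system output and captured command output
        # back to the console widget and redraw the prompt.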
console.write ('\n')
orig_stdout = sys.stdout
sys.stdout = IPython.Shell.Term.cout
try:
line = self.IP.raw_input(None, self.iter_more)
if self.IP.autoindent:
self.IP.readline_startup_hook(None)
except KeyboardInterrupt:
self.IP.write('\nKeyboardInterrupt\n')
self.IP.resetbuffer()
self.IP.outputcache.prompt_count -= 1
if self.IP.autoindent:
self.IP.indent_current_nsp = 0
self.iter_more = 0
except:
self.IP.showtraceback()
else:
self.iter_more = self.IP.push(line)
if (self.IP.SyntaxTB.last_syntax_error and self.IP.rc.autoedit_syntax):
self.IP.edit_syntax_error()
if self.iter_more:
self.prompt = str(self.IP.outputcache.prompt2).strip()
if self.IP.autoindent:
self.IP.readline_startup_hook(self.IP.pre_readline)
else:
self.prompt = str(self.IP.outputcache.prompt1).strip()
sys.stdout = orig_stdout
# System output (if any)
while True:
try:
buf = os.read(console.piperead, 256)
except:
break
else:
console.write (buf)
if len(buf) < 256: break
# Command output
rv = console.cout.getvalue()
if rv:
rv = rv.strip('\n')
console.write (rv)
if rv:
console.write ('\n')
console.cout.truncate(0)
console.prompt()
def complete(self, line):
split_line = self.complete_sep.split(line)
possibilities = self.IP.complete(split_line[-1])
if possibilities:
common_prefix = os.path.commonprefix (possibilities)
completed = line[:-len(split_line[-1])]+common_prefix
else:
completed = line
return completed, possibilities
def shell(self, cmd,verbose=0,debug=0,header=''):
stat = 0
if verbose or debug: print header+cmd
if not debug:
input, output = os.popen4(cmd)
print output.read()
output.close()
input.close()
| gpl-2.0 |
pducks32/intergrala | python/sympy/sympy/matrices/expressions/hadamard.py | 24 | 2455 | from __future__ import print_function, division
from sympy.core import Mul, Basic, sympify
from sympy.strategies import unpack, flatten, sort, condition, exhaust, do_one
from sympy.matrices.expressions.matexpr import MatrixExpr, ShapeError
def hadamard_product(*matrices):
"""
Return the elementwise (aka Hadamard) product of matrices.
Examples
--------
>>> from sympy.matrices import hadamard_product, MatrixSymbol
>>> A = MatrixSymbol('A', 2, 3)
>>> B = MatrixSymbol('B', 2, 3)
>>> hadamard_product(A)
A
>>> hadamard_product(A, B)
A.*B
>>> hadamard_product(A, B)[0, 1]
A[0, 1]*B[0, 1]
"""
if not matrices:
raise TypeError("Empty Hadamard product is undefined")
validate(*matrices)
if len(matrices) == 1:
return matrices[0]
else:
return HadamardProduct(*matrices).doit()
class HadamardProduct(MatrixExpr):
"""
Elementwise product of matrix expressions
This is a symbolic object that simply stores its argument without
evaluating it. To actually compute the product, use the function
``hadamard_product()``.
>>> from sympy.matrices import hadamard_product, HadamardProduct, MatrixSymbol
>>> A = MatrixSymbol('A', 5, 5)
>>> B = MatrixSymbol('B', 5, 5)
>>> isinstance(hadamard_product(A, B), HadamardProduct)
True
"""
is_HadamardProduct = True
def __new__(cls, *args, **kwargs):
args = list(map(sympify, args))
check = kwargs.get('check' , True)
if check:
validate(*args)
return super(HadamardProduct, cls).__new__(cls, *args)
@property
def shape(self):
return self.args[0].shape
def _entry(self, i, j):
return Mul(*[arg._entry(i, j) for arg in self.args])
def _eval_transpose(self):
from sympy.matrices.expressions.transpose import transpose
return HadamardProduct(*list(map(transpose, self.args)))
def doit(self, **ignored):
return canonicalize(self)
def validate(*args):
if not all(arg.is_Matrix for arg in args):
raise TypeError("Mix of Matrix and Scalar symbols")
A = args[0]
for B in args[1:]:
if A.shape != B.shape:
raise ShapeError("Matrices %s and %s are not aligned" % (A, B))
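# Canonicalization: repeatedly unwrap trivial (single-argument) products and
# flatten nested HadamardProducts until no rule applies any more.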
rules = (unpack,
flatten)
canonicalize = exhaust(condition(lambda x: isinstance(x, HadamardProduct),
do_one(*rules)))
| mit |
conwayje/ase-python | ase/gui/images.py | 2 | 16006 | from math import sqrt
import numpy as np
from ase.data import covalent_radii
from ase.atoms import Atoms
from ase.calculators.singlepoint import SinglePointCalculator
from ase.io import read, write, string2index
from ase.constraints import FixAtoms
from ase.gui.defaults import read_defaults
from ase.quaternions import Quaternion
class Images:
def __init__(self, images=None):
if images is not None:
self.initialize(images)
def initialize(self, images, filenames=None, init_magmom=False):
self.natoms = len(images[0])
self.nimages = len(images)
if filenames is None:
filenames = [None] * self.nimages
self.filenames = filenames
if hasattr(images[0], 'get_shapes'):
self.Q = np.empty((self.nimages, self.natoms, 4))
self.shapes = images[0].get_shapes()
import os as os
if os.path.exists('shapes'):
shapesfile = open('shapes')
lines = shapesfile.readlines()
shapesfile.close()
if '#{type:(shape_x,shape_y,shape_z), .....,}' in lines[0]:
shape = eval(lines[1])
shapes=[]
for an in images[0].get_atomic_numbers():
shapes.append(shape[an])
self.shapes = np.array(shapes)
else:
print 'shape file has wrong format'
else:
print 'no shapesfile found: default shapes were used!'
else:
self.shapes = None
self.P = np.empty((self.nimages, self.natoms, 3))
self.V = np.empty((self.nimages, self.natoms, 3))
self.E = np.empty(self.nimages)
self.K = np.empty(self.nimages)
self.F = np.empty((self.nimages, self.natoms, 3))
self.M = np.empty((self.nimages, self.natoms))
self.T = np.empty((self.nimages, self.natoms), int)
self.A = np.empty((self.nimages, 3, 3))
self.D = np.empty((self.nimages, 3))
self.Z = images[0].get_atomic_numbers()
self.q = np.empty((self.nimages, self.natoms))
self.pbc = images[0].get_pbc()
self.covalent_radii = covalent_radii
config = read_defaults()
if config['covalent_radii'] is not None:
for data in config['covalent_radii']:
self.covalent_radii[data[0]] = data[1]
warning = False
for i, atoms in enumerate(images):
natomsi = len(atoms)
if (natomsi != self.natoms or
(atoms.get_atomic_numbers() != self.Z).any()):
raise RuntimeError('Can not handle different images with ' +
'different numbers of atoms or different ' +
'kinds of atoms!')
self.P[i] = atoms.get_positions()
self.V[i] = atoms.get_velocities()
if hasattr(self, 'Q'):
self.Q[i] = atoms.get_quaternions()
self.A[i] = atoms.get_cell()
self.D[i] = atoms.get_celldisp().reshape((3,))
if (atoms.get_pbc() != self.pbc).any():
warning = True
try:
self.E[i] = atoms.get_potential_energy()
except RuntimeError:
self.E[i] = np.nan
self.K[i] = atoms.get_kinetic_energy()
try:
self.F[i] = atoms.get_forces(apply_constraint=False)
except RuntimeError:
self.F[i] = np.nan
try:
if init_magmom:
self.M[i] = atoms.get_initial_magnetic_moments()
else:
self.M[i] = atoms.get_magnetic_moments()
except (RuntimeError, AttributeError):
self.M[i] = atoms.get_initial_magnetic_moments()
try:
self.q[i] = atoms.get_charges()
except RuntimeError:
self.q[i] = np.nan
# added support for tags
try:
self.T[i] = atoms.get_tags()
except RuntimeError:
self.T[i] = 0
if warning:
            print('WARNING: Not all images have the same boundary conditions!')
self.selected = np.zeros(self.natoms, bool)
self.selected_ordered = []
self.atoms_to_rotate_0 = np.zeros(self.natoms, bool)
self.visible = np.ones(self.natoms, bool)
self.nselected = 0
self.set_dynamic(constraints = images[0].constraints)
self.repeat = np.ones(3, int)
self.set_radii(config['radii_scale'])
def prepare_new_atoms(self):
"Marks that the next call to append_atoms should clear the images."
self.next_append_clears = True
def append_atoms(self, atoms, filename=None):
"Append an atoms object to the images already stored."
assert len(atoms) == self.natoms
if self.next_append_clears:
i = 0
else:
i = self.nimages
for name in ('P', 'V', 'E', 'K', 'F', 'M', 'A', 'T'):
a = getattr(self, name)
newa = np.empty( (i+1,) + a.shape[1:], a.dtype )
if not self.next_append_clears:
newa[:-1] = a
setattr(self, name, newa)
self.next_append_clears = False
self.P[i] = atoms.get_positions()
self.V[i] = atoms.get_velocities()
self.A[i] = atoms.get_cell()
try:
self.E[i] = atoms.get_potential_energy()
except RuntimeError:
self.E[i] = np.nan
self.K[i] = atoms.get_kinetic_energy()
try:
self.F[i] = atoms.get_forces(apply_constraint=False)
except RuntimeError:
self.F[i] = np.nan
try:
self.M[i] = atoms.get_magnetic_moments()
except (RuntimeError, AttributeError):
self.M[i] = np.nan
try:
self.T[i] = atoms.get_tags()
except AttributeError:
if i == 0:
self.T[i] = 0
else:
self.T[i] = self.T[i-1]
self.nimages = i + 1
self.filenames.append(filename)
self.set_dynamic()
return self.nimages
def set_radii(self, scale):
        if self.shapes is None:
self.r = self.covalent_radii[self.Z] * scale
else:
self.r = np.sqrt(np.sum(self.shapes**2, axis=1)) * scale
def read(self, filenames, index=-1, filetype=None):
images = []
names = []
for filename in filenames:
i = read(filename, index,filetype)
if not isinstance(i, list):
i = [i]
images.extend(i)
names.extend([filename] * len(i))
self.initialize(images, names)
def import_atoms(self, filename, cur_frame):
if filename:
filename = filename[0]
old_a = self.get_atoms(cur_frame)
imp_a = read(filename, -1)
new_a = old_a + imp_a
self.initialize([new_a], [filename])
def repeat_images(self, repeat):
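        # Tile every image repeat[0] x repeat[1] x repeat[2] times along the
        # cell vectors, expanding all per-atom arrays to the repeated size.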
n = self.repeat.prod()
repeat = np.array(repeat)
self.repeat = repeat
N = repeat.prod()
natoms = self.natoms // n
P = np.empty((self.nimages, natoms * N, 3))
V = np.empty((self.nimages, natoms * N, 3))
M = np.empty((self.nimages, natoms * N))
T = np.empty((self.nimages, natoms * N), int)
F = np.empty((self.nimages, natoms * N, 3))
Z = np.empty(natoms * N, int)
r = np.empty(natoms * N)
dynamic = np.empty(natoms * N, bool)
a0 = 0
for i0 in range(repeat[0]):
for i1 in range(repeat[1]):
for i2 in range(repeat[2]):
a1 = a0 + natoms
for i in range(self.nimages):
P[i, a0:a1] = (self.P[i, :natoms] +
np.dot((i0, i1, i2), self.A[i]))
V[:, a0:a1] = self.V[:, :natoms]
F[:, a0:a1] = self.F[:, :natoms]
M[:, a0:a1] = self.M[:, :natoms]
T[:, a0:a1] = self.T[:, :natoms]
Z[a0:a1] = self.Z[:natoms]
r[a0:a1] = self.r[:natoms]
dynamic[a0:a1] = self.dynamic[:natoms]
a0 = a1
self.P = P
self.V = V
self.F = F
self.Z = Z
self.T = T
self.M = M
self.r = r
self.dynamic = dynamic
self.natoms = natoms * N
self.selected = np.zeros(natoms * N, bool)
self.atoms_to_rotate_0 = np.zeros(self.natoms, bool)
self.visible = np.ones(natoms * N, bool)
self.nselected = 0
def center(self):
""" center each image in the existing unit cell, keeping the cell constant. """
c = self.A.sum(axis=1) / 2.0 - self.P.mean(axis=1)
self.P += c[:, np.newaxis, :]
def graph(self, expr):
""" routine to create the data in ag graphs, defined by the string expr. """
import ase.units as units
code = compile(expr + ',', 'atoms.py', 'eval')
n = self.nimages
def d(n1, n2):
return sqrt(((R[n1] - R[n2])**2).sum())
def a(n1, n2, n3):
v1 = R[n1]-R[n2]
v2 = R[n3]-R[n2]
arg = np.vdot(v1,v2)/(sqrt((v1**2).sum()*(v2**2).sum()))
if arg > 1.0: arg = 1.0
if arg < -1.0: arg = -1.0
return 180.0*np.arccos(arg)/np.pi
def dih(n1, n2, n3, n4):
# vector 0->1, 1->2, 2->3 and their normalized cross products:
a = R[n2]-R[n1]
b = R[n3]-R[n2]
c = R[n4]-R[n3]
bxa = np.cross(b,a)
bxa /= np.sqrt(np.vdot(bxa,bxa))
cxb = np.cross(c,b)
cxb /= np.sqrt(np.vdot(cxb,cxb))
angle = np.vdot(bxa,cxb)
# check for numerical trouble due to finite precision:
if angle < -1: angle = -1
if angle > 1: angle = 1
angle = np.arccos(angle)
if (np.vdot(bxa,c)) > 0: angle = 2*np.pi-angle
return angle*180.0/np.pi
# get number of mobile atoms for temperature calculation
ndynamic = 0
for dyn in self.dynamic:
if dyn: ndynamic += 1
S = self.selected
D = self.dynamic[:, np.newaxis]
E = self.E
s = 0.0
data = []
for i in range(n):
R = self.P[i]
V = self.V[i]
F = self.F[i]
A = self.A[i]
M = self.M[i]
f = ((F * D)**2).sum(1)**.5
fmax = max(f)
fave = f.mean()
epot = E[i]
ekin = self.K[i]
e = epot + ekin
T = 2.0 * ekin / (3.0 * ndynamic * units.kB)
data = eval(code)
if i == 0:
m = len(data)
xy = np.empty((m, n))
xy[:, i] = data
if i + 1 < n:
s += sqrt(((self.P[i + 1] - R)**2).sum())
return xy
def set_dynamic(self, constraints = None):
self.dynamic = np.ones(self.natoms, bool)
if constraints is not None:
for con in constraints:
if isinstance(con,FixAtoms):
self.dynamic[con.index] = False
def write(self, filename, rotations='', show_unit_cell=False, bbox=None, **kwargs):
indices = range(self.nimages)
p = filename.rfind('@')
if p != -1:
try:
slice = string2index(filename[p + 1:])
except ValueError:
pass
else:
indices = indices[slice]
filename = filename[:p]
if isinstance(indices, int):
indices = [indices]
images = [self.get_atoms(i) for i in indices]
if len(filename) > 4 and filename[-4:] in ['.eps', '.png', '.pov']:
write(filename, images,
rotation=rotations, show_unit_cell=show_unit_cell,
bbox=bbox, **kwargs)
else:
write(filename, images, **kwargs)
def get_atoms(self, frame):
atoms = Atoms(positions=self.P[frame],
numbers=self.Z,
magmoms=self.M[0],
tags=self.T[frame],
cell=self.A[frame],
pbc=self.pbc)
if not np.isnan(self.V).any():
atoms.set_velocities(self.V[frame])
# check for constrained atoms and add them accordingly:
if not self.dynamic.all():
atoms.set_constraint(FixAtoms(mask=1-self.dynamic))
atoms.set_calculator(SinglePointCalculator(self.E[frame],
self.F[frame],
None, None, atoms))
return atoms
def delete(self, i):
self.nimages -= 1
P = np.empty((self.nimages, self.natoms, 3))
V = np.empty((self.nimages, self.natoms, 3))
F = np.empty((self.nimages, self.natoms, 3))
A = np.empty((self.nimages, 3, 3))
E = np.empty(self.nimages)
P[:i] = self.P[:i]
P[i:] = self.P[i + 1:]
self.P = P
V[:i] = self.V[:i]
V[i:] = self.V[i + 1:]
self.V = V
F[:i] = self.F[:i]
F[i:] = self.F[i + 1:]
self.F = F
A[:i] = self.A[:i]
A[i:] = self.A[i + 1:]
self.A = A
E[:i] = self.E[:i]
E[i:] = self.E[i + 1:]
self.E = E
del self.filenames[i]
def aneb(self):
n = self.nimages
assert n % 5 == 0
levels = n // 5
n = self.nimages = 2 * levels + 3
P = np.empty((self.nimages, self.natoms, 3))
V = np.empty((self.nimages, self.natoms, 3))
F = np.empty((self.nimages, self.natoms, 3))
E = np.empty(self.nimages)
for L in range(levels):
P[L] = self.P[L * 5]
P[n - L - 1] = self.P[L * 5 + 4]
V[L] = self.V[L * 5]
V[n - L - 1] = self.V[L * 5 + 4]
F[L] = self.F[L * 5]
F[n - L - 1] = self.F[L * 5 + 4]
E[L] = self.E[L * 5]
E[n - L - 1] = self.E[L * 5 + 4]
for i in range(3):
P[levels + i] = self.P[levels * 5 - 4 + i]
V[levels + i] = self.V[levels * 5 - 4 + i]
F[levels + i] = self.F[levels * 5 - 4 + i]
E[levels + i] = self.E[levels * 5 - 4 + i]
self.P = P
self.V = V
self.F = F
self.E = E
def interpolate(self, m):
assert self.nimages == 2
self.nimages = 2 + m
P = np.empty((self.nimages, self.natoms, 3))
V = np.empty((self.nimages, self.natoms, 3))
F = np.empty((self.nimages, self.natoms, 3))
A = np.empty((self.nimages, 3, 3))
E = np.empty(self.nimages)
P[0] = self.P[0]
V[0] = self.V[0]
F[0] = self.F[0]
A[0] = self.A[0]
E[0] = self.E[0]
for i in range(1, m + 1):
x = i / (m + 1.0)
y = 1 - x
P[i] = y * self.P[0] + x * self.P[1]
V[i] = y * self.V[0] + x * self.V[1]
F[i] = y * self.F[0] + x * self.F[1]
A[i] = y * self.A[0] + x * self.A[1]
E[i] = y * self.E[0] + x * self.E[1]
P[-1] = self.P[1]
V[-1] = self.V[1]
F[-1] = self.F[1]
A[-1] = self.A[1]
E[-1] = self.E[1]
self.P = P
self.V = V
self.F = F
self.A = A
self.E = E
self.filenames[1:1] = [None] * m
if __name__ == '__main__':
import os
os.system('python gui.py')
| gpl-2.0 |
jostep/tensorflow | tensorflow/contrib/tensorboard/plugins/projector/__init__.py | 97 | 2351 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Public API for the Embedding Projector.
@@ProjectorPluginAsset
@@ProjectorConfig
@@EmbeddingInfo
@@EmbeddingMetadata
@@SpriteMetadata
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from google.protobuf import text_format
from tensorflow.contrib.tensorboard.plugins.projector import projector_config_pb2
# pylint: disable=wildcard-import
from tensorflow.contrib.tensorboard.plugins.projector.projector_config_pb2 import *
# pylint: enable=wildcard-import
from tensorflow.python.lib.io import file_io
def visualize_embeddings(summary_writer, config):
"""Stores a config file used by the embedding projector.
Args:
summary_writer: The summary writer used for writing events.
config: `tf.contrib.tensorboard.plugins.projector.ProjectorConfig`
proto that holds the configuration for the projector such as paths to
checkpoint files and metadata files for the embeddings. If
`config.model_checkpoint_path` is none, it defaults to the
`logdir` used by the summary_writer.
Raises:
ValueError: If the summary writer does not have a `logdir`.
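  Example (a minimal sketch; the tensor and metadata names are illustrative):
    config = ProjectorConfig()
    embedding = config.embeddings.add()
    embedding.tensor_name = 'my_embedding/Variable'
    embedding.metadata_path = 'metadata.tsv'
    visualize_embeddings(summary_writer, config)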
"""
logdir = summary_writer.get_logdir()
# Sanity checks.
if logdir is None:
raise ValueError('Summary writer must have a logdir')
# Saving the config file in the logdir.
config_pbtxt = text_format.MessageToString(config)
# FYI - the 'projector_config.pbtxt' string is hardcoded in the projector
# plugin.
# TODO(dandelion): Restore this to a reference to the projector plugin
file_io.write_string_to_file(
os.path.join(logdir, 'projector_config.pbtxt'), config_pbtxt)
| apache-2.0 |
openstack-hyper-v-python/numpy | numpy/doc/indexing.py | 12 | 14353 | """
==============
Array indexing
==============
Array indexing refers to any use of the square brackets ([]) to index
array values. There are many options to indexing, which give numpy
indexing great power, but with power comes some complexity and the
potential for confusion. This section is just an overview of the
various options and issues related to indexing. Aside from single
element indexing, the details on most of these options are to be
found in related sections.
Assignment vs referencing
=========================
Most of the following examples show the use of indexing when
referencing data in an array. The examples work just as well
when assigning to an array. See the section at the end for
specific examples and explanations on how assignments work.
Single element indexing
=======================
Single element indexing for a 1-D array is what one expects. It works
exactly like that for other standard Python sequences. It is 0-based,
and accepts negative indices for indexing from the end of the array. ::
>>> x = np.arange(10)
>>> x[2]
2
>>> x[-2]
8
Unlike lists and tuples, numpy arrays support multidimensional indexing
for multidimensional arrays. That means that it is not necessary to
separate each dimension's index into its own set of square brackets. ::
>>> x.shape = (2,5) # now x is 2-dimensional
>>> x[1,3]
8
>>> x[1,-1]
9
Note that if one indexes a multidimensional array with fewer indices
than dimensions, one gets a subdimensional array. For example: ::
>>> x[0]
array([0, 1, 2, 3, 4])
That is, each index specified selects the array corresponding to the
rest of the dimensions selected. In the above example, choosing 0
means that the remaining dimension of length 5 is being left unspecified,
and that what is returned is an array of that dimensionality and size.
It must be noted that the returned array is not a copy of the original,
but points to the same values in memory as does the original array.
In this case, the 1-D array at the first position (0) is returned.
So using a single index on the returned array, results in a single
element being returned. That is: ::
>>> x[0][2]
2
So note that ``x[0,2] = x[0][2]`` though the second case is more
inefficient as a new temporary array is created after the first index
that is subsequently indexed by 2.
Note to those used to IDL or Fortran memory order as it relates to
indexing. Numpy uses C-order indexing. That means that the last
index usually represents the most rapidly changing memory location,
unlike Fortran or IDL, where the first index represents the most
rapidly changing location in memory. This difference represents a
great potential for confusion.
Other indexing options
======================
It is possible to slice and stride arrays to extract arrays of the
same number of dimensions, but of different sizes than the original.
The slicing and striding works exactly the same way it does for lists
and tuples except that they can be applied to multiple dimensions as
well. A few examples illustrates best: ::
>>> x = np.arange(10)
>>> x[2:5]
array([2, 3, 4])
>>> x[:-7]
array([0, 1, 2])
>>> x[1:7:2]
array([1, 3, 5])
>>> y = np.arange(35).reshape(5,7)
>>> y[1:5:2,::3]
array([[ 7, 10, 13],
[21, 24, 27]])
Note that slices of arrays do not copy the internal array data but
only produce new views of the original data.
It is possible to index arrays with other arrays for the purposes of
selecting lists of values out of arrays into new arrays. There are
two different ways of accomplishing this. One uses one or more arrays
of index values. The other involves giving a boolean array of the proper
shape to indicate the values to be selected. Index arrays are a very
powerful tool that allow one to avoid looping over individual elements in
arrays and thus greatly improve performance.
It is possible to use special features to effectively increase the
number of dimensions in an array through indexing so the resulting
array acquires the shape needed for use in an expression or with a
specific function.
Index arrays
============
Numpy arrays may be indexed with other arrays (or any other sequence-
like object that can be converted to an array, such as lists, with the
exception of tuples; see the end of this document for why this is). The
use of index arrays ranges from simple, straightforward cases to
complex, hard-to-understand cases. For all cases of index arrays, what
is returned is a copy of the original data, not a view as one gets for
slices.
Index arrays must be of integer type. Each value in the array indicates
which value in the array to use in place of the index. To illustrate: ::
>>> x = np.arange(10,1,-1)
>>> x
array([10, 9, 8, 7, 6, 5, 4, 3, 2])
>>> x[np.array([3, 3, 1, 8])]
array([7, 7, 9, 2])
The index array consisting of the values 3, 3, 1 and 8 correspondingly
creates an array of length 4 (same as the index array) where each index
is replaced by the value the index array has in the array being indexed.
Negative values are permitted and work as they do with single indices
or slices: ::
>>> x[np.array([3,3,-3,8])]
array([7, 7, 4, 2])
It is an error to have index values out of bounds: ::
>>> x[np.array([3, 3, 20, 8])]
<type 'exceptions.IndexError'>: index 20 out of bounds 0<=index<9
Generally speaking, what is returned when index arrays are used is
an array with the same shape as the index array, but with the type
and values of the array being indexed. As an example, we can use a
multidimensional index array instead: ::
>>> x[np.array([[1,1],[2,3]])]
array([[9, 9],
[8, 7]])
Indexing Multi-dimensional arrays
=================================
Things become more complex when multidimensional arrays are indexed,
particularly with multidimensional index arrays. These tend to be
more unusual uses, but they are permitted, and they are useful for some
problems. We'll start with the simplest multidimensional case (using
the array y from the previous examples): ::
>>> y[np.array([0,2,4]), np.array([0,1,2])]
array([ 0, 15, 30])
In this case, if the index arrays have a matching shape, and there is
an index array for each dimension of the array being indexed, the
resultant array has the same shape as the index arrays, and the values
correspond to the index set for each position in the index arrays. In
this example, the first index value is 0 for both index arrays, and
thus the first value of the resultant array is y[0,0]. The next value
is y[2,1], and the last is y[4,2].
If the index arrays do not have the same shape, there is an attempt to
broadcast them to the same shape. If they cannot be broadcast to the
same shape, an exception is raised: ::
>>> y[np.array([0,2,4]), np.array([0,1])]
<type 'exceptions.ValueError'>: shape mismatch: objects cannot be
broadcast to a single shape
The broadcasting mechanism permits index arrays to be combined with
scalars for other indices. The effect is that the scalar value is used
for all the corresponding values of the index arrays: ::
>>> y[np.array([0,2,4]), 1]
array([ 1, 15, 29])
Jumping to the next level of complexity, it is possible to only
partially index an array with index arrays. It takes a bit of thought
to understand what happens in such cases. For example if we just use
one index array with y: ::
>>> y[np.array([0,2,4])]
array([[ 0, 1, 2, 3, 4, 5, 6],
[14, 15, 16, 17, 18, 19, 20],
[28, 29, 30, 31, 32, 33, 34]])
What results is the construction of a new array where each value of
the index array selects one row from the array being indexed and the
resultant array has the resulting shape (size of row, number index
elements).
An example of where this may be useful is for a color lookup table
where we want to map the values of an image into RGB triples for
display. The lookup table could have a shape (nlookup, 3). Indexing
such an array with an image with shape (ny, nx) with dtype=np.uint8
(or any integer type so long as values are with the bounds of the
lookup table) will result in an array of shape (ny, nx, 3) where a
triple of RGB values is associated with each pixel location.
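A short concrete example of this pattern (the palette and image values
below are made up purely for illustration): ::
>>> palette = np.array([[0, 0, 0], [255, 0, 0], [0, 255, 0]])
>>> image = np.array([[0, 2], [1, 0]])
>>> palette[image].shape
(2, 2, 3)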
In general, the shape of the resultant array will be the concatenation
of the shape of the index array (or the shape that all the index arrays
were broadcast to) with the shape of any unused dimensions (those not
indexed) in the array being indexed.
Boolean or "mask" index arrays
==============================
Boolean arrays used as indices are treated in a different manner
entirely than index arrays. Boolean arrays must be of the same shape
as the array being indexed, or broadcastable to the same shape. In the
most straightforward case, the boolean array has the same shape: ::
>>> b = y>20
>>> y[b]
array([21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34])
The result is a 1-D array containing all the elements in the indexed
array corresponding to all the true elements in the boolean array. As
with index arrays, what is returned is a copy of the data, not a view
as one gets with slices.
With broadcasting, multidimensional arrays may be the result. For
example: ::
>>> b[:,5] # use a 1-D boolean that broadcasts with y
array([False, False, False, True, True], dtype=bool)
>>> y[b[:,5]]
array([[21, 22, 23, 24, 25, 26, 27],
[28, 29, 30, 31, 32, 33, 34]])
Here the 4th and 5th rows are selected from the indexed array and
combined to make a 2-D array.
Combining index arrays with slices
==================================
Index arrays may be combined with slices. For example: ::
>>> y[np.array([0,2,4]),1:3]
array([[ 1, 2],
[15, 16],
[29, 30]])
In effect, the slice is converted to an index array
np.array([[1,2]]) (shape (1,2)) that is broadcast with the index array
to produce a resultant array of shape (3,2).
Likewise, slicing can be combined with broadcasted boolean indices: ::
>>> y[b[:,5],1:3]
array([[22, 23],
[29, 30]])
Structural indexing tools
=========================
To facilitate easy matching of array shapes with expressions and in
assignments, the np.newaxis object can be used within array indices
to add new dimensions with a size of 1. For example: ::
>>> y.shape
(5, 7)
>>> y[:,np.newaxis,:].shape
(5, 1, 7)
Note that there are no new elements in the array, just that the
dimensionality is increased. This can be handy to combine two
arrays in a way that otherwise would require explicitly reshaping
operations. For example: ::
>>> x = np.arange(5)
>>> x[:,np.newaxis] + x[np.newaxis,:]
array([[0, 1, 2, 3, 4],
[1, 2, 3, 4, 5],
[2, 3, 4, 5, 6],
[3, 4, 5, 6, 7],
[4, 5, 6, 7, 8]])
The ellipsis syntax may be used to indicate selecting in full any
remaining unspecified dimensions. For example: ::
>>> z = np.arange(81).reshape(3,3,3,3)
>>> z[1,...,2]
array([[29, 32, 35],
[38, 41, 44],
[47, 50, 53]])
This is equivalent to: ::
>>> z[1,:,:,2]
array([[29, 32, 35],
[38, 41, 44],
[47, 50, 53]])
Assigning values to indexed arrays
==================================
As mentioned, one can select a subset of an array to assign to using
a single index, slices, and index and mask arrays. The value being
assigned to the indexed array must be shape consistent (the same shape
or broadcastable to the shape the index produces). For example, it is
permitted to assign a constant to a slice: ::
>>> x = np.arange(10)
>>> x[2:7] = 1
or an array of the right size: ::
>>> x[2:7] = np.arange(5)
Note that assignments may result in changes if assigning
higher types to lower types (like floats to ints) or even
exceptions (assigning complex to floats or ints): ::
>>> x[1] = 1.2
>>> x[1]
1
>>> x[1] = 1.2j
<type 'exceptions.TypeError'>: can't convert complex to long; use
long(abs(z))
Unlike some of the references (such as array and mask indices)
assignments are always made to the original data in the array
(indeed, nothing else would make sense!). Note though, that some
actions may not work as one may naively expect. This particular
example is often surprising to people: ::
>>> x = np.arange(0, 50, 10)
>>> x
array([ 0, 10, 20, 30, 40])
>>> x[np.array([1, 1, 3, 1])] += 1
>>> x
array([ 0, 11, 20, 31, 40])
Where people expect that the 1st location will be incremented by 3.
In fact, it will only be incremented by 1. The reason is because
a new array is extracted from the original (as a temporary) containing
the values at 1, 1, 3, 1, then the value 1 is added to the temporary,
and then the temporary is assigned back to the original array. Thus
the value of the array at x[1]+1 is assigned to x[1] three times,
rather than being incremented 3 times.
Dealing with variable numbers of indices within programs
========================================================
The index syntax is very powerful but limiting when dealing with
a variable number of indices. For example, if you want to write
a function that can handle arguments with various numbers of
dimensions without having to write special case code for each
number of possible dimensions, how can that be done? If one
supplies to the index a tuple, the tuple will be interpreted
as a list of indices. For example (using the previous definition
for the array z): ::
>>> indices = (1,1,1,1)
>>> z[indices]
40
So one can use code to construct tuples of any number of indices
and then use these within an index.
Slices can be specified within programs by using the slice() function
in Python. For example: ::
>>> indices = (1,1,1,slice(0,2)) # same as [1,1,1,0:2]
>>> z[indices]
array([39, 40])
Likewise, ellipsis can be specified by code by using the Ellipsis
object: ::
>>> indices = (1, Ellipsis, 1) # same as [1,...,1]
>>> z[indices]
array([[28, 31, 34],
[37, 40, 43],
[46, 49, 52]])
For this reason it is possible to use the output from the np.where()
function directly as an index since it always returns a tuple of index
arrays.
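For example, reusing the y array defined earlier: ::
>>> y[np.where(y > 30)]
array([31, 32, 33, 34])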
Because of the special treatment of tuples, they are not automatically
converted to an array as a list would be. As an example: ::
>>> z[[1,1,1,1]] # produces a large array
array([[[[27, 28, 29],
[30, 31, 32], ...
>>> z[(1,1,1,1)] # returns a single value
40
"""
from __future__ import division, absolute_import, print_function
| bsd-3-clause |
Livefyre/django-cms | cms/south_migrations/0053_auto__add_field_title_published__add_field_title_publisher_is_draft__a.py | 63 | 20404 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Title.published'
db.add_column(u'cms_title', 'published',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Adding field 'Title.publisher_is_draft'
db.add_column(u'cms_title', 'publisher_is_draft',
self.gf('django.db.models.fields.BooleanField')(default=True, db_index=True),
keep_default=False)
# Adding field 'Title.publisher_public'
db.add_column(u'cms_title', 'publisher_public',
self.gf('django.db.models.fields.related.OneToOneField')(related_name='publisher_draft', unique=True, null=True, to=orm['cms.Title']),
keep_default=False)
# Adding field 'Title.publisher_state'
db.add_column(u'cms_title', 'publisher_state',
self.gf('django.db.models.fields.SmallIntegerField')(default=0, db_index=True),
keep_default=False)
# Adding field 'Page.published_languages'
db.add_column(u'cms_page', 'published_languages',
self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True),
keep_default=False)
# Adding field 'Page.languages'
db.add_column(u'cms_page', 'languages',
self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Title.published'
db.delete_column(u'cms_title', 'published')
# Deleting field 'Title.publisher_is_draft'
db.delete_column(u'cms_title', 'publisher_is_draft')
# Deleting field 'Title.publisher_public'
db.delete_column(u'cms_title', 'publisher_public_id')
# Deleting field 'Title.publisher_state'
db.delete_column(u'cms_title', 'publisher_state')
# Deleting field 'Page.published_languages'
db.delete_column(u'cms_page', 'published_languages')
# Deleting field 'Page.languages'
db.delete_column(u'cms_page', 'languages')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.globalpagepermission': {
'Meta': {'object_name': 'GlobalPagePermission'},
'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_recover_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'cms.page': {
'Meta': {'ordering': "('tree_id', 'lft')", 'unique_together': "(('publisher_is_draft', 'application_namespace'),)", 'object_name': 'Page'},
'application_namespace': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'application_urls': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'is_home': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'languages': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'limit_visibility_in_menu': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'published_languages': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'revision_id': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'djangocms_pages'", 'to': u"orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'default': "'INHERIT'", 'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.pagemoderatorstate': {
'Meta': {'ordering': "('page', 'action', '-created')", 'object_name': 'PageModeratorState'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '1000', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'})
},
'cms.pagepermission': {
'Meta': {'object_name': 'PagePermission'},
'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'grant_on': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'cms.pageuser': {
'Meta': {'object_name': 'PageUser', '_ormbases': [u'auth.User']},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_users'", 'to': u"orm['auth.User']"}),
u'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True', 'primary_key': 'True'})
},
'cms.pageusergroup': {
'Meta': {'object_name': 'PageUserGroup', '_ormbases': [u'auth.Group']},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_usergroups'", 'to': u"orm['auth.User']"}),
u'group_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.Group']", 'unique': 'True', 'primary_key': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'cms.placeholderreference': {
'Meta': {'object_name': 'PlaceholderReference', 'db_table': "u'cmsplugin_placeholderreference'", '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'placeholder_ref': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'})
},
'cms.staticplaceholder': {
'Meta': {'object_name': 'StaticPlaceholder'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'blank': 'True'}),
'creation_method': ('django.db.models.fields.CharField', [], {'default': "'code'", 'max_length': '20', 'blank': 'True'}),
'dirty': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'draft': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'static_draft'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'public': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'static_public'", 'null': 'True', 'to': "orm['cms.Placeholder']"})
},
'cms.title': {
'Meta': {'unique_together': "(('language', 'page'),)", 'object_name': 'Title'},
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'has_url_overwrite': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'menu_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'meta_description': ('django.db.models.fields.TextField', [], {'max_length': '155', 'null': 'True', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'title_set'", 'to': "orm['cms.Page']"}),
'page_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Title']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'redirect': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'cms.usersettings': {
'Meta': {'object_name': 'UserSettings'},
'clipboard': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'djangocms_usersettings'", 'to': u"orm['auth.User']"})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['cms'] | bsd-3-clause |
unreal666/youtube-dl | youtube_dl/extractor/vidbit.py | 64 | 2917 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
int_or_none,
js_to_json,
remove_end,
unified_strdate,
)
class VidbitIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?vidbit\.co/(?:watch|embed)\?.*?\bv=(?P<id>[\da-zA-Z]+)'
_TESTS = [{
'url': 'http://www.vidbit.co/watch?v=jkL2yDOEq2',
'md5': '1a34b7f14defe3b8fafca9796892924d',
'info_dict': {
'id': 'jkL2yDOEq2',
'ext': 'mp4',
'title': 'Intro to VidBit',
'description': 'md5:5e0d6142eec00b766cbf114bfd3d16b7',
'thumbnail': r're:https?://.*\.jpg$',
'upload_date': '20160618',
'view_count': int,
'comment_count': int,
}
}, {
'url': 'http://www.vidbit.co/embed?v=jkL2yDOEq2&auto=0&water=0',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(
compat_urlparse.urljoin(url, '/watch?v=%s' % video_id), video_id)
video_url, title = [None] * 2
config = self._parse_json(self._search_regex(
r'(?s)\.setup\(({.+?})\);', webpage, 'setup', default='{}'),
video_id, transform_source=js_to_json)
if config:
if config.get('file'):
video_url = compat_urlparse.urljoin(url, config['file'])
title = config.get('title')
if not video_url:
video_url = compat_urlparse.urljoin(url, self._search_regex(
r'file\s*:\s*(["\'])(?P<url>(?:(?!\1).)+)\1',
webpage, 'video URL', group='url'))
if not title:
title = remove_end(
self._html_search_regex(
(r'<h1>(.+?)</h1>', r'<title>(.+?)</title>'),
webpage, 'title', default=None) or self._og_search_title(webpage),
' - VidBit')
description = self._html_search_meta(
('description', 'og:description', 'twitter:description'),
webpage, 'description')
upload_date = unified_strdate(self._html_search_meta(
'datePublished', webpage, 'upload date'))
view_count = int_or_none(self._search_regex(
r'<strong>(\d+)</strong> views',
webpage, 'view count', fatal=False))
comment_count = int_or_none(self._search_regex(
r'id=["\']cmt_num["\'][^>]*>\((\d+)\)',
webpage, 'comment count', fatal=False))
return {
'id': video_id,
'url': video_url,
'title': title,
'description': description,
'thumbnail': self._og_search_thumbnail(webpage),
'upload_date': upload_date,
'view_count': view_count,
'comment_count': comment_count,
}
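# Illustrative note (editor's addition, not part of the upstream extractor):
# youtube-dl only uses this class once it is registered in its extractor list;
# a rough manual invocation, assuming youtube-dl is installed, could be:
# >>> from youtube_dl import YoutubeDL
# >>> YoutubeDL().extract_info('http://www.vidbit.co/watch?v=jkL2yDOEq2',
# ...                          download=False)  # doctest: +SKIP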
| unlicense |
xzh86/scikit-learn | sklearn/externals/joblib/__init__.py | 86 | 4795 | """ Joblib is a set of tools to provide **lightweight pipelining in
Python**. In particular, joblib offers:
1. transparent disk-caching of the output values and lazy re-evaluation
(memoize pattern)
2. easy simple parallel computing
3. logging and tracing of the execution
Joblib is optimized to be **fast** and **robust** in particular on large
data and has specific optimizations for `numpy` arrays. It is
**BSD-licensed**.
============================== ============================================
**User documentation**: http://pythonhosted.org/joblib
**Download packages**: http://pypi.python.org/pypi/joblib#downloads
**Source code**: http://github.com/joblib/joblib
**Report issues**: http://github.com/joblib/joblib/issues
============================== ============================================
Vision
--------
The vision is to provide tools to easily achieve better performance and
reproducibility when working with long running jobs.
* **Avoid computing the same thing twice**: code is rerun over and
over, for instance when prototyping computational-heavy jobs (as in
scientific development), but hand-crafted solutions to alleviate this
issue are error-prone and often lead to unreproducible results
* **Persist to disk transparently**: efficiently persisting
arbitrary objects containing large data is hard. Using
joblib's caching mechanism avoids hand-written persistence and
implicitly links the file on disk to the execution context of
the original Python object. As a result, joblib's persistence is
good for resuming an application status or computational job, e.g.
after a crash.
Joblib strives to address these problems while **leaving your code and
your flow control as unmodified as possible** (no framework, no new
paradigms).
Main features
------------------
1) **Transparent and fast disk-caching of output value:** a memoize or
make-like functionality for Python functions that works well for
arbitrary Python objects, including very large numpy arrays. Separate
persistence and flow-execution logic from domain logic or algorithmic
code by writing the operations as a set of steps with well-defined
inputs and outputs: Python functions. Joblib can save their
computation to disk and rerun it only if necessary::
>>> import numpy as np
>>> from sklearn.externals.joblib import Memory
>>> mem = Memory(cachedir='/tmp/joblib')
>>> import numpy as np
>>> a = np.vander(np.arange(3)).astype(np.float)
>>> square = mem.cache(np.square)
>>> b = square(a) # doctest: +ELLIPSIS
________________________________________________________________________________
[Memory] Calling square...
square(array([[ 0., 0., 1.],
[ 1., 1., 1.],
[ 4., 2., 1.]]))
___________________________________________________________square - 0...s, 0.0min
>>> c = square(a)
>>> # The above call did not trigger an evaluation
2) **Embarrassingly parallel helper:** to make it easy to write readable
parallel code and debug it quickly::
>>> from sklearn.externals.joblib import Parallel, delayed
>>> from math import sqrt
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
3) **Logging/tracing:** The different functionalities will
progressively acquire better logging mechanisms to help track what
has been run, and capture I/O easily. In addition, Joblib will
provide a few I/O primitives, to easily define logging and
display streams, and provide a way of compiling a report.
We want to be able to quickly inspect what has been run.
4) **Fast compressed Persistence**: a replacement for pickle to work
efficiently on Python objects containing large data (
*joblib.dump* & *joblib.load* ).
..
>>> import shutil ; shutil.rmtree('/tmp/joblib/')
"""
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.9.0b3'
from .memory import Memory, MemorizedResult
from .logger import PrintTime
from .logger import Logger
from .hashing import hash
from .numpy_pickle import dump
from .numpy_pickle import load
from .parallel import Parallel
from .parallel import delayed
from .parallel import cpu_count
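# Illustrative sketch (editor's addition, not part of the upstream package):
# the dump/load pair mentioned in point 4 of the module docstring; the file
# name below is hypothetical, and dump() returns the list of file names it
# created.
# >>> import numpy as np
# >>> dump(np.arange(3), '/tmp/arr.pkl')  # doctest: +SKIP
# >>> load('/tmp/arr.pkl')                # doctest: +SKIP
# array([0, 1, 2])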
| bsd-3-clause |
gmariotti/lassim | source/core/core_system.py | 1 | 5422 | from sortedcontainers import SortedDict, SortedSet
__author__ = "Guido Pio Mariotti"
__copyright__ = "Copyright (C) 2016 Guido Pio Mariotti"
__license__ = "GNU General Public License v3.0"
__version__ = "0.1.0"
# TODO - consider the possibility of a factory method for creating a new
# CoreSystem starting from the map of reactions. Should it be a static
# method? Or can be exploited the list of tfacts of an existing object?
class CoreSystem:
def __init__(self, network: SortedDict, correction: bool = True):
"""
Network must be something generated by the reading of the file with
the transcription factor, in the form of a dictionary
:param network: dictionary with, for each transcription factor,
the set of transcription factors on which has influence
:param correction: decides if transcription factors with no reactions
must be "corrected" or not with a reaction with all the other
transcription factors plus itself
"""
self._network = network
self.tfacts = SortedSet(network.keys()).union(
{tfact for tflist in network.viewvalues() for tfact in tflist}
)
self.__reactions, self.__react_count = self.__reactions_from_network(
correction
)
def __reactions_from_network(self, correction: bool) -> (SortedDict, int):
"""
The dictionary of reactions returned is reversed with respect to the
network one. Each transcription factor maps to the set of transcription
factors that affect it
:param correction: decides if transcription factors with no reactions
must be "corrected" or not with a reaction with all the other
transcription factors plus itself
:return: reactions dictionary and number of reactions
"""
reactions_sorted = SortedDict()
reactions_count = 0
for tfact, reactions in self._network.items():
# in this way, even if a transcription factor is not influenced
# by anyone, it still has its own (empty) set of reactions
if tfact not in reactions_sorted:
reactions_sorted[tfact] = SortedSet()
for reaction in reactions:
if reaction not in reactions_sorted:
reactions_sorted[reaction] = SortedSet()
reactions_sorted[reaction].add(tfact)
reactions_count += 1
if correction:
# for each empty set, all the transcription factors are added
for tfact, reactions in reactions_sorted.items():
if len(reactions) == 0:
reactions_sorted[tfact] = SortedSet(self.tfacts)
reactions_count += len(self.tfacts)
return reactions_sorted, reactions_count
def from_tfacts_to_ids(self) -> (str, int):
"""
Maps each transcription factor to an id, starting from 0 until
len(tfacts) - 1
:return: (tfact, id)
"""
for i in range(0, len(self.tfacts)):
yield self.tfacts[i], i
def from_ids_to_tfacts(self) -> (str, int):
"""
Maps each id to its transcription factor. Result is always the same
because the transcription factors are an ordered set
:return: (id, tfact)
"""
for i in range(0, len(self.tfacts)):
yield i, self.tfacts[i]
def from_reactions_to_ids(self) -> (int, SortedSet):
"""
Maps each reaction with its corresponding id.
:return: (id, set(reactions_ids))
"""
tfacts_ids = {key: value for key, value in self.from_tfacts_to_ids()}
for tfact, reactions in self.reactions.items():
reactions_ids = SortedSet([tfacts_ids[tf] for tf in reactions])
yield tfacts_ids[tfact], reactions_ids
def from_reactions_ids_to_str(self, reactions: SortedDict
) -> (str, SortedSet):
"""
Maps a dictionary of reactions from ids to string names.
:param reactions: Dictionary of <int:set(int)> representing reactions as
integer numbers
:return: The same dictionary as input with string names instead of
integer ids.
"""
tfacts_ids = {key: value for key, value in self.from_ids_to_tfacts()}
for key, value in reactions.viewitems():
try:
react_set = SortedSet([tfacts_ids[val] for val in value])
yield tfacts_ids[key], react_set
except AttributeError:
raise AttributeError(
"Value in key and/or value not present in core\nkey -> {}\n"
"value -> {}".format(key, value)
)
@property
def num_tfacts(self):
return len(self.tfacts)
@property
def reactions(self):
return self.__reactions
@property
def react_count(self):
return self.__react_count
def __str__(self):
title = "== Core =="
tfacts_title = "= List of transcription factors ="
tfacts = ", ".join(self.tfacts)
reactions_title = "= List of reactions ="
reactions = "\n".join([key + " --> " + ", ".join(value)
for key, value in self.reactions.viewitems()])
return "\n".join(
[title, tfacts_title, tfacts, reactions_title, reactions]
)
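# Illustrative usage sketch (editor's addition, not part of the original
# module); the transcription factor names are made up and assume the
# sortedcontainers 1.x API used above. With the default correction, 'NFKB1'
# (which nothing influences) gets a reaction with every transcription factor,
# so react_count is 1 + 2 = 3.
# >>> network = SortedDict({'NFKB1': SortedSet(['STAT1']), 'STAT1': SortedSet()})
# >>> core = CoreSystem(network)
# >>> list(core.tfacts)
# ['NFKB1', 'STAT1']
# >>> core.react_count
# 3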
| gpl-3.0 |
hefen1/chromium | third_party/mojo/src/mojo/public/third_party/jinja2/environment.py | 614 | 47244 | # -*- coding: utf-8 -*-
"""
jinja2.environment
~~~~~~~~~~~~~~~~~~
Provides a class that holds runtime and parsing time options.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
from jinja2 import nodes
from jinja2.defaults import BLOCK_START_STRING, \
BLOCK_END_STRING, VARIABLE_START_STRING, VARIABLE_END_STRING, \
COMMENT_START_STRING, COMMENT_END_STRING, LINE_STATEMENT_PREFIX, \
LINE_COMMENT_PREFIX, TRIM_BLOCKS, NEWLINE_SEQUENCE, \
DEFAULT_FILTERS, DEFAULT_TESTS, DEFAULT_NAMESPACE, \
KEEP_TRAILING_NEWLINE, LSTRIP_BLOCKS
from jinja2.lexer import get_lexer, TokenStream
from jinja2.parser import Parser
from jinja2.nodes import EvalContext
from jinja2.optimizer import optimize
from jinja2.compiler import generate
from jinja2.runtime import Undefined, new_context
from jinja2.exceptions import TemplateSyntaxError, TemplateNotFound, \
TemplatesNotFound, TemplateRuntimeError
from jinja2.utils import import_string, LRUCache, Markup, missing, \
concat, consume, internalcode
from jinja2._compat import imap, ifilter, string_types, iteritems, \
text_type, reraise, implements_iterator, implements_to_string, \
get_next, encode_filename, PY2, PYPY
from functools import reduce
# for direct template usage we have up to ten living environments
_spontaneous_environments = LRUCache(10)
# the function to create jinja traceback objects. This is dynamically
# imported on the first exception in the exception handler.
_make_traceback = None
def get_spontaneous_environment(*args):
"""Return a new spontaneous environment. A spontaneous environment is an
unnamed and inaccessible (in theory) environment that is used for
templates generated from a string and not from the file system.
"""
try:
env = _spontaneous_environments.get(args)
except TypeError:
return Environment(*args)
if env is not None:
return env
_spontaneous_environments[args] = env = Environment(*args)
env.shared = True
return env
def create_cache(size):
"""Return the cache class for the given size."""
if size == 0:
return None
if size < 0:
return {}
return LRUCache(size)
def copy_cache(cache):
"""Create an empty copy of the given cache."""
if cache is None:
return None
elif type(cache) is dict:
return {}
return LRUCache(cache.capacity)
def load_extensions(environment, extensions):
"""Load the extensions from the list and bind it to the environment.
Returns a dict of instantiated environments.
"""
result = {}
for extension in extensions:
if isinstance(extension, string_types):
extension = import_string(extension)
result[extension.identifier] = extension(environment)
return result
def _environment_sanity_check(environment):
"""Perform a sanity check on the environment."""
assert issubclass(environment.undefined, Undefined), 'undefined must ' \
'be a subclass of undefined because filters depend on it.'
assert environment.block_start_string != \
environment.variable_start_string != \
environment.comment_start_string, 'block, variable and comment ' \
'start strings must be different'
assert environment.newline_sequence in ('\r', '\r\n', '\n'), \
'newline_sequence set to unknown line ending string.'
return environment
class Environment(object):
r"""The core component of Jinja is the `Environment`. It contains
important shared variables like configuration, filters, tests,
globals and others. Instances of this class may be modified if
they are not shared and if no template was loaded so far.
Modifications on environments after the first template was loaded
will lead to surprising effects and undefined behavior.
Here the possible initialization parameters:
`block_start_string`
The string marking the begin of a block. Defaults to ``'{%'``.
`block_end_string`
The string marking the end of a block. Defaults to ``'%}'``.
`variable_start_string`
The string marking the begin of a print statement.
Defaults to ``'{{'``.
`variable_end_string`
The string marking the end of a print statement. Defaults to
``'}}'``.
`comment_start_string`
The string marking the begin of a comment. Defaults to ``'{#'``.
`comment_end_string`
The string marking the end of a comment. Defaults to ``'#}'``.
`line_statement_prefix`
If given and a string, this will be used as prefix for line based
statements. See also :ref:`line-statements`.
`line_comment_prefix`
If given and a string, this will be used as prefix for line based
comments. See also :ref:`line-statements`.
.. versionadded:: 2.2
`trim_blocks`
If this is set to ``True`` the first newline after a block is
removed (block, not variable tag!). Defaults to `False`.
`lstrip_blocks`
If this is set to ``True`` leading spaces and tabs are stripped
from the start of a line to a block. Defaults to `False`.
`newline_sequence`
The sequence that starts a newline. Must be one of ``'\r'``,
``'\n'`` or ``'\r\n'``. The default is ``'\n'`` which is a
useful default for Linux and OS X systems as well as web
applications.
`keep_trailing_newline`
Preserve the trailing newline when rendering templates.
The default is ``False``, which causes a single newline,
if present, to be stripped from the end of the template.
.. versionadded:: 2.7
`extensions`
List of Jinja extensions to use. This can either be import paths
as strings or extension classes. For more information have a
look at :ref:`the extensions documentation <jinja-extensions>`.
`optimized`
should the optimizer be enabled? Default is `True`.
`undefined`
:class:`Undefined` or a subclass of it that is used to represent
undefined values in the template.
`finalize`
A callable that can be used to process the result of a variable
expression before it is output. For example one can convert
`None` implicitly into an empty string here.
`autoescape`
If set to true the XML/HTML autoescaping feature is enabled by
default. For more details about auto escaping see
:class:`~jinja2.utils.Markup`. As of Jinja 2.4 this can also
be a callable that is passed the template name and has to
return `True` or `False` depending on autoescape should be
enabled by default.
.. versionchanged:: 2.4
`autoescape` can now be a function
`loader`
The template loader for this environment.
`cache_size`
The size of the cache. Per default this is ``50`` which means
that if more than 50 templates are loaded the loader will clean
out the least recently used template. If the cache size is set to
``0`` templates are recompiled all the time, if the cache size is
``-1`` the cache will not be cleaned.
`auto_reload`
Some loaders load templates from locations where the template
sources may change (i.e. file system or database). If
`auto_reload` is set to `True` (default) every time a template is
requested the loader checks if the source changed and if yes, it
will reload the template. For higher performance it's possible to
disable that.
`bytecode_cache`
If set to a bytecode cache object, this object will provide a
cache for the internal Jinja bytecode so that templates don't
have to be parsed if they were not changed.
See :ref:`bytecode-cache` for more information.
"""
#: if this environment is sandboxed. Modifying this variable won't make
#: the environment sandboxed though. For a real sandboxed environment
#: have a look at jinja2.sandbox. This flag alone controls the code
#: generation by the compiler.
sandboxed = False
#: True if the environment is just an overlay
overlayed = False
#: the environment this environment is linked to if it is an overlay
linked_to = None
#: shared environments have this set to `True`. A shared environment
#: must not be modified
shared = False
#: these are currently EXPERIMENTAL undocumented features.
exception_handler = None
exception_formatter = None
def __init__(self,
block_start_string=BLOCK_START_STRING,
block_end_string=BLOCK_END_STRING,
variable_start_string=VARIABLE_START_STRING,
variable_end_string=VARIABLE_END_STRING,
comment_start_string=COMMENT_START_STRING,
comment_end_string=COMMENT_END_STRING,
line_statement_prefix=LINE_STATEMENT_PREFIX,
line_comment_prefix=LINE_COMMENT_PREFIX,
trim_blocks=TRIM_BLOCKS,
lstrip_blocks=LSTRIP_BLOCKS,
newline_sequence=NEWLINE_SEQUENCE,
keep_trailing_newline=KEEP_TRAILING_NEWLINE,
extensions=(),
optimized=True,
undefined=Undefined,
finalize=None,
autoescape=False,
loader=None,
cache_size=50,
auto_reload=True,
bytecode_cache=None):
# !!Important notice!!
# The constructor accepts quite a few arguments that should be
# passed by keyword rather than position. However it's important to
# not change the order of arguments because it's used at least
# internally in those cases:
# - spontaneous environments (i18n extension and Template)
# - unittests
# If parameter changes are required only add parameters at the end
# and don't change the arguments (or the defaults!) of the arguments
# existing already.
# lexer / parser information
self.block_start_string = block_start_string
self.block_end_string = block_end_string
self.variable_start_string = variable_start_string
self.variable_end_string = variable_end_string
self.comment_start_string = comment_start_string
self.comment_end_string = comment_end_string
self.line_statement_prefix = line_statement_prefix
self.line_comment_prefix = line_comment_prefix
self.trim_blocks = trim_blocks
self.lstrip_blocks = lstrip_blocks
self.newline_sequence = newline_sequence
self.keep_trailing_newline = keep_trailing_newline
# runtime information
self.undefined = undefined
self.optimized = optimized
self.finalize = finalize
self.autoescape = autoescape
# defaults
self.filters = DEFAULT_FILTERS.copy()
self.tests = DEFAULT_TESTS.copy()
self.globals = DEFAULT_NAMESPACE.copy()
# set the loader provided
self.loader = loader
self.cache = create_cache(cache_size)
self.bytecode_cache = bytecode_cache
self.auto_reload = auto_reload
# load extensions
self.extensions = load_extensions(self, extensions)
_environment_sanity_check(self)
def add_extension(self, extension):
"""Adds an extension after the environment was created.
.. versionadded:: 2.5
"""
self.extensions.update(load_extensions(self, [extension]))
def extend(self, **attributes):
"""Add the items to the instance of the environment if they do not exist
yet. This is used by :ref:`extensions <writing-extensions>` to register
callbacks and configuration values without breaking inheritance.
"""
for key, value in iteritems(attributes):
if not hasattr(self, key):
setattr(self, key, value)
def overlay(self, block_start_string=missing, block_end_string=missing,
variable_start_string=missing, variable_end_string=missing,
comment_start_string=missing, comment_end_string=missing,
line_statement_prefix=missing, line_comment_prefix=missing,
trim_blocks=missing, lstrip_blocks=missing,
extensions=missing, optimized=missing,
undefined=missing, finalize=missing, autoescape=missing,
loader=missing, cache_size=missing, auto_reload=missing,
bytecode_cache=missing):
"""Create a new overlay environment that shares all the data with the
current environment except of cache and the overridden attributes.
Extensions cannot be removed for an overlayed environment. An overlayed
environment automatically gets all the extensions of the environment it
is linked to plus optional extra extensions.
Creating overlays should happen after the initial environment was set
up completely. Not all attributes are truly linked, some are just
copied over so modifications on the original environment may not shine
through.
"""
args = dict(locals())
del args['self'], args['cache_size'], args['extensions']
rv = object.__new__(self.__class__)
rv.__dict__.update(self.__dict__)
rv.overlayed = True
rv.linked_to = self
for key, value in iteritems(args):
if value is not missing:
setattr(rv, key, value)
if cache_size is not missing:
rv.cache = create_cache(cache_size)
else:
rv.cache = copy_cache(self.cache)
rv.extensions = {}
for key, value in iteritems(self.extensions):
rv.extensions[key] = value.bind(rv)
if extensions is not missing:
rv.extensions.update(load_extensions(rv, extensions))
return _environment_sanity_check(rv)
lexer = property(get_lexer, doc="The lexer for this environment.")
def iter_extensions(self):
"""Iterates over the extensions by priority."""
return iter(sorted(self.extensions.values(),
key=lambda x: x.priority))
def getitem(self, obj, argument):
"""Get an item or attribute of an object but prefer the item."""
try:
return obj[argument]
except (TypeError, LookupError):
if isinstance(argument, string_types):
try:
attr = str(argument)
except Exception:
pass
else:
try:
return getattr(obj, attr)
except AttributeError:
pass
return self.undefined(obj=obj, name=argument)
def getattr(self, obj, attribute):
"""Get an item or attribute of an object but prefer the attribute.
Unlike :meth:`getitem` the attribute *must* be a bytestring.
"""
try:
return getattr(obj, attribute)
except AttributeError:
pass
try:
return obj[attribute]
except (TypeError, LookupError, AttributeError):
return self.undefined(obj=obj, name=attribute)
def call_filter(self, name, value, args=None, kwargs=None,
context=None, eval_ctx=None):
"""Invokes a filter on a value the same way the compiler does it.
.. versionadded:: 2.7
"""
func = self.filters.get(name)
if func is None:
raise TemplateRuntimeError('no filter named %r' % name)
args = [value] + list(args or ())
if getattr(func, 'contextfilter', False):
if context is None:
raise TemplateRuntimeError('Attempted to invoke context '
'filter without context')
args.insert(0, context)
elif getattr(func, 'evalcontextfilter', False):
if eval_ctx is None:
if context is not None:
eval_ctx = context.eval_ctx
else:
eval_ctx = EvalContext(self)
args.insert(0, eval_ctx)
elif getattr(func, 'environmentfilter', False):
args.insert(0, self)
return func(*args, **(kwargs or {}))
def call_test(self, name, value, args=None, kwargs=None):
"""Invokes a test on a value the same way the compiler does it.
.. versionadded:: 2.7
"""
func = self.tests.get(name)
if func is None:
raise TemplateRuntimeError('no test named %r' % name)
return func(value, *(args or ()), **(kwargs or {}))
@internalcode
def parse(self, source, name=None, filename=None):
"""Parse the sourcecode and return the abstract syntax tree. This
tree of nodes is used by the compiler to convert the template into
executable source- or bytecode. This is useful for debugging or to
extract information from templates.
If you are :ref:`developing Jinja2 extensions <writing-extensions>`
this gives you a good overview of the node tree generated.
"""
try:
return self._parse(source, name, filename)
except TemplateSyntaxError:
exc_info = sys.exc_info()
self.handle_exception(exc_info, source_hint=source)
def _parse(self, source, name, filename):
"""Internal parsing function used by `parse` and `compile`."""
return Parser(self, source, name, encode_filename(filename)).parse()
def lex(self, source, name=None, filename=None):
"""Lex the given sourcecode and return a generator that yields
tokens as tuples in the form ``(lineno, token_type, value)``.
This can be useful for :ref:`extension development <writing-extensions>`
and debugging templates.
This does not perform preprocessing. If you want the preprocessing
of the extensions to be applied you have to filter source through
the :meth:`preprocess` method.
"""
source = text_type(source)
try:
return self.lexer.tokeniter(source, name, filename)
except TemplateSyntaxError:
exc_info = sys.exc_info()
self.handle_exception(exc_info, source_hint=source)
def preprocess(self, source, name=None, filename=None):
"""Preprocesses the source with all extensions. This is automatically
called for all parsing and compiling methods but *not* for :meth:`lex`
because there you usually only want the actual source tokenized.
"""
return reduce(lambda s, e: e.preprocess(s, name, filename),
self.iter_extensions(), text_type(source))
def _tokenize(self, source, name, filename=None, state=None):
"""Called by the parser to do the preprocessing and filtering
for all the extensions. Returns a :class:`~jinja2.lexer.TokenStream`.
"""
source = self.preprocess(source, name, filename)
stream = self.lexer.tokenize(source, name, filename, state)
for ext in self.iter_extensions():
stream = ext.filter_stream(stream)
if not isinstance(stream, TokenStream):
stream = TokenStream(stream, name, filename)
return stream
def _generate(self, source, name, filename, defer_init=False):
"""Internal hook that can be overridden to hook a different generate
method in.
.. versionadded:: 2.5
"""
return generate(source, self, name, filename, defer_init=defer_init)
def _compile(self, source, filename):
"""Internal hook that can be overridden to hook a different compile
method in.
.. versionadded:: 2.5
"""
return compile(source, filename, 'exec')
@internalcode
def compile(self, source, name=None, filename=None, raw=False,
defer_init=False):
"""Compile a node or template source code. The `name` parameter is
the load name of the template after it was joined using
:meth:`join_path` if necessary, not the filename on the file system.
The `filename` parameter is the estimated filename of the template on
the file system. If the template came from a database or memory this
can be omitted.
The return value of this method is a python code object. If the `raw`
parameter is `True` the return value will be a string with python
code equivalent to the bytecode returned otherwise. This method is
mainly used internally.
`defer_init` is used internally to aid the module code generator. This
causes the generated code to be able to import without the global
environment variable to be set.
.. versionadded:: 2.4
`defer_init` parameter added.
"""
source_hint = None
try:
if isinstance(source, string_types):
source_hint = source
source = self._parse(source, name, filename)
if self.optimized:
source = optimize(source, self)
source = self._generate(source, name, filename,
defer_init=defer_init)
if raw:
return source
if filename is None:
filename = '<template>'
else:
filename = encode_filename(filename)
return self._compile(source, filename)
except TemplateSyntaxError:
exc_info = sys.exc_info()
self.handle_exception(exc_info, source_hint=source)
def compile_expression(self, source, undefined_to_none=True):
"""A handy helper method that returns a callable that accepts keyword
arguments that appear as variables in the expression. If called it
returns the result of the expression.
This is useful if applications want to use the same rules as Jinja
in template "configuration files" or similar situations.
Example usage:
>>> env = Environment()
>>> expr = env.compile_expression('foo == 42')
>>> expr(foo=23)
False
>>> expr(foo=42)
True
Per default the return value is converted to `None` if the
expression returns an undefined value. This can be changed
by setting `undefined_to_none` to `False`.
>>> env.compile_expression('var')() is None
True
>>> env.compile_expression('var', undefined_to_none=False)()
Undefined
.. versionadded:: 2.1
"""
parser = Parser(self, source, state='variable')
exc_info = None
try:
expr = parser.parse_expression()
if not parser.stream.eos:
raise TemplateSyntaxError('chunk after expression',
parser.stream.current.lineno,
None, None)
expr.set_environment(self)
except TemplateSyntaxError:
exc_info = sys.exc_info()
if exc_info is not None:
self.handle_exception(exc_info, source_hint=source)
body = [nodes.Assign(nodes.Name('result', 'store'), expr, lineno=1)]
template = self.from_string(nodes.Template(body, lineno=1))
return TemplateExpression(template, undefined_to_none)
def compile_templates(self, target, extensions=None, filter_func=None,
zip='deflated', log_function=None,
ignore_errors=True, py_compile=False):
"""Finds all the templates the loader can find, compiles them
and stores them in `target`. If `zip` is `None`, instead of in a
zipfile, the templates will be stored in a directory.
By default a deflate zip algorithm is used, to switch to
the stored algorithm, `zip` can be set to ``'stored'``.
`extensions` and `filter_func` are passed to :meth:`list_templates`.
Each template returned will be compiled to the target folder or
zipfile.
By default template compilation errors are ignored. In case a
log function is provided, errors are logged. If you want template
syntax errors to abort the compilation you can set `ignore_errors`
to `False` and you will get an exception on syntax errors.
If `py_compile` is set to `True` .pyc files will be written to the
target instead of standard .py files. This flag does not do anything
on pypy and Python 3, where pyc files are not picked up automatically
and don't give much benefit.
.. versionadded:: 2.4
"""
from jinja2.loaders import ModuleLoader
if log_function is None:
log_function = lambda x: None
if py_compile:
if not PY2 or PYPY:
from warnings import warn
warn(Warning('py_compile has no effect on pypy or Python 3'))
py_compile = False
else:
import imp, marshal
py_header = imp.get_magic() + \
u'\xff\xff\xff\xff'.encode('iso-8859-15')
# Python 3.3 added a source filesize to the header
if sys.version_info >= (3, 3):
py_header += u'\x00\x00\x00\x00'.encode('iso-8859-15')
def write_file(filename, data, mode):
if zip:
info = ZipInfo(filename)
info.external_attr = 0o755 << 16
zip_file.writestr(info, data)
else:
f = open(os.path.join(target, filename), mode)
try:
f.write(data)
finally:
f.close()
if zip is not None:
from zipfile import ZipFile, ZipInfo, ZIP_DEFLATED, ZIP_STORED
zip_file = ZipFile(target, 'w', dict(deflated=ZIP_DEFLATED,
stored=ZIP_STORED)[zip])
log_function('Compiling into Zip archive "%s"' % target)
else:
if not os.path.isdir(target):
os.makedirs(target)
log_function('Compiling into folder "%s"' % target)
try:
for name in self.list_templates(extensions, filter_func):
source, filename, _ = self.loader.get_source(self, name)
try:
code = self.compile(source, name, filename, True, True)
except TemplateSyntaxError as e:
if not ignore_errors:
raise
log_function('Could not compile "%s": %s' % (name, e))
continue
filename = ModuleLoader.get_module_filename(name)
if py_compile:
c = self._compile(code, encode_filename(filename))
write_file(filename + 'c', py_header +
marshal.dumps(c), 'wb')
log_function('Byte-compiled "%s" as %s' %
(name, filename + 'c'))
else:
write_file(filename, code, 'w')
log_function('Compiled "%s" as %s' % (name, filename))
finally:
if zip:
zip_file.close()
log_function('Finished compiling templates')
def list_templates(self, extensions=None, filter_func=None):
"""Returns a list of templates for this environment. This requires
that the loader supports the loader's
:meth:`~BaseLoader.list_templates` method.
If there are other files in the template folder besides the
actual templates, the returned list can be filtered. There are two
ways: either `extensions` is set to a list of file extensions for
templates, or a `filter_func` can be provided which is a callable that
is passed a template name and should return `True` if it should end up
in the result list.
If the loader does not support that, a :exc:`TypeError` is raised.
.. versionadded:: 2.4
"""
x = self.loader.list_templates()
if extensions is not None:
if filter_func is not None:
raise TypeError('either extensions or filter_func '
'can be passed, but not both')
filter_func = lambda x: '.' in x and \
x.rsplit('.', 1)[1] in extensions
if filter_func is not None:
x = ifilter(filter_func, x)
return x
def handle_exception(self, exc_info=None, rendered=False, source_hint=None):
"""Exception handling helper. This is used internally to either raise
rewritten exceptions or return a rendered traceback for the template.
"""
global _make_traceback
if exc_info is None:
exc_info = sys.exc_info()
# the debugging module is imported when it's used for the first time.
# we're doing a lot of stuff there and for applications that do not
# get any exceptions in template rendering there is no need to load
# all of that.
if _make_traceback is None:
from jinja2.debug import make_traceback as _make_traceback
traceback = _make_traceback(exc_info, source_hint)
if rendered and self.exception_formatter is not None:
return self.exception_formatter(traceback)
if self.exception_handler is not None:
self.exception_handler(traceback)
exc_type, exc_value, tb = traceback.standard_exc_info
reraise(exc_type, exc_value, tb)
def join_path(self, template, parent):
"""Join a template with the parent. By default all the lookups are
relative to the loader root so this method returns the `template`
parameter unchanged, but if the paths should be relative to the
parent template, this function can be used to calculate the real
template name.
Subclasses may override this method and implement template path
joining here.
"""
return template
@internalcode
def _load_template(self, name, globals):
if self.loader is None:
raise TypeError('no loader for this environment specified')
if self.cache is not None:
template = self.cache.get(name)
if template is not None and (not self.auto_reload or \
template.is_up_to_date):
return template
template = self.loader.load(self, name, globals)
if self.cache is not None:
self.cache[name] = template
return template
@internalcode
def get_template(self, name, parent=None, globals=None):
"""Load a template from the loader. If a loader is configured this
method asks the loader for the template and returns a :class:`Template`.
If the `parent` parameter is not `None`, :meth:`join_path` is called
to get the real template name before loading.
The `globals` parameter can be used to provide template wide globals.
These variables are available in the context at render time.
If the template does not exist a :exc:`TemplateNotFound` exception is
raised.
.. versionchanged:: 2.4
If `name` is a :class:`Template` object it is returned from the
function unchanged.
"""
if isinstance(name, Template):
return name
if parent is not None:
name = self.join_path(name, parent)
return self._load_template(name, self.make_globals(globals))
@internalcode
def select_template(self, names, parent=None, globals=None):
"""Works like :meth:`get_template` but tries a number of templates
before it fails. If it cannot find any of the templates, it will
raise a :exc:`TemplatesNotFound` exception.
.. versionadded:: 2.3
.. versionchanged:: 2.4
If `names` contains a :class:`Template` object it is returned
from the function unchanged.
"""
if not names:
raise TemplatesNotFound(message=u'Tried to select from an empty list '
u'of templates.')
globals = self.make_globals(globals)
for name in names:
if isinstance(name, Template):
return name
if parent is not None:
name = self.join_path(name, parent)
try:
return self._load_template(name, globals)
except TemplateNotFound:
pass
raise TemplatesNotFound(names)
@internalcode
def get_or_select_template(self, template_name_or_list,
parent=None, globals=None):
"""Does a typecheck and dispatches to :meth:`select_template`
if an iterable of template names is given, otherwise to
:meth:`get_template`.
.. versionadded:: 2.3
"""
if isinstance(template_name_or_list, string_types):
return self.get_template(template_name_or_list, parent, globals)
elif isinstance(template_name_or_list, Template):
return template_name_or_list
return self.select_template(template_name_or_list, parent, globals)
def from_string(self, source, globals=None, template_class=None):
"""Load a template from a string. This parses the source given and
returns a :class:`Template` object.
"""
globals = self.make_globals(globals)
cls = template_class or self.template_class
return cls.from_code(self, self.compile(source), globals, None)
def make_globals(self, d):
"""Return a dict for the globals."""
if not d:
return self.globals
return dict(self.globals, **d)
class Template(object):
"""The central template object. This class represents a compiled template
and is used to evaluate it.
Normally the template object is generated from an :class:`Environment` but
it also has a constructor that makes it possible to create a template
instance directly using the constructor. It takes the same arguments as
the environment constructor but it's not possible to specify a loader.
Every template object has a few methods and members that are guaranteed
to exist. However it's important that a template object should be
considered immutable. Modifications on the object are not supported.
Template objects created from the constructor rather than an environment
do have an `environment` attribute that points to a temporary environment
that is probably shared with other templates created with the constructor
and compatible settings.
>>> template = Template('Hello {{ name }}!')
>>> template.render(name='John Doe')
u'Hello John Doe!'
>>> stream = template.stream(name='John Doe')
>>> stream.next()
u'Hello John Doe!'
>>> stream.next()
Traceback (most recent call last):
...
StopIteration
"""
def __new__(cls, source,
block_start_string=BLOCK_START_STRING,
block_end_string=BLOCK_END_STRING,
variable_start_string=VARIABLE_START_STRING,
variable_end_string=VARIABLE_END_STRING,
comment_start_string=COMMENT_START_STRING,
comment_end_string=COMMENT_END_STRING,
line_statement_prefix=LINE_STATEMENT_PREFIX,
line_comment_prefix=LINE_COMMENT_PREFIX,
trim_blocks=TRIM_BLOCKS,
lstrip_blocks=LSTRIP_BLOCKS,
newline_sequence=NEWLINE_SEQUENCE,
keep_trailing_newline=KEEP_TRAILING_NEWLINE,
extensions=(),
optimized=True,
undefined=Undefined,
finalize=None,
autoescape=False):
env = get_spontaneous_environment(
block_start_string, block_end_string, variable_start_string,
variable_end_string, comment_start_string, comment_end_string,
line_statement_prefix, line_comment_prefix, trim_blocks,
lstrip_blocks, newline_sequence, keep_trailing_newline,
frozenset(extensions), optimized, undefined, finalize, autoescape,
None, 0, False, None)
return env.from_string(source, template_class=cls)
@classmethod
def from_code(cls, environment, code, globals, uptodate=None):
"""Creates a template object from compiled code and the globals. This
is used by the loaders and environment to create a template object.
"""
namespace = {
'environment': environment,
'__file__': code.co_filename
}
exec(code, namespace)
rv = cls._from_namespace(environment, namespace, globals)
rv._uptodate = uptodate
return rv
@classmethod
def from_module_dict(cls, environment, module_dict, globals):
"""Creates a template object from a module. This is used by the
module loader to create a template object.
.. versionadded:: 2.4
"""
return cls._from_namespace(environment, module_dict, globals)
@classmethod
def _from_namespace(cls, environment, namespace, globals):
t = object.__new__(cls)
t.environment = environment
t.globals = globals
t.name = namespace['name']
t.filename = namespace['__file__']
t.blocks = namespace['blocks']
# render function and module
t.root_render_func = namespace['root']
t._module = None
# debug and loader helpers
t._debug_info = namespace['debug_info']
t._uptodate = None
# store the reference
namespace['environment'] = environment
namespace['__jinja_template__'] = t
return t
def render(self, *args, **kwargs):
"""This method accepts the same arguments as the `dict` constructor:
A dict, a dict subclass or some keyword arguments. If no arguments
are given the context will be empty. These two calls do the same::
template.render(knights='that say nih')
template.render({'knights': 'that say nih'})
This will return the rendered template as unicode string.
"""
vars = dict(*args, **kwargs)
try:
return concat(self.root_render_func(self.new_context(vars)))
except Exception:
exc_info = sys.exc_info()
return self.environment.handle_exception(exc_info, True)
def stream(self, *args, **kwargs):
"""Works exactly like :meth:`generate` but returns a
:class:`TemplateStream`.
"""
return TemplateStream(self.generate(*args, **kwargs))
def generate(self, *args, **kwargs):
"""For very large templates it can be useful to not render the whole
template at once but evaluate each statement one after another and
yield piece by piece. This method basically does exactly that and returns
a generator that yields one item after another as unicode strings.
It accepts the same arguments as :meth:`render`.
"""
vars = dict(*args, **kwargs)
try:
for event in self.root_render_func(self.new_context(vars)):
yield event
except Exception:
exc_info = sys.exc_info()
else:
return
yield self.environment.handle_exception(exc_info, True)
def new_context(self, vars=None, shared=False, locals=None):
"""Create a new :class:`Context` for this template. The vars
provided will be passed to the template. Per default the globals
are added to the context. If shared is set to `True` the data
is passed as is to the context without adding the globals.
`locals` can be a dict of local variables for internal usage.
"""
return new_context(self.environment, self.name, self.blocks,
vars, shared, self.globals, locals)
def make_module(self, vars=None, shared=False, locals=None):
"""This method works like the :attr:`module` attribute when called
without arguments but it will evaluate the template on every call
rather than caching it. It's also possible to provide
a dict which is then used as context. The arguments are the same
as for the :meth:`new_context` method.
"""
return TemplateModule(self, self.new_context(vars, shared, locals))
@property
def module(self):
"""The template as module. This is used for imports in the
template runtime but is also useful if one wants to access
exported template variables from the Python layer:
>>> t = Template('{% macro foo() %}42{% endmacro %}23')
>>> unicode(t.module)
u'23'
>>> t.module.foo()
u'42'
"""
if self._module is not None:
return self._module
self._module = rv = self.make_module()
return rv
def get_corresponding_lineno(self, lineno):
"""Return the source line number of a line number in the
generated bytecode as they are not in sync.
"""
for template_line, code_line in reversed(self.debug_info):
if code_line <= lineno:
return template_line
return 1
@property
def is_up_to_date(self):
"""If this variable is `False` there is a newer version available."""
if self._uptodate is None:
return True
return self._uptodate()
@property
def debug_info(self):
"""The debug info mapping."""
return [tuple(imap(int, x.split('='))) for x in
self._debug_info.split('&')]
def __repr__(self):
if self.name is None:
name = 'memory:%x' % id(self)
else:
name = repr(self.name)
return '<%s %s>' % (self.__class__.__name__, name)
@implements_to_string
class TemplateModule(object):
"""Represents an imported template. All the exported names of the
template are available as attributes on this object. Additionally
converting it into a unicode or byte string renders the contents.
"""
def __init__(self, template, context):
self._body_stream = list(template.root_render_func(context))
self.__dict__.update(context.get_exported())
self.__name__ = template.name
def __html__(self):
return Markup(concat(self._body_stream))
def __str__(self):
return concat(self._body_stream)
def __repr__(self):
if self.__name__ is None:
name = 'memory:%x' % id(self)
else:
name = repr(self.__name__)
return '<%s %s>' % (self.__class__.__name__, name)
class TemplateExpression(object):
"""The :meth:`jinja2.Environment.compile_expression` method returns an
instance of this object. It encapsulates the expression-like access
to the template with an expression it wraps.
"""
def __init__(self, template, undefined_to_none):
self._template = template
self._undefined_to_none = undefined_to_none
def __call__(self, *args, **kwargs):
context = self._template.new_context(dict(*args, **kwargs))
consume(self._template.root_render_func(context))
rv = context.vars['result']
if self._undefined_to_none and isinstance(rv, Undefined):
rv = None
return rv
@implements_iterator
class TemplateStream(object):
"""A template stream works pretty much like an ordinary python generator
but it can buffer multiple items to reduce the number of total iterations.
Per default the output is unbuffered which means that for every unbuffered
instruction in the template one unicode string is yielded.
If buffering is enabled with a buffer size of 5, five items are combined
into a new unicode string. This is mainly useful if you are streaming
big templates to a client via WSGI which flushes after each iteration.
"""
def __init__(self, gen):
self._gen = gen
self.disable_buffering()
def dump(self, fp, encoding=None, errors='strict'):
"""Dump the complete stream into a file or file-like object.
Per default unicode strings are written, if you want to encode
before writing specify an `encoding`.
Example usage::
Template('Hello {{ name }}!').stream(name='foo').dump('hello.html')
"""
close = False
if isinstance(fp, string_types):
fp = open(fp, encoding is None and 'w' or 'wb')
close = True
try:
if encoding is not None:
iterable = (x.encode(encoding, errors) for x in self)
else:
iterable = self
if hasattr(fp, 'writelines'):
fp.writelines(iterable)
else:
for item in iterable:
fp.write(item)
finally:
if close:
fp.close()
def disable_buffering(self):
"""Disable the output buffering."""
self._next = get_next(self._gen)
self.buffered = False
def enable_buffering(self, size=5):
"""Enable buffering. Buffer `size` items before yielding them."""
if size <= 1:
raise ValueError('buffer size too small')
def generator(next):
buf = []
c_size = 0
push = buf.append
while 1:
try:
while c_size < size:
c = next()
push(c)
if c:
c_size += 1
except StopIteration:
if not c_size:
return
yield concat(buf)
del buf[:]
c_size = 0
self.buffered = True
self._next = get_next(generator(get_next(self._gen)))
def __iter__(self):
return self
def __next__(self):
return self._next()
# hook in default template class. if anyone reads this comment: ignore that
# it's possible to use custom templates ;-)
Environment.template_class = Template
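# Illustrative usage sketch (editor's addition, not part of the upstream
# module): the basic Environment / Template round trip documented above,
# including the streaming interface.
# >>> env = Environment()
# >>> tmpl = env.from_string(u'Hello {{ name }}!')
# >>> tmpl.render(name=u'World')
# u'Hello World!'
# >>> next(iter(tmpl.stream(name=u'World')))
# u'Hello World!'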
| bsd-3-clause |
LucasFeliciano21/BeagleBoard-Stable_Linux | arch/ia64/scripts/unwcheck.py | 13143 | 1714 | #!/usr/bin/python
#
# Usage: unwcheck.py FILE
#
# This script checks the unwind info of each function in file FILE
# and verifies that the sum of the region-lengths matches the total
# length of the function.
#
# Based on a shell/awk script originally written by Harish Patil,
# which was converted to Perl by Matthew Chapman, which was converted
# to Python by David Mosberger.
#
import os
import re
import sys
if len(sys.argv) != 2:
print "Usage: %s FILE" % sys.argv[0]
sys.exit(2)
readelf = os.getenv("READELF", "readelf")
start_pattern = re.compile("<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]")
rlen_pattern = re.compile(".*rlen=([0-9]+)")
def check_func (func, slots, rlen_sum):
if slots != rlen_sum:
global num_errors
num_errors += 1
if not func: func = "[%#x-%#x]" % (start, end)
print "ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum)
return
num_funcs = 0
num_errors = 0
func = False
slots = 0
rlen_sum = 0
for line in os.popen("%s -u %s" % (readelf, sys.argv[1])):
m = start_pattern.match(line)
if m:
check_func(func, slots, rlen_sum)
func = m.group(1)
start = long(m.group(2), 16)
end = long(m.group(3), 16)
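# Editor's note: ia64 instructions are packed into 16-byte bundles holding
# 3 instruction slots each, hence the 3-slots-per-16-bytes computation below.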
slots = 3 * (end - start) / 16
rlen_sum = 0L
num_funcs += 1
else:
m = rlen_pattern.match(line)
if m:
rlen_sum += long(m.group(1))
check_func(func, slots, rlen_sum)
if num_errors == 0:
print "No errors detected in %u functions." % num_funcs
else:
if num_errors > 1:
err="errors"
else:
err="error"
print "%u %s detected in %u functions." % (num_errors, err, num_funcs)
sys.exit(1)
| gpl-2.0 |
neerajvashistha/pa-dude | lib/python2.7/site-packages/numpy/polynomial/legendre.py | 75 | 56240 | """
Legendre Series (:mod:`numpy.polynomial.legendre`)
===================================================
.. currentmodule:: numpy.polynomial.legendre
This module provides a number of objects (mostly functions) useful for
dealing with Legendre series, including a `Legendre` class that
encapsulates the usual arithmetic operations. (General information
on how this module represents and works with such polynomials is in the
docstring for its "parent" sub-package, `numpy.polynomial`).
Constants
---------
.. autosummary::
:toctree: generated/
legdomain Legendre series default domain, [-1,1].
legzero Legendre series that evaluates identically to 0.
legone Legendre series that evaluates identically to 1.
legx Legendre series for the identity map, ``f(x) = x``.
Arithmetic
----------
.. autosummary::
:toctree: generated/
legmulx multiply a Legendre series in P_i(x) by x.
legadd add two Legendre series.
legsub subtract one Legendre series from another.
legmul multiply two Legendre series.
legdiv divide one Legendre series by another.
legpow raise a Legendre series to a positive integer power
legval evaluate a Legendre series at given points.
legval2d evaluate a 2D Legendre series at given points.
legval3d evaluate a 3D Legendre series at given points.
leggrid2d evaluate a 2D Legendre series on a Cartesian product.
leggrid3d evaluate a 3D Legendre series on a Cartesian product.
Calculus
--------
.. autosummary::
:toctree: generated/
legder differentiate a Legendre series.
legint integrate a Legendre series.
Misc Functions
--------------
.. autosummary::
:toctree: generated/
legfromroots create a Legendre series with specified roots.
legroots find the roots of a Legendre series.
legvander Vandermonde-like matrix for Legendre polynomials.
legvander2d Vandermonde-like matrix for 2D power series.
legvander3d Vandermonde-like matrix for 3D power series.
leggauss Gauss-Legendre quadrature, points and weights.
legweight Legendre weight function.
legcompanion symmetrized companion matrix in Legendre form.
legfit least-squares fit returning a Legendre series.
legtrim trim leading coefficients from a Legendre series.
legline Legendre series representing given straight line.
leg2poly convert a Legendre series to a polynomial.
poly2leg convert a polynomial to a Legendre series.
Classes
-------
Legendre A Legendre series class.
See also
--------
numpy.polynomial.polynomial
numpy.polynomial.chebyshev
numpy.polynomial.laguerre
numpy.polynomial.hermite
numpy.polynomial.hermite_e
"""
from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
import numpy.linalg as la
from . import polyutils as pu
from ._polybase import ABCPolyBase
__all__ = [
'legzero', 'legone', 'legx', 'legdomain', 'legline', 'legadd',
'legsub', 'legmulx', 'legmul', 'legdiv', 'legpow', 'legval', 'legder',
'legint', 'leg2poly', 'poly2leg', 'legfromroots', 'legvander',
'legfit', 'legtrim', 'legroots', 'Legendre', 'legval2d', 'legval3d',
'leggrid2d', 'leggrid3d', 'legvander2d', 'legvander3d', 'legcompanion',
'leggauss', 'legweight']
legtrim = pu.trimcoef
def poly2leg(pol):
"""
Convert a polynomial to a Legendre series.
Convert an array representing the coefficients of a polynomial (relative
to the "standard" basis) ordered from lowest degree to highest, to an
array of the coefficients of the equivalent Legendre series, ordered
from lowest to highest degree.
Parameters
----------
pol : array_like
1-D array containing the polynomial coefficients
Returns
-------
c : ndarray
1-D array containing the coefficients of the equivalent Legendre
series.
See Also
--------
leg2poly
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> from numpy import polynomial as P
>>> p = P.Polynomial(np.arange(4))
>>> p
Polynomial([ 0., 1., 2., 3.], [-1., 1.])
>>> c = P.Legendre(P.poly2leg(p.coef))
>>> c
Legendre([ 1. , 3.25, 1. , 0.75], [-1., 1.])
"""
[pol] = pu.as_series([pol])
deg = len(pol) - 1
res = 0
for i in range(deg, -1, -1):
res = legadd(legmulx(res), pol[i])
return res
def leg2poly(c):
"""
Convert a Legendre series to a polynomial.
Convert an array representing the coefficients of a Legendre series,
ordered from lowest degree to highest, to an array of the coefficients
of the equivalent polynomial (relative to the "standard" basis) ordered
from lowest to highest degree.
Parameters
----------
c : array_like
1-D array containing the Legendre series coefficients, ordered
from lowest order term to highest.
Returns
-------
pol : ndarray
1-D array containing the coefficients of the equivalent polynomial
(relative to the "standard" basis) ordered from lowest order term
to highest.
See Also
--------
poly2leg
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> c = P.Legendre(range(4))
>>> c
Legendre([ 0., 1., 2., 3.], [-1., 1.])
>>> p = c.convert(kind=P.Polynomial)
>>> p
Polynomial([-1. , -3.5, 3. , 7.5], [-1., 1.])
>>> P.leg2poly(range(4))
array([-1. , -3.5, 3. , 7.5])
"""
from .polynomial import polyadd, polysub, polymulx
[c] = pu.as_series([c])
n = len(c)
if n < 3:
return c
else:
c0 = c[-2]
c1 = c[-1]
# i is the current degree of c1
for i in range(n - 1, 1, -1):
tmp = c0
c0 = polysub(c[i - 2], (c1*(i - 1))/i)
c1 = polyadd(tmp, (polymulx(c1)*(2*i - 1))/i)
return polyadd(c0, polymulx(c1))
#
# These constant arrays are of integer type so as to be compatible
# with the widest range of other types, such as Decimal.
#
# Legendre
legdomain = np.array([-1, 1])
# Legendre coefficients representing zero.
legzero = np.array([0])
# Legendre coefficients representing one.
legone = np.array([1])
# Legendre coefficients representing the identity x.
legx = np.array([0, 1])
def legline(off, scl):
"""
Legendre series whose graph is a straight line.
Parameters
----------
off, scl : scalars
The specified line is given by ``off + scl*x``.
Returns
-------
y : ndarray
This module's representation of the Legendre series for
``off + scl*x``.
See Also
--------
polyline, chebline
Examples
--------
>>> import numpy.polynomial.legendre as L
>>> L.legline(3,2)
array([3, 2])
>>> L.legval(-3, L.legline(3,2)) # should be -3
-3.0
"""
if scl != 0:
return np.array([off, scl])
else:
return np.array([off])
def legfromroots(roots):
"""
Generate a Legendre series with given roots.
The function returns the coefficients of the polynomial
.. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),
in Legendre form, where the `r_n` are the roots specified in `roots`.
If a zero has multiplicity n, then it must appear in `roots` n times.
For instance, if 2 is a root of multiplicity three and 3 is a root of
multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The
roots can appear in any order.
If the returned coefficients are `c`, then
.. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x)
The coefficient of the last term is not generally 1 for monic
polynomials in Legendre form.
Parameters
----------
roots : array_like
Sequence containing the roots.
Returns
-------
out : ndarray
1-D array of coefficients. If all roots are real then `out` is a
real array, if some of the roots are complex, then `out` is complex
even if all the coefficients in the result are real (see Examples
below).
See Also
--------
polyfromroots, chebfromroots, lagfromroots, hermfromroots,
hermefromroots.
Examples
--------
>>> import numpy.polynomial.legendre as L
>>> L.legfromroots((-1,0,1)) # x^3 - x relative to the standard basis
array([ 0. , -0.4, 0. , 0.4])
>>> j = complex(0,1)
>>> L.legfromroots((-j,j)) # x^2 + 1 relative to the standard basis
array([ 1.33333333+0.j, 0.00000000+0.j, 0.66666667+0.j])
"""
if len(roots) == 0:
return np.ones(1)
else:
[roots] = pu.as_series([roots], trim=False)
roots.sort()
p = [legline(-r, 1) for r in roots]
n = len(p)
while n > 1:
m, r = divmod(n, 2)
tmp = [legmul(p[i], p[i+m]) for i in range(m)]
if r:
tmp[0] = legmul(tmp[0], p[-1])
p = tmp
n = m
return p[0]
def legadd(c1, c2):
"""
Add one Legendre series to another.
Returns the sum of two Legendre series `c1` + `c2`. The arguments
are sequences of coefficients ordered from lowest order term to
highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Legendre series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the Legendre series of their sum.
See Also
--------
legsub, legmul, legdiv, legpow
Notes
-----
Unlike multiplication, division, etc., the sum of two Legendre series
is a Legendre series (without having to "reproject" the result onto
the basis set) so addition, just like that of "standard" polynomials,
is simply "component-wise."
Examples
--------
>>> from numpy.polynomial import legendre as L
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> L.legadd(c1,c2)
array([ 4., 4., 4.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c1[:c2.size] += c2
ret = c1
else:
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def legsub(c1, c2):
"""
Subtract one Legendre series from another.
Returns the difference of two Legendre series `c1` - `c2`. The
sequences of coefficients are from lowest order term to highest, i.e.,
[1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Legendre series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Legendre series coefficients representing their difference.
See Also
--------
legadd, legmul, legdiv, legpow
Notes
-----
Unlike multiplication, division, etc., the difference of two Legendre
series is a Legendre series (without having to "reproject" the result
onto the basis set) so subtraction, just like that of "standard"
polynomials, is simply "component-wise."
Examples
--------
>>> from numpy.polynomial import legendre as L
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> L.legsub(c1,c2)
array([-2., 0., 2.])
>>> L.legsub(c2,c1) # -C.legsub(c1,c2)
array([ 2., 0., -2.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c1[:c2.size] -= c2
ret = c1
else:
c2 = -c2
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def legmulx(c):
"""Multiply a Legendre series by x.
Multiply the Legendre series `c` by x, where x is the independent
variable.
Parameters
----------
c : array_like
1-D array of Legendre series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the result of the multiplication.
Notes
-----
The multiplication uses the recursion relationship for Legendre
polynomials in the form
.. math::
xP_i(x) = ((i + 1)*P_{i + 1}(x) + i*P_{i - 1}(x))/(2i + 1)
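Examples
--------
A small illustrative check of the recursion above (the printed formatting
may differ slightly between numpy versions):
>>> from numpy.polynomial import legendre as L
>>> L.legmulx([1, 2, 3])
array([ 0.66666667,  2.2       ,  1.33333333,  1.8       ])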
"""
# c is a trimmed copy
[c] = pu.as_series([c])
# The zero series needs special treatment
if len(c) == 1 and c[0] == 0:
return c
prd = np.empty(len(c) + 1, dtype=c.dtype)
prd[0] = c[0]*0
prd[1] = c[0]
for i in range(1, len(c)):
j = i + 1
k = i - 1
s = i + j
prd[j] = (c[i]*j)/s
prd[k] += (c[i]*i)/s
return prd
def legmul(c1, c2):
"""
Multiply one Legendre series by another.
Returns the product of two Legendre series `c1` * `c2`. The arguments
are sequences of coefficients, from lowest order "term" to highest,
e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Legendre series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Legendre series coefficients representing their product.
See Also
--------
legadd, legsub, legdiv, legpow
Notes
-----
In general, the (polynomial) product of two C-series results in terms
that are not in the Legendre polynomial basis set. Thus, to express
the product as a Legendre series, it is necessary to "reproject" the
product onto said basis set, which may produce "unintuitive" (but
correct) results; see Examples section below.
Examples
--------
>>> from numpy.polynomial import legendre as L
>>> c1 = (1,2,3)
>>> c2 = (3,2)
>>> L.legmul(c1,c2) # multiplication requires "reprojection"
array([ 4.33333333, 10.4 , 11.66666667, 3.6 ])
"""
# s1, s2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c = c2
xs = c1
else:
c = c1
xs = c2
if len(c) == 1:
c0 = c[0]*xs
c1 = 0
elif len(c) == 2:
c0 = c[0]*xs
c1 = c[1]*xs
else:
nd = len(c)
c0 = c[-2]*xs
c1 = c[-1]*xs
for i in range(3, len(c) + 1):
tmp = c0
nd = nd - 1
c0 = legsub(c[-i]*xs, (c1*(nd - 1))/nd)
c1 = legadd(tmp, (legmulx(c1)*(2*nd - 1))/nd)
return legadd(c0, legmulx(c1))
def legdiv(c1, c2):
"""
Divide one Legendre series by another.
Returns the quotient-with-remainder of two Legendre series
`c1` / `c2`. The arguments are sequences of coefficients from lowest
order "term" to highest, e.g., [1,2,3] represents the series
``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Legendre series coefficients ordered from low to
high.
Returns
-------
quo, rem : ndarrays
Of Legendre series coefficients representing the quotient and
remainder.
See Also
--------
legadd, legsub, legmul, legpow
Notes
-----
In general, the (polynomial) division of one Legendre series by another
results in quotient and remainder terms that are not in the Legendre
polynomial basis set. Thus, to express these results as a Legendre
series, it is necessary to "reproject" the results onto the Legendre
basis set, which may produce "unintuitive" (but correct) results; see
Examples section below.
Examples
--------
>>> from numpy.polynomial import legendre as L
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> L.legdiv(c1,c2) # quotient "intuitive," remainder not
(array([ 3.]), array([-8., -4.]))
>>> c2 = (0,1,2,3)
>>> L.legdiv(c2,c1) # neither "intuitive"
(array([-0.07407407, 1.66666667]), array([-1.03703704, -2.51851852]))
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if c2[-1] == 0:
raise ZeroDivisionError()
lc1 = len(c1)
lc2 = len(c2)
if lc1 < lc2:
return c1[:1]*0, c1
elif lc2 == 1:
return c1/c2[-1], c1[:1]*0
else:
quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype)
rem = c1
for i in range(lc1 - lc2, - 1, -1):
p = legmul([0]*i + [1], c2)
q = rem[-1]/p[-1]
rem = rem[:-1] - q*p[:-1]
quo[i] = q
return quo, pu.trimseq(rem)
def legpow(c, pow, maxpower=16):
"""Raise a Legendre series to a power.
Returns the Legendre series `c` raised to the power `pow`. The
argument `c` is a sequence of coefficients ordered from low to high,
i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c : array_like
1-D array of Legendre series coefficients ordered from low to
high.
pow : integer
Power to which the series will be raised
maxpower : integer, optional
Maximum power allowed. This is mainly to limit growth of the series
to unmanageable size. Default is 16.
Returns
-------
coef : ndarray
Legendre series of power.
See Also
--------
legadd, legsub, legmul, legdiv
Examples
--------
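An illustrative square of a low-degree series (printed formatting may vary
by numpy version); since ``(P_0 + 2*P_1)**2 = 1 + 4x + 4x**2``, the result
is ``7/3*P_0 + 4*P_1 + 8/3*P_2``:
>>> from numpy.polynomial import legendre as L
>>> L.legpow([1, 2], 2)
array([ 2.33333333,  4.        ,  2.66666667])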
"""
# c is a trimmed copy
[c] = pu.as_series([c])
power = int(pow)
if power != pow or power < 0:
raise ValueError("Power must be a non-negative integer.")
elif maxpower is not None and power > maxpower:
raise ValueError("Power is too large")
elif power == 0:
return np.array([1], dtype=c.dtype)
elif power == 1:
return c
else:
# This can be made more efficient by using powers of two
# in the usual way.
prd = c
for i in range(2, power + 1):
prd = legmul(prd, c)
return prd
def legder(c, m=1, scl=1, axis=0):
"""
Differentiate a Legendre series.
Returns the Legendre series coefficients `c` differentiated `m` times
along `axis`. At each iteration the result is multiplied by `scl` (the
scaling factor is for use in a linear change of variable). The argument
`c` is an array of coefficients from low to high degree along each
axis, e.g., [1,2,3] represents the series ``1*L_0 + 2*L_1 + 3*L_2``
while [[1,2],[1,2]] represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) +
2*L_0(x)*L_1(y) + 2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is
``y``.
Parameters
----------
c : array_like
Array of Legendre series coefficients. If c is multidimensional the
different axes correspond to different variables with the degree in
each axis given by the corresponding index.
m : int, optional
Number of derivatives taken, must be non-negative. (Default: 1)
scl : scalar, optional
Each differentiation is multiplied by `scl`. The end result is
multiplication by ``scl**m``. This is for use in a linear change of
variable. (Default: 1)
axis : int, optional
Axis over which the derivative is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
der : ndarray
Legendre series of the derivative.
See Also
--------
legint
Notes
-----
In general, the result of differentiating a Legendre series does not
resemble the same operation on a power series. Thus the result of this
function may be "unintuitive," albeit correct; see Examples section
below.
Examples
--------
>>> from numpy.polynomial import legendre as L
>>> c = (1,2,3,4)
>>> L.legder(c)
array([ 6., 9., 20.])
>>> L.legder(c, 3)
array([ 60.])
>>> L.legder(c, scl=-1)
array([ -6., -9., -20.])
>>> L.legder(c, 2,-1)
array([ 9., 60.])
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of derivation must be integer")
if cnt < 0:
raise ValueError("The order of derivation must be non-negative")
if iaxis != axis:
raise ValueError("The axis must be integer")
if not -c.ndim <= iaxis < c.ndim:
raise ValueError("The axis is out of range")
if iaxis < 0:
iaxis += c.ndim
if cnt == 0:
return c
c = np.rollaxis(c, iaxis)
n = len(c)
if cnt >= n:
c = c[:1]*0
else:
for i in range(cnt):
n = n - 1
c *= scl
der = np.empty((n,) + c.shape[1:], dtype=c.dtype)
for j in range(n, 2, -1):
der[j - 1] = (2*j - 1)*c[j]
c[j - 2] += c[j]
if n > 1:
der[1] = 3*c[2]
der[0] = c[1]
c = der
c = np.rollaxis(c, 0, iaxis + 1)
return c
def legint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
"""
Integrate a Legendre series.
Returns the Legendre series coefficients `c` integrated `m` times from
`lbnd` along `axis`. At each iteration the resulting series is
**multiplied** by `scl` and an integration constant, `k`, is added.
The scaling factor is for use in a linear change of variable. ("Buyer
beware": note that, depending on what one is doing, one may want `scl`
to be the reciprocal of what one might expect; for more information,
see the Notes section below.) The argument `c` is an array of
coefficients from low to high degree along each axis, e.g., [1,2,3]
represents the series ``L_0 + 2*L_1 + 3*L_2`` while [[1,2],[1,2]]
represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) + 2*L_0(x)*L_1(y) +
2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``.
Parameters
----------
c : array_like
Array of Legendre series coefficients. If c is multidimensional the
different axes correspond to different variables with the degree in
each axis given by the corresponding index.
m : int, optional
Order of integration, must be positive. (Default: 1)
k : {[], list, scalar}, optional
Integration constant(s). The value of the first integral at
``lbnd`` is the first value in the list, the value of the second
integral at ``lbnd`` is the second value, etc. If ``k == []`` (the
default), all constants are set to zero. If ``m == 1``, a single
scalar can be given instead of a list.
lbnd : scalar, optional
The lower bound of the integral. (Default: 0)
scl : scalar, optional
Following each integration the result is *multiplied* by `scl`
before the integration constant is added. (Default: 1)
axis : int, optional
Axis over which the integral is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
S : ndarray
Legendre series coefficient array of the integral.
Raises
------
ValueError
If ``m < 0``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or
``np.isscalar(scl) == False``.
See Also
--------
legder
Notes
-----
Note that the result of each integration is *multiplied* by `scl`.
Why is this important to note? Say one is making a linear change of
variable :math:`u = ax + b` in an integral relative to `x`. Then
:math:`dx = du/a`, so one will need to set `scl` equal to
:math:`1/a` - perhaps not what one would have first thought.
Also note that, in general, the result of integrating a C-series needs
to be "reprojected" onto the C-series basis set. Thus, typically,
the result of this function is "unintuitive," albeit correct; see
Examples section below.
Examples
--------
>>> from numpy.polynomial import legendre as L
>>> c = (1,2,3)
>>> L.legint(c)
array([ 0.33333333, 0.4 , 0.66666667, 0.6 ])
>>> L.legint(c, 3)
array([ 1.66666667e-02, -1.78571429e-02, 4.76190476e-02,
-1.73472348e-18, 1.90476190e-02, 9.52380952e-03])
>>> L.legint(c, k=3)
array([ 3.33333333, 0.4 , 0.66666667, 0.6 ])
>>> L.legint(c, lbnd=-2)
array([ 7.33333333, 0.4 , 0.66666667, 0.6 ])
>>> L.legint(c, scl=2)
array([ 0.66666667, 0.8 , 1.33333333, 1.2 ])
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if not np.iterable(k):
k = [k]
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of integration must be integer")
if cnt < 0:
raise ValueError("The order of integration must be non-negative")
if len(k) > cnt:
raise ValueError("Too many integration constants")
if iaxis != axis:
raise ValueError("The axis must be integer")
if not -c.ndim <= iaxis < c.ndim:
raise ValueError("The axis is out of range")
if iaxis < 0:
iaxis += c.ndim
if cnt == 0:
return c
c = np.rollaxis(c, iaxis)
k = list(k) + [0]*(cnt - len(k))
for i in range(cnt):
n = len(c)
c *= scl
if n == 1 and np.all(c[0] == 0):
c[0] += k[i]
else:
tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype)
tmp[0] = c[0]*0
tmp[1] = c[0]
if n > 1:
tmp[2] = c[1]/3
for j in range(2, n):
t = c[j]/(2*j + 1)
tmp[j + 1] = t
tmp[j - 1] -= t
tmp[0] += k[i] - legval(lbnd, tmp)
c = tmp
c = np.rollaxis(c, 0, iaxis + 1)
return c
def legval(x, c, tensor=True):
"""
Evaluate a Legendre series at points x.
If `c` is of length `n + 1`, this function returns the value:
.. math:: p(x) = c_0 * L_0(x) + c_1 * L_1(x) + ... + c_n * L_n(x)
The parameter `x` is converted to an array only if it is a tuple or a
list, otherwise it is treated as a scalar. In either case, either `x`
or its elements must support multiplication and addition both with
themselves and with the elements of `c`.
If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If
`c` is multidimensional, then the shape of the result depends on the
value of `tensor`. If `tensor` is true the shape will be c.shape[1:] +
x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that
scalars have shape ().
Trailing zeros in the coefficients will be used in the evaluation, so
they should be avoided if efficiency is a concern.
Parameters
----------
x : array_like, compatible object
If `x` is a list or tuple, it is converted to an ndarray, otherwise
it is left unchanged and treated as a scalar. In either case, `x`
or its elements must support addition and multiplication with
themselves and with the elements of `c`.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
degree n are contained in c[n]. If `c` is multidimensional the
remaining indices enumerate multiple polynomials. In the two
dimensional case the coefficients may be thought of as stored in
the columns of `c`.
tensor : boolean, optional
If True, the shape of the coefficient array is extended with ones
on the right, one for each dimension of `x`. Scalars have dimension 0
for this action. The result is that every column of coefficients in
`c` is evaluated for every element of `x`. If False, `x` is broadcast
over the columns of `c` for the evaluation. This keyword is useful
when `c` is multidimensional. The default value is True.
.. versionadded:: 1.7.0
Returns
-------
values : ndarray, algebra_like
The shape of the return value is described above.
See Also
--------
legval2d, leggrid2d, legval3d, leggrid3d
Notes
-----
The evaluation uses Clenshaw recursion, aka synthetic division.
Examples
--------
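A couple of illustrative evaluations of ``1*L_0 + 2*L_1 + 3*L_2`` (printed
formatting may vary by numpy version):
>>> from numpy.polynomial.legendre import legval
>>> legval(0.5, [1, 2, 3])
1.625
>>> legval([0.0, 0.5], [1, 2, 3])
array([-0.5  ,  1.625])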
"""
c = np.array(c, ndmin=1, copy=0)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if isinstance(x, (tuple, list)):
x = np.asarray(x)
if isinstance(x, np.ndarray) and tensor:
c = c.reshape(c.shape + (1,)*x.ndim)
if len(c) == 1:
c0 = c[0]
c1 = 0
elif len(c) == 2:
c0 = c[0]
c1 = c[1]
else:
nd = len(c)
c0 = c[-2]
c1 = c[-1]
for i in range(3, len(c) + 1):
tmp = c0
nd = nd - 1
c0 = c[-i] - (c1*(nd - 1))/nd
c1 = tmp + (c1*x*(2*nd - 1))/nd
return c0 + c1*x
def legval2d(x, y, c):
"""
Evaluate a 2-D Legendre series at points (x, y).
This function returns the values:
.. math:: p(x,y) = \\sum_{i,j} c_{i,j} * L_i(x) * L_j(y)
The parameters `x` and `y` are converted to arrays only if they are
tuples or lists, otherwise they are treated as scalars and they
must have the same shape after conversion. In either case, either `x`
and `y` or their elements must support multiplication and addition both
with themselves and with the elements of `c`.
If `c` is a 1-D array a one is implicitly appended to its shape to make
it 2-D. The shape of the result will be c.shape[2:] + x.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points `(x, y)`,
where `x` and `y` must have the same shape. If `x` or `y` is a list
or tuple, it is first converted to an ndarray, otherwise it is left
unchanged and if it isn't an ndarray it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term
of multi-degree i,j is contained in ``c[i,j]``. If `c` has
dimension greater than two the remaining indices enumerate multiple
sets of coefficients.
Returns
-------
values : ndarray, compatible object
The values of the two dimensional Legendre series at points formed
from pairs of corresponding values from `x` and `y`.
See Also
--------
legval, leggrid2d, legval3d, leggrid3d
Notes
-----
.. versionadded::1.7.0
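Examples
--------
A small illustrative evaluation; the coefficients are chosen so the value
at ``(1, 1)`` is simply the sum of the coefficients:
>>> from numpy.polynomial.legendre import legval2d
>>> legval2d(1, 1, [[1, 2], [3, 4]])
10.0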
"""
try:
x, y = np.array((x, y), copy=0)
except Exception:
raise ValueError('x, y are incompatible')
c = legval(x, c)
c = legval(y, c, tensor=False)
return c
def leggrid2d(x, y, c):
"""
Evaluate a 2-D Legendre series on the Cartesian product of x and y.
This function returns the values:
.. math:: p(a,b) = \sum_{i,j} c_{i,j} * L_i(a) * L_j(b)
where the points `(a, b)` consist of all pairs formed by taking
`a` from `x` and `b` from `y`. The resulting points form a grid with
`x` in the first dimension and `y` in the second.
The parameters `x` and `y` are converted to arrays only if they are
tuples or lists, otherwise they are treated as scalars. In either
case, either `x` and `y` or their elements must support multiplication
and addition both with themselves and with the elements of `c`.
If `c` has fewer than two dimensions, ones are implicitly appended to
its shape to make it 2-D. The shape of the result will be c.shape[2:] +
x.shape + y.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points in the
Cartesian product of `x` and `y`. If `x` or `y` is a list or
tuple, it is first converted to an ndarray, otherwise it is left
unchanged and, if it isn't an ndarray, it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term of
multi-degree i,j is contained in `c[i,j]`. If `c` has dimension
greater than two the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the two dimensional Legendre series at points in the
Cartesian product of `x` and `y`.
See Also
--------
legval, legval2d, legval3d, leggrid3d
Notes
-----
.. versionadded::1.7.0
"""
c = legval(x, c)
c = legval(y, c)
return c
def legval3d(x, y, z, c):
"""
Evaluate a 3-D Legendre series at points (x, y, z).
This function returns the values:
.. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * L_i(x) * L_j(y) * L_k(z)
The parameters `x`, `y`, and `z` are converted to arrays only if
they are tuples or lists, otherwise they are treated as scalars and
they must have the same shape after conversion. In either case, either
`x`, `y`, and `z` or their elements must support multiplication and
addition both with themselves and with the elements of `c`.
If `c` has fewer than 3 dimensions, ones are implicitly appended to its
shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape.
Parameters
----------
x, y, z : array_like, compatible object
The three dimensional series is evaluated at the points
`(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If
any of `x`, `y`, or `z` is a list or tuple, it is first converted
to an ndarray, otherwise it is left unchanged and if it isn't an
ndarray it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term of
multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
greater than 3 the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the multidimensional polynomial on points formed with
triples of corresponding values from `x`, `y`, and `z`.
See Also
--------
legval, legval2d, leggrid2d, leggrid3d
Notes
-----
.. versionadded::1.7.0
"""
try:
x, y, z = np.array((x, y, z), copy=0)
except Exception:
raise ValueError('x, y, z are incompatible')
c = legval(x, c)
c = legval(y, c, tensor=False)
c = legval(z, c, tensor=False)
return c
def leggrid3d(x, y, z, c):
"""
Evaluate a 3-D Legendre series on the Cartesian product of x, y, and z.
This function returns the values:
.. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * L_i(a) * L_j(b) * L_k(c)
where the points `(a, b, c)` consist of all triples formed by taking
`a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
a grid with `x` in the first dimension, `y` in the second, and `z` in
the third.
The parameters `x`, `y`, and `z` are converted to arrays only if they
are tuples or lists, otherwise they are treated as scalars. In
either case, either `x`, `y`, and `z` or their elements must support
multiplication and addition both with themselves and with the elements
of `c`.
If `c` has fewer than three dimensions, ones are implicitly appended to
its shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape + y.shape + z.shape.
Parameters
----------
x, y, z : array_like, compatible objects
The three dimensional series is evaluated at the points in the
Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a
list or tuple, it is first converted to an ndarray, otherwise it is
left unchanged and, if it isn't an ndarray, it is treated as a
scalar.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
greater than three the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the three dimensional polynomial at points in the
Cartesian product of `x`, `y`, and `z`.
See Also
--------
legval, legval2d, leggrid2d, legval3d
Notes
-----
.. versionadded::1.7.0
"""
c = legval(x, c)
c = legval(y, c)
c = legval(z, c)
return c
def legvander(x, deg):
"""Pseudo-Vandermonde matrix of given degree.
Returns the pseudo-Vandermonde matrix of degree `deg` and sample points
`x`. The pseudo-Vandermonde matrix is defined by
.. math:: V[..., i] = L_i(x)
where `0 <= i <= deg`. The leading indices of `V` index the elements of
`x` and the last index is the degree of the Legendre polynomial.
If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the
array ``V = legvander(x, n)``, then ``np.dot(V, c)`` and
``legval(x, c)`` are the same up to roundoff. This equivalence is
useful both for least squares fitting and for the evaluation of a large
number of Legendre series of the same degree and sample points.
Parameters
----------
x : array_like
Array of points. The dtype is converted to float64 or complex128
depending on whether any of the elements are complex. If `x` is
scalar it is converted to a 1-D array.
deg : int
Degree of the resulting matrix.
Returns
-------
vander : ndarray
The pseudo-Vandermonde matrix. The shape of the returned matrix is
``x.shape + (deg + 1,)``, where the last index is the degree of the
corresponding Legendre polynomial. The dtype will be the same as
the converted `x`.
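Examples
--------
A small illustrative matrix of degree 2; rows correspond to the sample
points and columns to ``L_0``, ``L_1``, ``L_2`` (printed formatting may
vary by numpy version):
>>> from numpy.polynomial.legendre import legvander
>>> legvander([0.0, 1.0], 2)
array([[ 1. ,  0. , -0.5],
       [ 1. ,  1. ,  1. ]])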
"""
ideg = int(deg)
if ideg != deg:
raise ValueError("deg must be integer")
if ideg < 0:
raise ValueError("deg must be non-negative")
x = np.array(x, copy=0, ndmin=1) + 0.0
dims = (ideg + 1,) + x.shape
dtyp = x.dtype
v = np.empty(dims, dtype=dtyp)
# Use forward recursion to generate the entries. This is not as accurate
# as reverse recursion in this application but it is more efficient.
v[0] = x*0 + 1
if ideg > 0:
v[1] = x
for i in range(2, ideg + 1):
v[i] = (v[i-1]*x*(2*i - 1) - v[i-2]*(i - 1))/i
return np.rollaxis(v, 0, v.ndim)
def legvander2d(x, y, deg):
"""Pseudo-Vandermonde matrix of given degrees.
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
points `(x, y)`. The pseudo-Vandermonde matrix is defined by
.. math:: V[..., (deg[1] + 1)*i + j] = L_i(x) * L_j(y),
where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of
`V` index the points `(x, y)` and the last index encodes the degrees of
the Legendre polynomials.
If ``V = legvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
correspond to the elements of a 2-D coefficient array `c` of shape
(xdeg + 1, ydeg + 1) in the order
.. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
and ``np.dot(V, c.flat)`` and ``legval2d(x, y, c)`` will be the same
up to roundoff. This equivalence is useful both for least squares
fitting and for the evaluation of a large number of 2-D Legendre
series of the same degrees and sample points.
Parameters
----------
x, y : array_like
Arrays of point coordinates, all of the same shape. The dtypes
will be converted to either float64 or complex128 depending on
whether any of the elements are complex. Scalars are converted to
1-D arrays.
deg : list of ints
List of maximum degrees of the form [x_deg, y_deg].
Returns
-------
vander2d : ndarray
The shape of the returned matrix is ``x.shape + (order,)``, where
:math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
as the converted `x` and `y`.
See Also
--------
legvander, legvander3d, legval2d, legval3d
Notes
-----
.. versionadded::1.7.0
"""
ideg = [int(d) for d in deg]
is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
if is_valid != [1, 1]:
raise ValueError("degrees must be non-negative integers")
degx, degy = ideg
x, y = np.array((x, y), copy=0) + 0.0
vx = legvander(x, degx)
vy = legvander(y, degy)
v = vx[..., None]*vy[..., None,:]
return v.reshape(v.shape[:-2] + (-1,))
def legvander3d(x, y, z, deg):
"""Pseudo-Vandermonde matrix of given degrees.
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
then the pseudo-Vandermonde matrix is defined by
.. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = L_i(x)*L_j(y)*L_k(z),
where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`. The leading
indices of `V` index the points `(x, y, z)` and the last index encodes
the degrees of the Legendre polynomials.
If ``V = legvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
of `V` correspond to the elements of a 3-D coefficient array `c` of
shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
.. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
and ``np.dot(V, c.flat)`` and ``legval3d(x, y, z, c)`` will be the
same up to roundoff. This equivalence is useful both for least squares
fitting and for the evaluation of a large number of 3-D Legendre
series of the same degrees and sample points.
Parameters
----------
x, y, z : array_like
Arrays of point coordinates, all of the same shape. The dtypes will
be converted to either float64 or complex128 depending on whether
any of the elements are complex. Scalars are converted to 1-D
arrays.
deg : list of ints
List of maximum degrees of the form [x_deg, y_deg, z_deg].
Returns
-------
vander3d : ndarray
The shape of the returned matrix is ``x.shape + (order,)``, where
:math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will
be the same as the converted `x`, `y`, and `z`.
See Also
--------
legvander, legvander2d, legval2d, legval3d
Notes
-----
.. versionadded::1.7.0
"""
ideg = [int(d) for d in deg]
is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
if is_valid != [1, 1, 1]:
raise ValueError("degrees must be non-negative integers")
degx, degy, degz = ideg
x, y, z = np.array((x, y, z), copy=0) + 0.0
vx = legvander(x, degx)
vy = legvander(y, degy)
vz = legvander(z, degz)
v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:]
return v.reshape(v.shape[:-3] + (-1,))
def legfit(x, y, deg, rcond=None, full=False, w=None):
"""
Least squares fit of Legendre series to data.
Return the coefficients of a Legendre series of degree `deg` that is the
least squares fit to the data values `y` given at points `x`. If `y` is
1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple
fits are done, one for each column of `y`, and the resulting
coefficients are stored in the corresponding columns of a 2-D return.
The fitted polynomial(s) are in the form
.. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x),
where `n` is `deg`.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int
Degree of the fitting polynomial
rcond : float, optional
Relative condition number of the fit. Singular values smaller than
this relative to the largest singular value will be ignored. The
default value is len(x)*eps, where eps is the relative precision of
the float type, about 2e-16 in most cases.
full : bool, optional
Switch determining nature of return value. When it is False (the
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (`M`,), optional
Weights. If not None, the contribution of each point
``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the
weights are chosen so that the errors of the products ``w[i]*y[i]``
all have the same variance. The default value is None.
.. versionadded:: 1.5.0
Returns
-------
coef : ndarray, shape (deg + 1,) or (deg + 1, K)
Legendre coefficients ordered from low to high. If `y` was 2-D,
the coefficients for the data in column k of `y` are in column
`k`.
[residuals, rank, singular_values, rcond] : list
These values are only returned if `full` = True
resid -- sum of squared residuals of the least squares fit
rank -- the numerical rank of the scaled Vandermonde matrix
sv -- singular values of the scaled Vandermonde matrix
rcond -- value of `rcond`.
For more details, see `linalg.lstsq`.
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is
deficient. The warning is only raised if `full` = False. The
warnings can be turned off by
>>> import warnings
>>> warnings.simplefilter('ignore', RankWarning)
See Also
--------
chebfit, polyfit, lagfit, hermfit, hermefit
legval : Evaluates a Legendre series.
legvander : Vandermonde matrix of Legendre series.
legweight : Legendre weight function (= 1).
linalg.lstsq : Computes a least-squares fit from the matrix.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution is the coefficients of the Legendre series `p` that
minimizes the sum of the weighted squared errors
.. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2,
where :math:`w_j` are the weights. This problem is solved by setting up
as the (typically) overdetermined matrix equation
.. math:: V(x) * c = w * y,
where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the
coefficients to be solved for, `w` are the weights, and `y` are the
observed values. This equation is then solved using the singular value
decomposition of `V`.
If some of the singular values of `V` are so small that they are
neglected, then a `RankWarning` will be issued. This means that the
coefficient values may be poorly determined. Using a lower order fit
will usually get rid of the warning. The `rcond` parameter can also be
set to a value smaller than its default, but the resulting fit may be
spurious and have large contributions from roundoff error.
Fits using Legendre series are usually better conditioned than fits
using power series, but much can depend on the distribution of the
sample points and the smoothness of the data. If the quality of the fit
is inadequate splines may be a good alternative.
References
----------
.. [1] Wikipedia, "Curve fitting",
http://en.wikipedia.org/wiki/Curve_fitting
Examples
--------
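An illustrative fit of data that is exactly representable by a degree 2
series; since ``x**2 == (L_0 + 2*L_2)/3``, the recovered coefficients
should be close to ``[1/3, 0, 2/3]``:
>>> import numpy as np
>>> from numpy.polynomial import legendre as L
>>> x = np.linspace(-1, 1, 51)
>>> c = L.legfit(x, x**2, 2)  # expect approximately [0.333, 0, 0.667]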
"""
order = int(deg) + 1
x = np.asarray(x) + 0.0
y = np.asarray(y) + 0.0
# check arguments.
if deg < 0:
raise ValueError("expected deg >= 0")
if x.ndim != 1:
raise TypeError("expected 1D vector for x")
if x.size == 0:
raise TypeError("expected non-empty vector for x")
if y.ndim < 1 or y.ndim > 2:
raise TypeError("expected 1D or 2D array for y")
if len(x) != len(y):
raise TypeError("expected x and y to have same length")
# set up the least squares matrices in transposed form
lhs = legvander(x, deg).T
rhs = y.T
if w is not None:
w = np.asarray(w) + 0.0
if w.ndim != 1:
raise TypeError("expected 1D vector for w")
if len(x) != len(w):
raise TypeError("expected x and w to have same length")
# apply weights. Don't use inplace operations as they
# can cause problems with NA.
lhs = lhs * w
rhs = rhs * w
# set rcond
if rcond is None:
rcond = len(x)*np.finfo(x.dtype).eps
# Determine the norms of the design matrix columns.
if issubclass(lhs.dtype.type, np.complexfloating):
scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
else:
scl = np.sqrt(np.square(lhs).sum(1))
scl[scl == 0] = 1
# Solve the least squares problem.
c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond)
c = (c.T/scl).T
# warn on rank reduction
if rank != order and not full:
msg = "The fit may be poorly conditioned"
warnings.warn(msg, pu.RankWarning)
if full:
return c, [resids, rank, s, rcond]
else:
return c
def legcompanion(c):
"""Return the scaled companion matrix of c.
The basis polynomials are scaled so that the companion matrix is
symmetric when `c` is a Legendre basis polynomial. This provides
better eigenvalue estimates than the unscaled case and for basis
polynomials the eigenvalues are guaranteed to be real if
`numpy.linalg.eigvalsh` is used to obtain them.
Parameters
----------
c : array_like
1-D array of Legendre series coefficients ordered from low to high
degree.
Returns
-------
mat : ndarray
Scaled companion matrix of dimensions (deg, deg).
Notes
-----
.. versionadded::1.7.0
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) < 2:
raise ValueError('Series must have maximum degree of at least 1.')
if len(c) == 2:
return np.array([[-c[0]/c[1]]])
n = len(c) - 1
mat = np.zeros((n, n), dtype=c.dtype)
scl = 1./np.sqrt(2*np.arange(n) + 1)
top = mat.reshape(-1)[1::n+1]
bot = mat.reshape(-1)[n::n+1]
top[...] = np.arange(1, n)*scl[:n-1]*scl[1:n]
bot[...] = top
mat[:, -1] -= (c[:-1]/c[-1])*(scl/scl[-1])*(n/(2*n - 1))
return mat
def legroots(c):
"""
Compute the roots of a Legendre series.
Return the roots (a.k.a. "zeros") of the polynomial
.. math:: p(x) = \\sum_i c[i] * L_i(x).
Parameters
----------
c : 1-D array_like
1-D array of coefficients.
Returns
-------
out : ndarray
Array of the roots of the series. If all the roots are real,
then `out` is also real, otherwise it is complex.
See Also
--------
polyroots, chebroots, lagroots, hermroots, hermeroots
Notes
-----
The root estimates are obtained as the eigenvalues of the companion
matrix. Roots far from the origin of the complex plane may have large
errors due to the numerical instability of the series for such values.
Roots with multiplicity greater than 1 will also show larger errors as
the value of the series near such points is relatively insensitive to
errors in the roots. Isolated roots near the origin can be improved by
a few iterations of Newton's method.
The Legendre series basis polynomials aren't powers of ``x`` so the
results of this function may seem unintuitive.
Examples
--------
>>> import numpy.polynomial.legendre as leg
>>> leg.legroots((1, 2, 3, 4)) # 4L_3 + 3L_2 + 2L_1 + 1L_0, all real roots
array([-0.85099543, -0.11407192, 0.51506735])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) < 2:
return np.array([], dtype=c.dtype)
if len(c) == 2:
return np.array([-c[0]/c[1]])
m = legcompanion(c)
r = la.eigvals(m)
r.sort()
return r
def leggauss(deg):
"""
Gauss-Legendre quadrature.
Computes the sample points and weights for Gauss-Legendre quadrature.
These sample points and weights will correctly integrate polynomials of
degree :math:`2*deg - 1` or less over the interval :math:`[-1, 1]` with
the weight function :math:`f(x) = 1`.
Parameters
----------
deg : int
Number of sample points and weights. It must be >= 1.
Returns
-------
x : ndarray
1-D ndarray containing the sample points.
w : ndarray
1-D ndarray containing the weights.
Notes
-----
.. versionadded::1.7.0
The results have only been tested up to degree 100, higher degrees may
be problematic. The weights are determined by using the fact that
.. math:: w_k = c / (L'_n(x_k) * L_{n-1}(x_k))
where :math:`c` is a constant independent of :math:`k` and :math:`x_k`
is the k'th root of :math:`L_n`, and then scaling the results to get
the right value when integrating 1.
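Examples
--------
An illustrative three-point rule; the assigned value should be very close
to 2/3, the integral of ``x**2`` over [-1, 1]:
>>> from numpy.polynomial.legendre import leggauss
>>> x, w = leggauss(3)
>>> approx = (w * x**2).sum()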
"""
ideg = int(deg)
if ideg != deg or ideg < 1:
raise ValueError("deg must be a non-negative integer")
# first approximation of roots. We use the fact that the companion
# matrix is symmetric in this case in order to obtain better zeros.
c = np.array([0]*deg + [1])
m = legcompanion(c)
x = la.eigvalsh(m)
# improve roots by one application of Newton
dy = legval(x, c)
df = legval(x, legder(c))
x -= dy/df
# compute the weights. We scale the factor to avoid possible numerical
# overflow.
fm = legval(x, c[1:])
fm /= np.abs(fm).max()
df /= np.abs(df).max()
w = 1/(fm * df)
# for Legendre we can also symmetrize
w = (w + w[::-1])/2
x = (x - x[::-1])/2
# scale w to get the right value
w *= 2. / w.sum()
return x, w
def legweight(x):
"""
Weight function of the Legendre polynomials.
The weight function is :math:`1` and the interval of integration is
:math:`[-1, 1]`. The Legendre polynomials are orthogonal, but not
normalized, with respect to this weight function.
Parameters
----------
x : array_like
Values at which the weight function will be computed.
Returns
-------
w : ndarray
The weight function at `x`.
Notes
-----
.. versionadded::1.7.0
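Examples
--------
The weight is identically one, so the result is just an array of ones of
the same shape as `x`:
>>> import numpy as np
>>> from numpy.polynomial.legendre import legweight
>>> legweight(np.array([-1.0, 0.0, 1.0]))
array([ 1.,  1.,  1.])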
"""
w = x*0.0 + 1.0
return w
#
# Legendre series class
#
class Legendre(ABCPolyBase):
"""A Legendre series class.
The Legendre class provides the standard Python numerical methods
'+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the
attributes and methods listed in the `ABCPolyBase` documentation.
Parameters
----------
coef : array_like
Legendre coefficients in order of increasing degree, i.e.,
``(1, 2, 3)`` gives ``1*P_0(x) + 2*P_1(x) + 3*P_2(x)``.
domain : (2,) array_like, optional
Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
to the interval ``[window[0], window[1]]`` by shifting and scaling.
The default value is [-1, 1].
window : (2,) array_like, optional
Window, see `domain` for its use. The default value is [-1, 1].
.. versionadded:: 1.6.0
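Examples
--------
A brief illustrative use of the class interface; with the default domain
and window the call is equivalent to ``legval`` on the coefficient array:
>>> from numpy.polynomial import Legendre
>>> p = Legendre([1, 2, 3])
>>> p(0.5)
1.625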
"""
# Virtual Functions
_add = staticmethod(legadd)
_sub = staticmethod(legsub)
_mul = staticmethod(legmul)
_div = staticmethod(legdiv)
_pow = staticmethod(legpow)
_val = staticmethod(legval)
_int = staticmethod(legint)
_der = staticmethod(legder)
_fit = staticmethod(legfit)
_line = staticmethod(legline)
_roots = staticmethod(legroots)
_fromroots = staticmethod(legfromroots)
# Virtual properties
nickname = 'leg'
domain = np.array(legdomain)
window = np.array(legdomain)
| mit |
chewable/django | django/db/models/related.py | 23 | 2090 | class BoundRelatedObject(object):
def __init__(self, related_object, field_mapping, original):
self.relation = related_object
self.field_mappings = field_mapping[related_object.name]
def template_name(self):
raise NotImplementedError
def __repr__(self):
return repr(self.__dict__)
class RelatedObject(object):
def __init__(self, parent_model, model, field):
self.parent_model = parent_model
self.model = model
self.opts = model._meta
self.field = field
self.name = '%s:%s' % (self.opts.app_label, self.opts.module_name)
self.var_name = self.opts.object_name.lower()
def get_db_prep_lookup(self, lookup_type, value):
# Defer to the actual field definition for db prep
return self.field.get_db_prep_lookup(lookup_type, value)
def editable_fields(self):
"Get the fields in this class that should be edited inline."
return [f for f in self.opts.fields + self.opts.many_to_many if f.editable and f != self.field]
def __repr__(self):
return "<RelatedObject: %s related to %s>" % (self.name, self.field.name)
def bind(self, field_mapping, original, bound_related_object_class=BoundRelatedObject):
return bound_related_object_class(self, field_mapping, original)
def get_accessor_name(self):
# This method encapsulates the logic that decides what name to give an
# accessor descriptor that retrieves related many-to-one or
# many-to-many objects. It uses the lower-cased object_name + "_set",
# but this can be overridden with the "related_name" option.
if self.field.rel.multiple:
# If this is a symmetrical m2m relation on self, there is no reverse accessor.
if getattr(self.field.rel, 'symmetrical', False) and self.model == self.parent_model:
return None
return self.field.rel.related_name or (self.opts.object_name.lower() + '_set')
else:
return self.field.rel.related_name or (self.opts.object_name.lower())
| bsd-3-clause |
kostoulhs/android_kernel_samsung_msm8930-common | tools/perf/scripts/python/syscall-counts.py | 11181 | 1522 | # system call counts
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts.py [comm]\n";
for_comm = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
print "%-40s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
pajod/django-import-export | tests/core/tests/instance_loaders_tests.py | 7 | 1165 | from __future__ import unicode_literals
import tablib
from django.test import TestCase
from import_export import instance_loaders
from import_export import resources
from core.models import Book
class CachedInstanceLoaderTest(TestCase):
def setUp(self):
self.resource = resources.modelresource_factory(Book)()
self.dataset = tablib.Dataset(headers=['id', 'name', 'author_email'])
self.book = Book.objects.create(name="Some book")
self.book2 = Book.objects.create(name="Some other book")
row = [str(self.book.pk), 'Some book', '[email protected]']
self.dataset.append(row)
self.instance_loader = instance_loaders.CachedInstanceLoader(
self.resource, self.dataset)
def test_all_instances(self):
self.assertTrue(self.instance_loader.all_instances)
self.assertEqual(len(self.instance_loader.all_instances), 1)
self.assertEqual(list(self.instance_loader.all_instances.keys()),
[self.book.pk])
def test_get_instance(self):
obj = self.instance_loader.get_instance(self.dataset.dict[0])
self.assertEqual(obj, self.book)
| bsd-2-clause |
jiangxb1987/spark | examples/src/main/python/ml/binarizer_example.py | 121 | 1521 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
from pyspark.sql import SparkSession
# $example on$
from pyspark.ml.feature import Binarizer
# $example off$
if __name__ == "__main__":
spark = SparkSession\
.builder\
.appName("BinarizerExample")\
.getOrCreate()
# $example on$
continuousDataFrame = spark.createDataFrame([
(0, 0.1),
(1, 0.8),
(2, 0.2)
], ["id", "feature"])
binarizer = Binarizer(threshold=0.5, inputCol="feature", outputCol="binarized_feature")
binarizedDataFrame = binarizer.transform(continuousDataFrame)
print("Binarizer output with Threshold = %f" % binarizer.getThreshold())
binarizedDataFrame.show()
# $example off$
spark.stop()
| apache-2.0 |
scrollback/kuma | vendor/packages/ipython/IPython/__init__.py | 6 | 2849 | # -*- coding: utf-8 -*-
"""
IPython -- An enhanced Interactive Python
One of Python's nicest features is its interactive interpreter. This allows
very fast testing of ideas without the overhead of creating test files as is
typical in most programming languages. However, the interpreter supplied with
the standard Python distribution is fairly primitive (and IDLE isn't really
much better).
IPython tries to:
i - provide an efficient environment for interactive work in Python
programming. It tries to address what we see as shortcomings of the standard
Python prompt, and adds many features to make interactive work much more
efficient.
ii - offer a flexible framework so that it can be used as the base
environment for other projects and problems where Python can be the
underlying language. Specifically scientific environments like Mathematica,
IDL and Mathcad inspired its design, but similar ideas can be useful in many
fields. Python is a fabulous language for implementing this kind of system
(due to its dynamic and introspective features), and with suitable libraries
entire systems could be built leveraging Python's power.
iii - serve as an embeddable, ready to go interpreter for your own programs.
IPython requires Python 2.4 or newer.
"""
#*****************************************************************************
# Copyright (C) 2008-2009 The IPython Development Team
# Copyright (C) 2001-2007 Fernando Perez. <[email protected]>
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#*****************************************************************************
# Enforce proper version requirements
import sys
if sys.version[0:3] < '2.4':
raise ImportError('Python Version 2.4 or above is required for IPython.')
# Make it easy to import extensions - they are always directly on pythonpath.
# Therefore, non-IPython modules can be added to Extensions directory
import os
sys.path.append(os.path.dirname(__file__) + "/Extensions")
# Define what gets imported with a 'from IPython import *'
__all__ = ['ipapi','generics','ipstruct','Release','Shell']
# Load __all__ in IPython namespace so that a simple 'import IPython' gives
# access to them via IPython.<name>
glob,loc = globals(),locals()
for name in __all__:
#print 'Importing: ',name # dbg
__import__(name,glob,loc,[])
import Shell
# Release data
from IPython import Release # do it explicitly so pydoc can see it - pydoc bug
__author__ = '%s <%s>\n%s <%s>\n%s <%s>' % \
( Release.authors['Fernando'] + Release.authors['Janko'] + \
Release.authors['Nathan'] )
__license__ = Release.license
__version__ = Release.version
__revision__ = Release.revision
# Namespace cleanup
del name,glob,loc
| mpl-2.0 |
junhuac/MQUIC | depot_tools/third_party/coverage/collector.py | 209 | 13412 | """Raw data collector for Coverage."""
import os, sys, threading
try:
# Use the C extension code when we can, for speed.
from coverage.tracer import CTracer # pylint: disable=F0401,E0611
except ImportError:
# Couldn't import the C extension, maybe it isn't built.
if os.getenv('COVERAGE_TEST_TRACER') == 'c':
# During testing, we use the COVERAGE_TEST_TRACER env var to indicate
# that we've fiddled with the environment to test this fallback code.
# If we thought we had a C tracer, but couldn't import it, then exit
# quickly and clearly instead of dribbling confusing errors. I'm using
# sys.exit here instead of an exception because an exception here
# causes all sorts of other noise in unittest.
sys.stderr.write(
"*** COVERAGE_TEST_TRACER is 'c' but can't import CTracer!\n"
)
sys.exit(1)
CTracer = None
class PyTracer(object):
"""Python implementation of the raw data tracer."""
# Because of poor implementations of trace-function-manipulating tools,
# the Python trace function must be kept very simple. In particular, there
# must be only one function ever set as the trace function, both through
# sys.settrace, and as the return value from the trace function. Put
# another way, the trace function must always return itself. It cannot
# swap in other functions, or return None to avoid tracing a particular
# frame.
#
# The trace manipulator that introduced this restriction is DecoratorTools,
# which sets a trace function, and then later restores the pre-existing one
# by calling sys.settrace with a function it found in the current frame.
#
# Systems that use DecoratorTools (or similar trace manipulations) must use
# PyTracer to get accurate results. The command-line --timid argument is
# used to force the use of this tracer.
def __init__(self):
self.data = None
self.should_trace = None
self.should_trace_cache = None
self.warn = None
self.cur_file_data = None
self.last_line = 0
self.data_stack = []
self.last_exc_back = None
self.last_exc_firstlineno = 0
self.arcs = False
self.thread = None
self.stopped = False
def _trace(self, frame, event, arg_unused):
"""The trace function passed to sys.settrace."""
if self.stopped:
return
if 0:
sys.stderr.write("trace event: %s %r @%d\n" % (
event, frame.f_code.co_filename, frame.f_lineno
))
if self.last_exc_back:
if frame == self.last_exc_back:
# Someone forgot a return event.
if self.arcs and self.cur_file_data:
pair = (self.last_line, -self.last_exc_firstlineno)
self.cur_file_data[pair] = None
self.cur_file_data, self.last_line = self.data_stack.pop()
self.last_exc_back = None
if event == 'call':
# Entering a new function context. Decide if we should trace
# in this file.
self.data_stack.append((self.cur_file_data, self.last_line))
filename = frame.f_code.co_filename
if filename not in self.should_trace_cache:
tracename = self.should_trace(filename, frame)
self.should_trace_cache[filename] = tracename
else:
tracename = self.should_trace_cache[filename]
#print("called, stack is %d deep, tracename is %r" % (
# len(self.data_stack), tracename))
if tracename:
if tracename not in self.data:
self.data[tracename] = {}
self.cur_file_data = self.data[tracename]
else:
self.cur_file_data = None
# Set the last_line to -1 because the next arc will be entering a
# code block, indicated by (-1, n).
self.last_line = -1
elif event == 'line':
# Record an executed line.
if self.cur_file_data is not None:
if self.arcs:
#print("lin", self.last_line, frame.f_lineno)
self.cur_file_data[(self.last_line, frame.f_lineno)] = None
else:
#print("lin", frame.f_lineno)
self.cur_file_data[frame.f_lineno] = None
self.last_line = frame.f_lineno
elif event == 'return':
if self.arcs and self.cur_file_data:
first = frame.f_code.co_firstlineno
self.cur_file_data[(self.last_line, -first)] = None
# Leaving this function, pop the filename stack.
self.cur_file_data, self.last_line = self.data_stack.pop()
#print("returned, stack is %d deep" % (len(self.data_stack)))
elif event == 'exception':
#print("exc", self.last_line, frame.f_lineno)
self.last_exc_back = frame.f_back
self.last_exc_firstlineno = frame.f_code.co_firstlineno
return self._trace
def start(self):
"""Start this Tracer.
Return a Python function suitable for use with sys.settrace().
"""
self.thread = threading.currentThread()
sys.settrace(self._trace)
return self._trace
def stop(self):
"""Stop this Tracer."""
self.stopped = True
if self.thread != threading.currentThread():
# Called on a different thread than started us: we can't unhook
# ourselves, but we've set the flag that we should stop, so we won't
# do any more tracing.
return
if hasattr(sys, "gettrace") and self.warn:
if sys.gettrace() != self._trace:
msg = "Trace function changed, measurement is likely wrong: %r"
self.warn(msg % (sys.gettrace(),))
#print("Stopping tracer on %s" % threading.current_thread().ident)
sys.settrace(None)
def get_stats(self):
"""Return a dictionary of statistics, or None."""
return None
class Collector(object):
"""Collects trace data.
Creates a Tracer object for each thread, since they track stack
information. Each Tracer points to the same shared data, contributing
traced data points.
When the Collector is started, it creates a Tracer for the current thread,
and installs a function to create Tracers for each new thread started.
When the Collector is stopped, all active Tracers are stopped.
Threads started while the Collector is stopped will never have Tracers
associated with them.
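A minimal usage sketch (the ``should_trace`` and ``warn`` callables and the
file name below are hypothetical placeholders supplied by the caller, not
part of this module):

    import sys

    def should_trace(filename, frame):
        # Trace only the one (hypothetical) file we care about.
        return filename if filename.endswith("myscript.py") else None

    def warn(msg):
        sys.stderr.write(msg)

    collector = Collector(should_trace, timid=False, branch=False, warn=warn)
    collector.start()
    # ... run the code being measured ...
    collector.stop()
    line_data = collector.get_line_data()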
"""
# The stack of active Collectors. Collectors are added here when started,
# and popped when stopped. Collectors on the stack are paused when not
# the top, and resumed when they become the top again.
_collectors = []
def __init__(self, should_trace, timid, branch, warn):
"""Create a collector.
`should_trace` is a function, taking a filename, and returning a
canonicalized filename, or None depending on whether the file should
be traced or not.
If `timid` is true, then a slower simpler trace function will be
used. This is important for some environments where manipulation of
tracing functions make the faster more sophisticated trace function not
operate properly.
If `branch` is true, then branches will be measured. This involves
collecting data on which statements followed each other (arcs). Use
`get_arc_data` to get the arc data.
`warn` is a warning function, taking a single string message argument,
to be used if a warning needs to be issued.
"""
self.should_trace = should_trace
self.warn = warn
self.branch = branch
self.reset()
if timid:
# Being timid: use the simple Python trace function.
self._trace_class = PyTracer
else:
# Being fast: use the C Tracer if it is available, else the Python
# trace function.
self._trace_class = CTracer or PyTracer
def __repr__(self):
return "<Collector at 0x%x>" % id(self)
def tracer_name(self):
"""Return the class name of the tracer we're using."""
return self._trace_class.__name__
def reset(self):
"""Clear collected data, and prepare to collect more."""
# A dictionary mapping filenames to dicts with linenumber keys,
# or mapping filenames to dicts with linenumber pairs as keys.
self.data = {}
# A cache of the results from should_trace, the decision about whether
# to trace execution in a file. A dict of filename to (filename or
# None).
self.should_trace_cache = {}
# Our active Tracers.
self.tracers = []
def _start_tracer(self):
"""Start a new Tracer object, and store it in self.tracers."""
tracer = self._trace_class()
tracer.data = self.data
tracer.arcs = self.branch
tracer.should_trace = self.should_trace
tracer.should_trace_cache = self.should_trace_cache
tracer.warn = self.warn
fn = tracer.start()
self.tracers.append(tracer)
return fn
# The trace function has to be set individually on each thread before
# execution begins. Ironically, the only support the threading module has
# for running code before the thread main is the tracing function. So we
# install this as a trace function, and the first time it's called, it does
# the real trace installation.
def _installation_trace(self, frame_unused, event_unused, arg_unused):
"""Called on new threads, installs the real tracer."""
# Remove ourselves as the trace function
sys.settrace(None)
# Install the real tracer.
fn = self._start_tracer()
# Invoke the real trace function with the current event, to be sure
# not to lose an event.
if fn:
fn = fn(frame_unused, event_unused, arg_unused)
# Return the new trace function to continue tracing in this scope.
return fn
def start(self):
"""Start collecting trace information."""
if self._collectors:
self._collectors[-1].pause()
self._collectors.append(self)
#print("Started: %r" % self._collectors, file=sys.stderr)
# Check to see whether we had a fullcoverage tracer installed.
traces0 = []
if hasattr(sys, "gettrace"):
fn0 = sys.gettrace()
if fn0:
tracer0 = getattr(fn0, '__self__', None)
if tracer0:
traces0 = getattr(tracer0, 'traces', [])
# Install the tracer on this thread.
fn = self._start_tracer()
for args in traces0:
(frame, event, arg), lineno = args
try:
fn(frame, event, arg, lineno=lineno)
except TypeError:
raise Exception(
"fullcoverage must be run with the C trace function."
)
# Install our installation tracer in threading, to jump start other
# threads.
threading.settrace(self._installation_trace)
def stop(self):
"""Stop collecting trace information."""
#print >>sys.stderr, "Stopping: %r" % self._collectors
assert self._collectors
assert self._collectors[-1] is self
self.pause()
self.tracers = []
# Remove this Collector from the stack, and resume the one underneath
# (if any).
self._collectors.pop()
if self._collectors:
self._collectors[-1].resume()
def pause(self):
"""Pause tracing, but be prepared to `resume`."""
for tracer in self.tracers:
tracer.stop()
stats = tracer.get_stats()
if stats:
print("\nCoverage.py tracer stats:")
for k in sorted(stats.keys()):
print("%16s: %s" % (k, stats[k]))
threading.settrace(None)
def resume(self):
"""Resume tracing after a `pause`."""
for tracer in self.tracers:
tracer.start()
threading.settrace(self._installation_trace)
def get_line_data(self):
"""Return the line data collected.
Data is { filename: { lineno: None, ...}, ...}
"""
if self.branch:
# If we were measuring branches, then we have to re-build the dict
# to show line data.
line_data = {}
for f, arcs in self.data.items():
line_data[f] = ldf = {}
for l1, _ in list(arcs.keys()):
if l1:
ldf[l1] = None
return line_data
else:
return self.data
def get_arc_data(self):
"""Return the arc data collected.
Data is { filename: { (l1, l2): None, ...}, ...}
Note that no data is collected or returned if the Collector wasn't
created with `branch` true.
"""
if self.branch:
return self.data
else:
return {}
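# ----------------------------------------------------------------------------
# Added usage sketch (not part of the original module).  `should_trace_fn` and
# `run_workload` are hypothetical callables supplied by the caller; this only
# illustrates the start/stop/get_line_data cycle described in the docstrings
# above.
def _example_collector_usage(should_trace_fn, run_workload):
    """Run `run_workload` under a Collector and return its line data."""
    collector = Collector(should_trace_fn, timid=False, branch=False,
                          warn=lambda msg: sys.stderr.write(msg + "\n"))
    collector.start()
    try:
        run_workload()
    finally:
        collector.stop()
    # Shape: { filename: { lineno: None, ... }, ... }, per get_line_data().
    return collector.get_line_data()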
| mit |
goldeneye-source/ges-python | lib/test/test_curses.py | 67 | 12003 | #
# Test script for the curses module
#
# This script doesn't actually display anything very coherent, but it
# does call every method and function.
#
# Functions not tested: {def,reset}_{shell,prog}_mode, getch(), getstr(),
# init_color()
# Only called, not tested: getmouse(), ungetmouse()
#
import sys, tempfile, os
# Optionally test curses module. This currently requires that the
# 'curses' resource be given on the regrtest command line using the -u
# option. If not available, nothing after this line will be executed.
import unittest
from test.support import requires, import_module
import inspect
requires('curses')
# If either of these don't exist, skip the tests.
curses = import_module('curses')
curses.panel = import_module('curses.panel')
# XXX: if newterm was supported we could use it instead of initscr and not exit
term = os.environ.get('TERM')
if not term or term == 'unknown':
raise unittest.SkipTest("$TERM=%r, calling initscr() may cause exit" % term)
if sys.platform == "cygwin":
raise unittest.SkipTest("cygwin's curses mostly just hangs")
def window_funcs(stdscr):
"Test the methods of windows"
win = curses.newwin(10,10)
win = curses.newwin(5,5, 5,5)
win2 = curses.newwin(15,15, 5,5)
for meth in [stdscr.addch, stdscr.addstr]:
for args in [('a'), ('a', curses.A_BOLD),
(4,4, 'a'), (5,5, 'a', curses.A_BOLD)]:
meth(*args)
for meth in [stdscr.box, stdscr.clear, stdscr.clrtobot,
stdscr.clrtoeol, stdscr.cursyncup, stdscr.delch,
stdscr.deleteln, stdscr.erase, stdscr.getbegyx,
stdscr.getbkgd, stdscr.getkey, stdscr.getmaxyx,
stdscr.getparyx, stdscr.getyx, stdscr.inch,
stdscr.insertln, stdscr.instr, stdscr.is_wintouched,
win.noutrefresh, stdscr.redrawwin, stdscr.refresh,
stdscr.standout, stdscr.standend, stdscr.syncdown,
stdscr.syncup, stdscr.touchwin, stdscr.untouchwin]:
meth()
stdscr.addnstr('1234', 3)
stdscr.addnstr('1234', 3, curses.A_BOLD)
stdscr.addnstr(4,4, '1234', 3)
stdscr.addnstr(5,5, '1234', 3, curses.A_BOLD)
stdscr.attron(curses.A_BOLD)
stdscr.attroff(curses.A_BOLD)
stdscr.attrset(curses.A_BOLD)
stdscr.bkgd(' ')
stdscr.bkgd(' ', curses.A_REVERSE)
stdscr.bkgdset(' ')
stdscr.bkgdset(' ', curses.A_REVERSE)
win.border(65, 66, 67, 68,
69, 70, 71, 72)
win.border('|', '!', '-', '_',
'+', '\\', '#', '/')
try:
win.border(65, 66, 67, 68,
69, [], 71, 72)
except TypeError:
pass
else:
raise RuntimeError("Expected win.border() to raise TypeError")
stdscr.clearok(1)
win4 = stdscr.derwin(2,2)
win4 = stdscr.derwin(1,1, 5,5)
win4.mvderwin(9,9)
stdscr.echochar('a')
stdscr.echochar('a', curses.A_BOLD)
stdscr.hline('-', 5)
stdscr.hline('-', 5, curses.A_BOLD)
stdscr.hline(1,1,'-', 5)
stdscr.hline(1,1,'-', 5, curses.A_BOLD)
stdscr.idcok(1)
stdscr.idlok(1)
stdscr.immedok(1)
stdscr.insch('c')
stdscr.insdelln(1)
stdscr.insnstr('abc', 3)
stdscr.insnstr('abc', 3, curses.A_BOLD)
stdscr.insnstr(5, 5, 'abc', 3)
stdscr.insnstr(5, 5, 'abc', 3, curses.A_BOLD)
stdscr.insstr('def')
stdscr.insstr('def', curses.A_BOLD)
stdscr.insstr(5, 5, 'def')
stdscr.insstr(5, 5, 'def', curses.A_BOLD)
stdscr.is_linetouched(0)
stdscr.keypad(1)
stdscr.leaveok(1)
stdscr.move(3,3)
win.mvwin(2,2)
stdscr.nodelay(1)
stdscr.notimeout(1)
win2.overlay(win)
win2.overwrite(win)
win2.overlay(win, 1, 2, 2, 1, 3, 3)
win2.overwrite(win, 1, 2, 2, 1, 3, 3)
stdscr.redrawln(1,2)
stdscr.scrollok(1)
stdscr.scroll()
stdscr.scroll(2)
stdscr.scroll(-3)
stdscr.move(12, 2)
stdscr.setscrreg(10,15)
win3 = stdscr.subwin(10,10)
win3 = stdscr.subwin(10,10, 5,5)
stdscr.syncok(1)
stdscr.timeout(5)
stdscr.touchline(5,5)
stdscr.touchline(5,5,0)
stdscr.vline('a', 3)
stdscr.vline('a', 3, curses.A_STANDOUT)
stdscr.chgat(5, 2, 3, curses.A_BLINK)
stdscr.chgat(3, curses.A_BOLD)
stdscr.chgat(5, 8, curses.A_UNDERLINE)
stdscr.chgat(curses.A_BLINK)
stdscr.refresh()
stdscr.vline(1,1, 'a', 3)
stdscr.vline(1,1, 'a', 3, curses.A_STANDOUT)
if hasattr(curses, 'resize'):
stdscr.resize()
if hasattr(curses, 'enclose'):
stdscr.enclose()
def module_funcs(stdscr):
"Test module-level functions"
for func in [curses.baudrate, curses.beep, curses.can_change_color,
curses.cbreak, curses.def_prog_mode, curses.doupdate,
curses.filter, curses.flash, curses.flushinp,
curses.has_colors, curses.has_ic, curses.has_il,
curses.isendwin, curses.killchar, curses.longname,
curses.nocbreak, curses.noecho, curses.nonl,
curses.noqiflush, curses.noraw,
curses.reset_prog_mode, curses.termattrs,
curses.termname, curses.erasechar, curses.getsyx]:
func()
# Functions that actually need arguments
if curses.tigetstr("cnorm"):
curses.curs_set(1)
curses.delay_output(1)
curses.echo() ; curses.echo(1)
f = tempfile.TemporaryFile()
stdscr.putwin(f)
f.seek(0)
curses.getwin(f)
f.close()
curses.halfdelay(1)
curses.intrflush(1)
curses.meta(1)
curses.napms(100)
curses.newpad(50,50)
win = curses.newwin(5,5)
win = curses.newwin(5,5, 1,1)
curses.nl() ; curses.nl(1)
curses.putp(b'abc')
curses.qiflush()
curses.raw() ; curses.raw(1)
curses.setsyx(5,5)
curses.tigetflag('hc')
curses.tigetnum('co')
curses.tigetstr('cr')
curses.tparm(b'cr')
curses.typeahead(sys.__stdin__.fileno())
curses.unctrl('a')
curses.ungetch('a')
curses.use_env(1)
# Functions only available on a few platforms
if curses.has_colors():
curses.start_color()
curses.init_pair(2, 1,1)
curses.color_content(1)
curses.color_pair(2)
curses.pair_content(curses.COLOR_PAIRS - 1)
curses.pair_number(0)
if hasattr(curses, 'use_default_colors'):
curses.use_default_colors()
if hasattr(curses, 'keyname'):
curses.keyname(13)
if hasattr(curses, 'has_key'):
curses.has_key(13)
if hasattr(curses, 'getmouse'):
(availmask, oldmask) = curses.mousemask(curses.BUTTON1_PRESSED)
        # An availmask of 0 indicates that mouse support is not available.
if availmask != 0:
curses.mouseinterval(10)
# just verify these don't cause errors
curses.ungetmouse(0, 0, 0, 0, curses.BUTTON1_PRESSED)
m = curses.getmouse()
if hasattr(curses, 'is_term_resized'):
curses.is_term_resized(*stdscr.getmaxyx())
if hasattr(curses, 'resizeterm'):
curses.resizeterm(*stdscr.getmaxyx())
if hasattr(curses, 'resize_term'):
curses.resize_term(*stdscr.getmaxyx())
def unit_tests():
from curses import ascii
for ch, expected in [('a', 'a'), ('A', 'A'),
(';', ';'), (' ', ' '),
('\x7f', '^?'), ('\n', '^J'), ('\0', '^@'),
# Meta-bit characters
('\x8a', '!^J'), ('\xc1', '!A'),
]:
if ascii.unctrl(ch) != expected:
print('curses.unctrl fails on character', repr(ch))
def test_userptr_without_set(stdscr):
w = curses.newwin(10, 10)
p = curses.panel.new_panel(w)
# try to access userptr() before calling set_userptr() -- segfaults
try:
p.userptr()
raise RuntimeError('userptr should fail since not set')
except curses.panel.error:
pass
def test_userptr_memory_leak(stdscr):
w = curses.newwin(10, 10)
p = curses.panel.new_panel(w)
obj = object()
nrefs = sys.getrefcount(obj)
for i in range(100):
p.set_userptr(obj)
p.set_userptr(None)
if sys.getrefcount(obj) != nrefs:
raise RuntimeError("set_userptr leaked references")
def test_userptr_segfault(stdscr):
panel = curses.panel.new_panel(stdscr)
class A:
def __del__(self):
panel.set_userptr(None)
panel.set_userptr(A())
panel.set_userptr(None)
def test_resize_term(stdscr):
if hasattr(curses, 'resizeterm'):
lines, cols = curses.LINES, curses.COLS
curses.resizeterm(lines - 1, cols + 1)
if curses.LINES != lines - 1 or curses.COLS != cols + 1:
raise RuntimeError("Expected resizeterm to update LINES and COLS")
def test_issue6243(stdscr):
curses.ungetch(1025)
stdscr.getkey()
def test_unget_wch(stdscr):
if not hasattr(curses, 'unget_wch'):
return
encoding = stdscr.encoding
for ch in ('a', '\xe9', '\u20ac', '\U0010FFFF'):
try:
ch.encode(encoding)
except UnicodeEncodeError:
continue
try:
curses.unget_wch(ch)
except Exception as err:
raise Exception("unget_wch(%a) failed with encoding %s: %s"
% (ch, stdscr.encoding, err))
read = stdscr.get_wch()
if read != ch:
raise AssertionError("%r != %r" % (read, ch))
code = ord(ch)
curses.unget_wch(code)
read = stdscr.get_wch()
if read != ch:
raise AssertionError("%r != %r" % (read, ch))
def test_issue10570():
b = curses.tparm(curses.tigetstr("cup"), 5, 3)
assert type(b) is bytes
curses.putp(b)
def test_encoding(stdscr):
import codecs
encoding = stdscr.encoding
codecs.lookup(encoding)
try:
stdscr.encoding = 10
except TypeError:
pass
else:
raise AssertionError("TypeError not raised")
stdscr.encoding = encoding
try:
del stdscr.encoding
except TypeError:
pass
else:
raise AssertionError("TypeError not raised")
def test_issue21088(stdscr):
#
# http://bugs.python.org/issue21088
#
# the bug:
# when converting curses.window.addch to Argument Clinic
# the first two parameters were switched.
# if someday we can represent the signature of addch
# we will need to rewrite this test.
try:
signature = inspect.signature(stdscr.addch)
        if signature:
            raise AssertionError("did not expect addch to expose a signature")
except ValueError:
# not generating a signature is fine.
pass
# So. No signature for addch.
# But Argument Clinic gave us a human-readable equivalent
# as the first line of the docstring. So we parse that,
# and ensure that the parameters appear in the correct order.
# Since this is parsing output from Argument Clinic, we can
# be reasonably certain the generated parsing code will be
# correct too.
human_readable_signature = stdscr.addch.__doc__.split("\n")[0]
offset = human_readable_signature.find("[y, x,]")
    assert offset >= 0, "expected optional [y, x,] arguments in addch docstring"
def main(stdscr):
curses.savetty()
try:
module_funcs(stdscr)
window_funcs(stdscr)
test_userptr_without_set(stdscr)
test_userptr_memory_leak(stdscr)
test_userptr_segfault(stdscr)
test_resize_term(stdscr)
test_issue6243(stdscr)
test_unget_wch(stdscr)
test_issue10570()
test_encoding(stdscr)
test_issue21088(stdscr)
finally:
curses.resetty()
def test_main():
if not sys.__stdout__.isatty():
raise unittest.SkipTest("sys.__stdout__ is not a tty")
# testing setupterm() inside initscr/endwin
# causes terminal breakage
curses.setupterm(fd=sys.__stdout__.fileno())
try:
stdscr = curses.initscr()
main(stdscr)
finally:
curses.endwin()
unit_tests()
if __name__ == '__main__':
curses.wrapper(main)
unit_tests()
| gpl-3.0 |
dharmabumstead/ansible | test/units/modules/network/ios/test_ios_vrf.py | 38 | 7526 | #
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests.mock import patch
from ansible.modules.network.ios import ios_vrf
from units.modules.utils import set_module_args
from .ios_module import TestIosModule, load_fixture
class TestIosVrfModule(TestIosModule):
module = ios_vrf
def setUp(self):
super(TestIosVrfModule, self).setUp()
self.mock_get_config = patch('ansible.modules.network.ios.ios_vrf.get_config')
self.get_config = self.mock_get_config.start()
self.mock_load_config = patch('ansible.modules.network.ios.ios_vrf.load_config')
self.load_config = self.mock_load_config.start()
self.mock_exec_command = patch('ansible.modules.network.ios.ios_vrf.exec_command')
self.exec_command = self.mock_exec_command.start()
def tearDown(self):
super(TestIosVrfModule, self).tearDown()
self.mock_get_config.stop()
self.mock_load_config.stop()
self.mock_exec_command.stop()
def load_fixtures(self, commands=None):
self.get_config.return_value = load_fixture('ios_vrf_config.cfg')
self.exec_command.return_value = (0, load_fixture('ios_vrf_config.cfg').strip(), None)
self.load_config.return_value = None
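    # Note (added): each test below follows the same pattern -- set_module_args()
    # supplies the declarative parameters, and execute_module() (provided by the
    # TestIosModule base class) asserts on whether a change is reported and on
    # the exact CLI commands the module would push to the device.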
def test_ios_vrf_name(self):
set_module_args(dict(name='test_4'))
commands = ['vrf definition test_4', 'address-family ipv4', 'exit', 'address-family ipv6', 'exit']
self.execute_module(changed=True, commands=commands, sort=False)
def test_ios_vrf_name_unchanged(self):
set_module_args(dict(name='test_1', rd='1:100', description='test vrf 1'))
self.execute_module()
def test_ios_vrf_description(self):
set_module_args(dict(name='test_1', description='test string'))
commands = ['vrf definition test_1', 'address-family ipv4', 'exit', 'address-family ipv6', 'exit', 'description test string']
self.execute_module(changed=True, commands=commands, sort=False)
def test_ios_vrf_rd(self):
set_module_args(dict(name='test_1', rd='2:100'))
commands = ['vrf definition test_1', 'address-family ipv4', 'exit', 'address-family ipv6', 'exit', 'rd 2:100']
self.execute_module(changed=True, commands=commands, sort=False)
def test_ios_vrf_interfaces(self):
set_module_args(dict(name='test_1', interfaces=['Ethernet1']))
commands = ['interface Ethernet2', 'no vrf forwarding test_1', 'interface Ethernet1', 'vrf forwarding test_1', 'ip address 1.2.3.4/5']
self.execute_module(changed=True, commands=commands, sort=False)
def test_ios_vrf_state_absent(self):
set_module_args(dict(name='test_1', state='absent'))
commands = ['no vrf definition test_1']
self.execute_module(changed=True, commands=commands)
def test_ios_vrf_purge_all(self):
set_module_args(dict(purge=True))
commands = ['no vrf definition test_1', 'no vrf definition test_2', 'no vrf definition test_3']
self.execute_module(changed=True, commands=commands)
def test_ios_vrf_purge_all_but_one(self):
set_module_args(dict(name='test_1', purge=True))
commands = ['no vrf definition test_2', 'no vrf definition test_3']
self.execute_module(changed=True, commands=commands)
def test_ios_vrfs_no_purge(self):
vrfs = [{'name': 'test_1'}, {'name': 'test_4'}]
set_module_args(dict(vrfs=vrfs))
commands = ['vrf definition test_4', 'address-family ipv4', 'exit', 'address-family ipv6', 'exit']
self.execute_module(changed=True, commands=commands)
def test_ios_vrfs_purge(self):
vrfs = [{'name': 'test_1'}, {'name': 'test_4'}]
set_module_args(dict(vrfs=vrfs, purge=True))
commands = ['vrf definition test_4', 'address-family ipv4', 'exit', 'address-family ipv6', 'exit', 'no vrf definition test_2',
'no vrf definition test_3']
self.execute_module(changed=True, commands=commands)
def test_ios_vrfs_global_arg(self):
vrfs = [{'name': 'test_1'}, {'name': 'test_2'}]
set_module_args(dict(vrfs=vrfs, description='test string'))
commands = ['vrf definition test_1', 'address-family ipv4', 'exit', 'address-family ipv6', 'exit', 'description test string', 'vrf definition test_2',
'address-family ipv4', 'exit', 'address-family ipv6', 'exit', 'description test string']
self.execute_module(changed=True, commands=commands, sort=False)
def test_ios_vrfs_local_override_description(self):
vrfs = [{'name': 'test_1', 'description': 'test vrf 1'}, {'name': 'test_2'}]
set_module_args(dict(vrfs=vrfs, description='test string'))
commands = ['vrf definition test_2', 'address-family ipv4', 'exit', 'address-family ipv6', 'exit', 'description test string']
self.execute_module(changed=True, commands=commands, sort=False)
def test_ios_vrfs_local_override_state(self):
vrfs = [{'name': 'test_1', 'state': 'absent'}, {'name': 'test_2'}]
set_module_args(dict(vrfs=vrfs, description='test string'))
commands = ['no vrf definition test_1', 'vrf definition test_2', 'address-family ipv4', 'exit', 'address-family ipv6', 'exit',
'description test string']
self.execute_module(changed=True, commands=commands, sort=False)
def test_ios_vrf_route_both(self):
set_module_args(dict(name='test_5', rd='2:100', route_both=['2:100', '3:100']))
commands = ['vrf definition test_5', 'address-family ipv4', 'exit', 'address-family ipv6', 'exit', 'rd 2:100', 'route-target both 2:100',
'route-target both 3:100']
self.execute_module(changed=True, commands=commands, sort=False)
def test_ios_vrf_route_import(self):
set_module_args(dict(name='test_6', rd='3:100', route_import=['3:100', '4:100']))
commands = ['vrf definition test_6', 'address-family ipv4', 'exit', 'address-family ipv6', 'exit', 'rd 3:100', 'route-target import 3:100',
'route-target import 4:100']
self.execute_module(changed=True, commands=commands, sort=False)
def test_ios_vrf_route_export(self):
set_module_args(dict(name='test_7', rd='4:100', route_export=['3:100', '4:100']))
commands = ['vrf definition test_7', 'address-family ipv4', 'exit', 'address-family ipv6', 'exit', 'rd 4:100', 'route-target export 3:100',
'route-target export 4:100']
self.execute_module(changed=True, commands=commands, sort=False)
def test_ios_vrf_route_both_exclusive(self):
set_module_args(dict(name='test_8', rd='5:100', route_both=['3:100', '4:100'], route_export=['3:100', '4:100']))
self.execute_module(failed=True)
| gpl-3.0 |
WillianPaiva/1flow | oneflow/core/admin/website.py | 2 | 2295 | # -*- coding: utf-8 -*-
u"""
Copyright 2013-2014 Olivier Cortès <[email protected]>.
This file is part of the 1flow project.
1flow is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
1flow is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public
License along with 1flow. If not, see http://www.gnu.org/licenses/
"""
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _ # , pgettext_lazy
from ..models.reldb import ( # NOQA
WebSite,
)
class WebSiteAdmin(admin.ModelAdmin):
""" WebSite admin class. """
list_display = (
'id', 'image_display', 'name',
'slug', 'url',
'fetch_limit_nr',
'feeds_count_display',
)
    list_display_links = ('id', 'image_display', 'name', 'slug', )
list_filter = ('fetch_limit_nr', )
ordering = ('name', )
# date_hierarchy = 'date_created'
change_list_template = "admin/change_list_filter_sidebar.html"
change_list_filter_template = "admin/filter_listing.html"
search_fields = ('name', 'slug', 'url', )
def image_display(self, obj):
""" FILL ME, pep257. """
if obj.image:
image_url = obj.image.url
elif obj.image_url:
image_url = obj.image_url
else:
return u'—'
return (u'<img src="{0}" style="max-width: 48px; max-height: 48px'
u'"/>').format(image_url)
image_display.allow_tags = True
image_display.short_description = _(u'image')
# image_display.admin_order_field = 'feeds__count'
def feeds_count_display(self, obj):
""" FILL ME, pep257. """
try:
return obj.feeds.count()
except:
return u'—'
# feeds_count_display.allow_tags = True
feeds_count_display.short_description = _(u'Feeds')
feeds_count_display.admin_order_field = 'feeds__count'
| agpl-3.0 |
hainm/numpy | numpy/distutils/fcompiler/pathf95.py | 229 | 1209 | from __future__ import division, absolute_import, print_function
from numpy.distutils.fcompiler import FCompiler
compilers = ['PathScaleFCompiler']
class PathScaleFCompiler(FCompiler):
compiler_type = 'pathf95'
description = 'PathScale Fortran Compiler'
version_pattern = r'PathScale\(TM\) Compiler Suite: Version (?P<version>[\d.]+)'
executables = {
'version_cmd' : ["pathf95", "-version"],
'compiler_f77' : ["pathf95", "-fixedform"],
'compiler_fix' : ["pathf95", "-fixedform"],
'compiler_f90' : ["pathf95"],
'linker_so' : ["pathf95", "-shared"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"]
}
pic_flags = ['-fPIC']
module_dir_switch = '-module ' # Don't remove ending space!
module_include_switch = '-I'
def get_flags_opt(self):
return ['-O3']
def get_flags_debug(self):
return ['-g']
if __name__ == '__main__':
from distutils import log
log.set_verbosity(2)
#compiler = PathScaleFCompiler()
from numpy.distutils.fcompiler import new_fcompiler
compiler = new_fcompiler(compiler='pathf95')
compiler.customize()
print(compiler.get_version())
| bsd-3-clause |
scriptotek/vortex-mail-notifier | run.py | 1 | 1789 | # encoding=utf-8
import easywebdav
from ConfigParser import ConfigParser
from datetime import datetime
import json
import os
import requests
import rollbar
config = ConfigParser()
config.read('config.ini')
rollbar.init(config.get('rollbar', 'token'), 'production') # access_token, environment
try:
year = datetime.now().strftime('%Y')
seen = []
if os.path.exists('seen.json'):
seen = json.load(open('seen.json', 'r'))
webdav = easywebdav.connect(config.get('webdav', 'host'),
username=config.get('webdav', 'username'),
password=config.get('webdav', 'password'),
protocol='https')
for f in webdav.ls(config.get('page', 'path').format(year=year)):
url = f.name[webdav.baseurl.rfind(':'):]
if url not in seen and f.contenttype is not None and url.find('.html') != -1:
print "New url found: ", url
seen.append(url)
webdav.download(url, 'temp.json')
pagecontent = json.load(open('temp.json', 'r'))
requests.post(
'https://api.mailgun.net/v2/{}/messages'.format(config.get('mailgun', 'domain')),
auth=('api', config.get('mailgun', 'key')),
data={'from': config.get('page', 'sender'),
'to': [config.get('page', 'recipient')],
'subject': u"[{}] {}".format(config.get('page', 'subject'), pagecontent['properties']['title']),
'text': config.get('page', 'body')
})
json.dump(seen, open('seen.json', 'w'))
except IOError:
rollbar.report_message('Got an IOError in the main loop', 'warning')
except:
# catch-all
rollbar.report_exc_info()
| mit |
ArcaniteSolutions/truffe2 | truffe2/logistics/migrations/0015_auto__add_field_supply_price__chg_field_supplyreservation_contact_phon.py | 2 | 18305 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Supply.price'
db.alter_column(u'logistics_supply', 'price',
self.gf('django.db.models.fields.PositiveIntegerField')(default=0))
# Changing field 'SupplyReservation.contact_phone'
db.alter_column(u'logistics_supplyreservation', 'contact_phone', self.gf('django.db.models.fields.CharField')(max_length=25))
def backwards(self, orm):
# Deleting field 'Supply.price'
# db.delete_column(u'logistics_supply', 'price')
# Changing field 'SupplyReservation.contact_phone'
db.alter_column(u'logistics_supplyreservation', 'contact_phone', self.gf('django.db.models.fields.CharField')(max_length=255))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'logistics.room': {
'Meta': {'object_name': 'Room'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'allow_calendar': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'allow_external_calendar': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'allow_externals': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'conditions': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'conditions_externals': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_days': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'max_days_externals': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'maximum_days_before': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'maximum_days_before_externals': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'minimum_days_before': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'minimum_days_before_externals': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['units.Unit']"})
},
u'logistics.roomlogging': {
'Meta': {'object_name': 'RoomLogging'},
'extra_data': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'logs'", 'to': u"orm['logistics.Room']"}),
'what': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'when': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'who': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['users.TruffeUser']"})
},
u'logistics.roomreservation': {
'Meta': {'object_name': 'RoomReservation'},
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'end_date': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reason': ('django.db.models.fields.TextField', [], {}),
'remarks': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'room': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['logistics.Room']"}),
'start_date': ('django.db.models.fields.DateTimeField', [], {}),
'status': ('django.db.models.fields.CharField', [], {'default': "'0_draft'", 'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['units.Unit']", 'null': 'True', 'blank': 'True'}),
'unit_blank_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'unit_blank_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['users.TruffeUser']", 'null': 'True', 'blank': 'True'})
},
u'logistics.roomreservationlogging': {
'Meta': {'object_name': 'RoomReservationLogging'},
'extra_data': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'logs'", 'to': u"orm['logistics.RoomReservation']"}),
'what': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'when': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'who': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['users.TruffeUser']"})
},
u'logistics.roomreservationviews': {
'Meta': {'object_name': 'RoomReservationViews'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'views'", 'to': u"orm['logistics.RoomReservation']"}),
'when': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'who': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['users.TruffeUser']"})
},
u'logistics.roomviews': {
'Meta': {'object_name': 'RoomViews'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'views'", 'to': u"orm['logistics.Room']"}),
'when': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'who': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['users.TruffeUser']"})
},
u'logistics.supply': {
'Meta': {'object_name': 'Supply'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'allow_calendar': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'allow_external_calendar': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'allow_externals': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'conditions': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'conditions_externals': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_days': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'max_days_externals': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'maximum_days_before': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'maximum_days_before_externals': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'minimum_days_before': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'minimum_days_before_externals': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'price': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'quantity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['units.Unit']"})
},
u'logistics.supplylogging': {
'Meta': {'object_name': 'SupplyLogging'},
'extra_data': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'logs'", 'to': u"orm['logistics.Supply']"}),
'what': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'when': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'who': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['users.TruffeUser']"})
},
u'logistics.supplyreservation': {
'Meta': {'object_name': 'SupplyReservation'},
'contact_phone': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'end_date': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reason': ('django.db.models.fields.TextField', [], {}),
'remarks': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateTimeField', [], {}),
'status': ('django.db.models.fields.CharField', [], {'default': "'0_draft'", 'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['units.Unit']", 'null': 'True', 'blank': 'True'}),
'unit_blank_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'unit_blank_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['users.TruffeUser']", 'null': 'True', 'blank': 'True'})
},
u'logistics.supplyreservationline': {
'Meta': {'object_name': 'SupplyReservationLine'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'quantity': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'supply': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reservations'", 'to': u"orm['logistics.Supply']"}),
'supply_reservation': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'lines'", 'to': u"orm['logistics.SupplyReservation']"})
},
u'logistics.supplyreservationlogging': {
'Meta': {'object_name': 'SupplyReservationLogging'},
'extra_data': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'logs'", 'to': u"orm['logistics.SupplyReservation']"}),
'what': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'when': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'who': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['users.TruffeUser']"})
},
u'logistics.supplyreservationviews': {
'Meta': {'object_name': 'SupplyReservationViews'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'views'", 'to': u"orm['logistics.SupplyReservation']"}),
'when': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'who': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['users.TruffeUser']"})
},
u'logistics.supplyviews': {
'Meta': {'object_name': 'SupplyViews'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'views'", 'to': u"orm['logistics.Supply']"}),
'when': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'who': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['users.TruffeUser']"})
},
u'units.unit': {
'Meta': {'object_name': 'Unit'},
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'id_epfl': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'is_commission': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_equipe': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'parent_hierarchique': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['units.Unit']", 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'users.truffeuser': {
'Meta': {'object_name': 'TruffeUser'},
'adresse': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'avatar': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'body': ('django.db.models.fields.CharField', [], {'default': "'.'", 'max_length': '1'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '255'}),
'email_perso': ('django.db.models.fields.EmailField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
'homepage': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'iban_ou_ccp': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_betatester': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'mobile': ('django.db.models.fields.CharField', [], {'max_length': '25', 'blank': 'True'}),
'nom_banque': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
}
}
complete_apps = ['logistics']
| bsd-2-clause |
endlessm/chromium-browser | base/third_party/libevent/event_rpcgen.py | 296 | 45502 | #!/usr/bin/env python
#
# Copyright (c) 2005 Niels Provos <[email protected]>
# All rights reserved.
#
# Generates marshaling code based on libevent.
import sys
import re
#
_NAME = "event_rpcgen.py"
_VERSION = "0.1"
_STRUCT_RE = '[a-z][a-z_0-9]*'
# Globals
line_count = 0
white = re.compile(r'^\s+')
cppcomment = re.compile(r'\/\/.*$')
headerdirect = []
cppdirect = []
# Holds everything that makes a struct
class Struct:
def __init__(self, name):
self._name = name
self._entries = []
self._tags = {}
print >>sys.stderr, ' Created struct: %s' % name
def AddEntry(self, entry):
if self._tags.has_key(entry.Tag()):
print >>sys.stderr, ( 'Entry "%s" duplicates tag number '
'%d from "%s" around line %d' ) % (
entry.Name(), entry.Tag(),
self._tags[entry.Tag()], line_count)
sys.exit(1)
self._entries.append(entry)
self._tags[entry.Tag()] = entry.Name()
print >>sys.stderr, ' Added entry: %s' % entry.Name()
def Name(self):
return self._name
def EntryTagName(self, entry):
"""Creates the name inside an enumeration for distinguishing data
types."""
name = "%s_%s" % (self._name, entry.Name())
return name.upper()
def PrintIdented(self, file, ident, code):
"""Takes an array, add indentation to each entry and prints it."""
for entry in code:
print >>file, '%s%s' % (ident, entry)
def PrintTags(self, file):
"""Prints the tag definitions for a structure."""
print >>file, '/* Tag definition for %s */' % self._name
print >>file, 'enum %s_ {' % self._name.lower()
for entry in self._entries:
print >>file, ' %s=%d,' % (self.EntryTagName(entry),
entry.Tag())
print >>file, ' %s_MAX_TAGS' % (self._name.upper())
print >>file, '};\n'
def PrintForwardDeclaration(self, file):
print >>file, 'struct %s;' % self._name
def PrintDeclaration(self, file):
print >>file, '/* Structure declaration for %s */' % self._name
print >>file, 'struct %s_access_ {' % self._name
for entry in self._entries:
dcl = entry.AssignDeclaration('(*%s_assign)' % entry.Name())
dcl.extend(
entry.GetDeclaration('(*%s_get)' % entry.Name()))
if entry.Array():
dcl.extend(
entry.AddDeclaration('(*%s_add)' % entry.Name()))
self.PrintIdented(file, ' ', dcl)
print >>file, '};\n'
print >>file, 'struct %s {' % self._name
print >>file, ' struct %s_access_ *base;\n' % self._name
for entry in self._entries:
dcl = entry.Declaration()
self.PrintIdented(file, ' ', dcl)
print >>file, ''
for entry in self._entries:
print >>file, ' ev_uint8_t %s_set;' % entry.Name()
print >>file, '};\n'
print >>file, \
"""struct %(name)s *%(name)s_new(void);
void %(name)s_free(struct %(name)s *);
void %(name)s_clear(struct %(name)s *);
void %(name)s_marshal(struct evbuffer *, const struct %(name)s *);
int %(name)s_unmarshal(struct %(name)s *, struct evbuffer *);
int %(name)s_complete(struct %(name)s *);
void evtag_marshal_%(name)s(struct evbuffer *, ev_uint32_t,
const struct %(name)s *);
int evtag_unmarshal_%(name)s(struct evbuffer *, ev_uint32_t,
struct %(name)s *);""" % { 'name' : self._name }
# Write a setting function of every variable
for entry in self._entries:
self.PrintIdented(file, '', entry.AssignDeclaration(
entry.AssignFuncName()))
self.PrintIdented(file, '', entry.GetDeclaration(
entry.GetFuncName()))
if entry.Array():
self.PrintIdented(file, '', entry.AddDeclaration(
entry.AddFuncName()))
print >>file, '/* --- %s done --- */\n' % self._name
def PrintCode(self, file):
print >>file, ('/*\n'
' * Implementation of %s\n'
' */\n') % self._name
print >>file, \
'static struct %(name)s_access_ __%(name)s_base = {' % \
{ 'name' : self._name }
for entry in self._entries:
self.PrintIdented(file, ' ', entry.CodeBase())
print >>file, '};\n'
# Creation
print >>file, (
'struct %(name)s *\n'
'%(name)s_new(void)\n'
'{\n'
' struct %(name)s *tmp;\n'
' if ((tmp = malloc(sizeof(struct %(name)s))) == NULL) {\n'
' event_warn("%%s: malloc", __func__);\n'
' return (NULL);\n'
' }\n'
' tmp->base = &__%(name)s_base;\n') % { 'name' : self._name }
for entry in self._entries:
self.PrintIdented(file, ' ', entry.CodeNew('tmp'))
print >>file, ' tmp->%s_set = 0;\n' % entry.Name()
print >>file, (
' return (tmp);\n'
'}\n')
# Adding
for entry in self._entries:
if entry.Array():
self.PrintIdented(file, '', entry.CodeAdd())
print >>file, ''
# Assigning
for entry in self._entries:
self.PrintIdented(file, '', entry.CodeAssign())
print >>file, ''
# Getting
for entry in self._entries:
self.PrintIdented(file, '', entry.CodeGet())
print >>file, ''
# Clearing
print >>file, ( 'void\n'
'%(name)s_clear(struct %(name)s *tmp)\n'
'{'
) % { 'name' : self._name }
for entry in self._entries:
self.PrintIdented(file, ' ', entry.CodeClear('tmp'))
print >>file, '}\n'
# Freeing
print >>file, ( 'void\n'
'%(name)s_free(struct %(name)s *tmp)\n'
'{'
) % { 'name' : self._name }
for entry in self._entries:
self.PrintIdented(file, ' ', entry.CodeFree('tmp'))
print >>file, (' free(tmp);\n'
'}\n')
# Marshaling
print >>file, ('void\n'
'%(name)s_marshal(struct evbuffer *evbuf, '
'const struct %(name)s *tmp)'
'{') % { 'name' : self._name }
for entry in self._entries:
indent = ' '
# Optional entries do not have to be set
if entry.Optional():
indent += ' '
print >>file, ' if (tmp->%s_set) {' % entry.Name()
self.PrintIdented(
file, indent,
entry.CodeMarshal('evbuf', self.EntryTagName(entry), 'tmp'))
if entry.Optional():
print >>file, ' }'
print >>file, '}\n'
# Unmarshaling
print >>file, ('int\n'
'%(name)s_unmarshal(struct %(name)s *tmp, '
' struct evbuffer *evbuf)\n'
'{\n'
' ev_uint32_t tag;\n'
' while (EVBUFFER_LENGTH(evbuf) > 0) {\n'
' if (evtag_peek(evbuf, &tag) == -1)\n'
' return (-1);\n'
' switch (tag) {\n'
) % { 'name' : self._name }
for entry in self._entries:
print >>file, ' case %s:\n' % self.EntryTagName(entry)
if not entry.Array():
print >>file, (
' if (tmp->%s_set)\n'
' return (-1);'
) % (entry.Name())
self.PrintIdented(
file, ' ',
entry.CodeUnmarshal('evbuf',
self.EntryTagName(entry), 'tmp'))
print >>file, ( ' tmp->%s_set = 1;\n' % entry.Name() +
' break;\n' )
print >>file, ( ' default:\n'
' return -1;\n'
' }\n'
' }\n' )
# Check if it was decoded completely
print >>file, ( ' if (%(name)s_complete(tmp) == -1)\n'
' return (-1);'
) % { 'name' : self._name }
# Successfully decoded
print >>file, ( ' return (0);\n'
'}\n')
# Checking if a structure has all the required data
print >>file, (
'int\n'
'%(name)s_complete(struct %(name)s *msg)\n'
'{' ) % { 'name' : self._name }
for entry in self._entries:
self.PrintIdented(
file, ' ',
entry.CodeComplete('msg'))
print >>file, (
' return (0);\n'
'}\n' )
# Complete message unmarshaling
print >>file, (
'int\n'
'evtag_unmarshal_%(name)s(struct evbuffer *evbuf, '
'ev_uint32_t need_tag, struct %(name)s *msg)\n'
'{\n'
' ev_uint32_t tag;\n'
' int res = -1;\n'
'\n'
' struct evbuffer *tmp = evbuffer_new();\n'
'\n'
' if (evtag_unmarshal(evbuf, &tag, tmp) == -1'
' || tag != need_tag)\n'
' goto error;\n'
'\n'
' if (%(name)s_unmarshal(msg, tmp) == -1)\n'
' goto error;\n'
'\n'
' res = 0;\n'
'\n'
' error:\n'
' evbuffer_free(tmp);\n'
' return (res);\n'
'}\n' ) % { 'name' : self._name }
# Complete message marshaling
print >>file, (
'void\n'
'evtag_marshal_%(name)s(struct evbuffer *evbuf, ev_uint32_t tag, '
'const struct %(name)s *msg)\n'
'{\n'
' struct evbuffer *_buf = evbuffer_new();\n'
' assert(_buf != NULL);\n'
' evbuffer_drain(_buf, -1);\n'
' %(name)s_marshal(_buf, msg);\n'
' evtag_marshal(evbuf, tag, EVBUFFER_DATA(_buf), '
'EVBUFFER_LENGTH(_buf));\n'
' evbuffer_free(_buf);\n'
'}\n' ) % { 'name' : self._name }
class Entry:
def __init__(self, type, name, tag):
self._type = type
self._name = name
self._tag = int(tag)
self._ctype = type
self._optional = 0
self._can_be_array = 0
self._array = 0
self._line_count = -1
self._struct = None
self._refname = None
def GetTranslation(self):
return { "parent_name" : self._struct.Name(),
"name" : self._name,
"ctype" : self._ctype,
"refname" : self._refname
}
def SetStruct(self, struct):
self._struct = struct
def LineCount(self):
assert self._line_count != -1
return self._line_count
def SetLineCount(self, number):
self._line_count = number
def Array(self):
return self._array
def Optional(self):
return self._optional
def Tag(self):
return self._tag
def Name(self):
return self._name
def Type(self):
return self._type
def MakeArray(self, yes=1):
self._array = yes
def MakeOptional(self):
self._optional = 1
def GetFuncName(self):
return '%s_%s_get' % (self._struct.Name(), self._name)
def GetDeclaration(self, funcname):
code = [ 'int %s(struct %s *, %s *);' % (
funcname, self._struct.Name(), self._ctype ) ]
return code
def CodeGet(self):
code = (
'int',
'%(parent_name)s_%(name)s_get(struct %(parent_name)s *msg, '
'%(ctype)s *value)',
'{',
' if (msg->%(name)s_set != 1)',
' return (-1);',
' *value = msg->%(name)s_data;',
' return (0);',
'}' )
code = '\n'.join(code)
code = code % self.GetTranslation()
return code.split('\n')
def AssignFuncName(self):
return '%s_%s_assign' % (self._struct.Name(), self._name)
def AddFuncName(self):
return '%s_%s_add' % (self._struct.Name(), self._name)
def AssignDeclaration(self, funcname):
code = [ 'int %s(struct %s *, const %s);' % (
funcname, self._struct.Name(), self._ctype ) ]
return code
def CodeAssign(self):
code = [ 'int',
'%(parent_name)s_%(name)s_assign(struct %(parent_name)s *msg,'
' const %(ctype)s value)',
'{',
' msg->%(name)s_set = 1;',
' msg->%(name)s_data = value;',
' return (0);',
'}' ]
code = '\n'.join(code)
code = code % self.GetTranslation()
return code.split('\n')
def CodeClear(self, structname):
code = [ '%s->%s_set = 0;' % (structname, self.Name()) ]
return code
def CodeComplete(self, structname):
if self.Optional():
return []
code = [ 'if (!%s->%s_set)' % (structname, self.Name()),
' return (-1);' ]
return code
def CodeFree(self, name):
return []
def CodeBase(self):
code = [
'%(parent_name)s_%(name)s_assign,',
'%(parent_name)s_%(name)s_get,'
]
if self.Array():
code.append('%(parent_name)s_%(name)s_add,')
code = '\n'.join(code)
code = code % self.GetTranslation()
return code.split('\n')
def Verify(self):
if self.Array() and not self._can_be_array:
print >>sys.stderr, (
'Entry "%s" cannot be created as an array '
'around line %d' ) % (self._name, self.LineCount())
sys.exit(1)
if not self._struct:
print >>sys.stderr, (
'Entry "%s" does not know which struct it belongs to '
'around line %d' ) % (self._name, self.LineCount())
sys.exit(1)
if self._optional and self._array:
print >>sys.stderr, ( 'Entry "%s" has illegal combination of '
'optional and array around line %d' ) % (
self._name, self.LineCount() )
sys.exit(1)
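# Note (added): the Entry subclasses below each specialize one wire type --
# EntryBytes (fixed-length byte arrays), EntryInt (ev_uint32_t), EntryString
# (NUL-terminated strings), EntryStruct (nested generated structures) and
# EntryVarBytes (length-prefixed buffers).  Each supplies its own declaration
# and marshal/unmarshal code so that Struct's printers emit the matching
# evtag_* calls.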
class EntryBytes(Entry):
def __init__(self, type, name, tag, length):
# Init base class
Entry.__init__(self, type, name, tag)
self._length = length
self._ctype = 'ev_uint8_t'
def GetDeclaration(self, funcname):
code = [ 'int %s(struct %s *, %s **);' % (
funcname, self._struct.Name(), self._ctype ) ]
return code
def AssignDeclaration(self, funcname):
code = [ 'int %s(struct %s *, const %s *);' % (
funcname, self._struct.Name(), self._ctype ) ]
return code
def Declaration(self):
dcl = ['ev_uint8_t %s_data[%s];' % (self._name, self._length)]
return dcl
def CodeGet(self):
name = self._name
code = [ 'int',
'%s_%s_get(struct %s *msg, %s **value)' % (
self._struct.Name(), name,
self._struct.Name(), self._ctype),
'{',
' if (msg->%s_set != 1)' % name,
' return (-1);',
' *value = msg->%s_data;' % name,
' return (0);',
'}' ]
return code
def CodeAssign(self):
name = self._name
code = [ 'int',
'%s_%s_assign(struct %s *msg, const %s *value)' % (
self._struct.Name(), name,
self._struct.Name(), self._ctype),
'{',
' msg->%s_set = 1;' % name,
' memcpy(msg->%s_data, value, %s);' % (
name, self._length),
' return (0);',
'}' ]
return code
def CodeUnmarshal(self, buf, tag_name, var_name):
code = [ 'if (evtag_unmarshal_fixed(%s, %s, ' % (buf, tag_name) +
'%s->%s_data, ' % (var_name, self._name) +
'sizeof(%s->%s_data)) == -1) {' % (
var_name, self._name),
' event_warnx("%%s: failed to unmarshal %s", __func__);' % (
self._name ),
' return (-1);',
'}'
]
return code
def CodeMarshal(self, buf, tag_name, var_name):
code = ['evtag_marshal(%s, %s, %s->%s_data, sizeof(%s->%s_data));' % (
buf, tag_name, var_name, self._name, var_name, self._name )]
return code
def CodeClear(self, structname):
code = [ '%s->%s_set = 0;' % (structname, self.Name()),
'memset(%s->%s_data, 0, sizeof(%s->%s_data));' % (
structname, self._name, structname, self._name)]
return code
def CodeNew(self, name):
code = ['memset(%s->%s_data, 0, sizeof(%s->%s_data));' % (
name, self._name, name, self._name)]
return code
def Verify(self):
if not self._length:
print >>sys.stderr, 'Entry "%s" needs a length around line %d' % (
self._name, self.LineCount() )
sys.exit(1)
Entry.Verify(self)
class EntryInt(Entry):
def __init__(self, type, name, tag):
# Init base class
Entry.__init__(self, type, name, tag)
self._ctype = 'ev_uint32_t'
def CodeUnmarshal(self, buf, tag_name, var_name):
code = ['if (evtag_unmarshal_int(%s, %s, &%s->%s_data) == -1) {' % (
buf, tag_name, var_name, self._name),
' event_warnx("%%s: failed to unmarshal %s", __func__);' % (
self._name ),
' return (-1);',
'}' ]
return code
def CodeMarshal(self, buf, tag_name, var_name):
code = ['evtag_marshal_int(%s, %s, %s->%s_data);' % (
buf, tag_name, var_name, self._name)]
return code
def Declaration(self):
dcl = ['ev_uint32_t %s_data;' % self._name]
return dcl
def CodeNew(self, name):
code = ['%s->%s_data = 0;' % (name, self._name)]
return code
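# ----------------------------------------------------------------------------
# Added sketch (not part of the original generator): wiring one Struct and one
# EntryInt together by hand.  The struct name "msg" and field "count" are
# made-up; in the real tool they come from the parsed description file.
def _example_struct_output(out=sys.stdout):
    """Print the tag enum and C declaration generated for a one-field struct."""
    demo = Struct('msg')
    count = EntryInt('int', 'count', 1)
    count.SetStruct(demo)
    demo.AddEntry(count)
    demo.PrintTags(out)         # enum msg_ { MSG_COUNT=1, MSG_MAX_TAGS };
    demo.PrintDeclaration(out)  # struct msg *msg_new(void); msg_count_assign()/_get(); etc.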
class EntryString(Entry):
def __init__(self, type, name, tag):
# Init base class
Entry.__init__(self, type, name, tag)
self._ctype = 'char *'
def CodeAssign(self):
name = self._name
code = """int
%(parent_name)s_%(name)s_assign(struct %(parent_name)s *msg,
const %(ctype)s value)
{
if (msg->%(name)s_data != NULL)
free(msg->%(name)s_data);
if ((msg->%(name)s_data = strdup(value)) == NULL)
return (-1);
msg->%(name)s_set = 1;
return (0);
}""" % self.GetTranslation()
return code.split('\n')
def CodeUnmarshal(self, buf, tag_name, var_name):
code = ['if (evtag_unmarshal_string(%s, %s, &%s->%s_data) == -1) {' % (
buf, tag_name, var_name, self._name),
' event_warnx("%%s: failed to unmarshal %s", __func__);' % (
self._name ),
' return (-1);',
'}'
]
return code
def CodeMarshal(self, buf, tag_name, var_name):
code = ['evtag_marshal_string(%s, %s, %s->%s_data);' % (
buf, tag_name, var_name, self._name)]
return code
def CodeClear(self, structname):
code = [ 'if (%s->%s_set == 1) {' % (structname, self.Name()),
' free (%s->%s_data);' % (structname, self.Name()),
' %s->%s_data = NULL;' % (structname, self.Name()),
' %s->%s_set = 0;' % (structname, self.Name()),
'}'
]
return code
def CodeNew(self, name):
code = ['%s->%s_data = NULL;' % (name, self._name)]
return code
def CodeFree(self, name):
code = ['if (%s->%s_data != NULL)' % (name, self._name),
' free (%s->%s_data); ' % (name, self._name)]
return code
def Declaration(self):
dcl = ['char *%s_data;' % self._name]
return dcl
class EntryStruct(Entry):
def __init__(self, type, name, tag, refname):
# Init base class
Entry.__init__(self, type, name, tag)
self._can_be_array = 1
self._refname = refname
self._ctype = 'struct %s*' % refname
def CodeGet(self):
name = self._name
code = [ 'int',
'%s_%s_get(struct %s *msg, %s *value)' % (
self._struct.Name(), name,
self._struct.Name(), self._ctype),
'{',
' if (msg->%s_set != 1) {' % name,
' msg->%s_data = %s_new();' % (name, self._refname),
' if (msg->%s_data == NULL)' % name,
' return (-1);',
' msg->%s_set = 1;' % name,
' }',
' *value = msg->%s_data;' % name,
' return (0);',
'}' ]
return code
def CodeAssign(self):
name = self._name
code = """int
%(parent_name)s_%(name)s_assign(struct %(parent_name)s *msg,
const %(ctype)s value)
{
struct evbuffer *tmp = NULL;
if (msg->%(name)s_set) {
%(refname)s_clear(msg->%(name)s_data);
msg->%(name)s_set = 0;
} else {
msg->%(name)s_data = %(refname)s_new();
if (msg->%(name)s_data == NULL) {
event_warn("%%s: %(refname)s_new()", __func__);
goto error;
}
}
if ((tmp = evbuffer_new()) == NULL) {
event_warn("%%s: evbuffer_new()", __func__);
goto error;
}
%(refname)s_marshal(tmp, value);
if (%(refname)s_unmarshal(msg->%(name)s_data, tmp) == -1) {
event_warnx("%%s: %(refname)s_unmarshal", __func__);
goto error;
}
msg->%(name)s_set = 1;
evbuffer_free(tmp);
return (0);
error:
if (tmp != NULL)
evbuffer_free(tmp);
if (msg->%(name)s_data != NULL) {
%(refname)s_free(msg->%(name)s_data);
msg->%(name)s_data = NULL;
}
return (-1);
}""" % self.GetTranslation()
return code.split('\n')
def CodeComplete(self, structname):
if self.Optional():
code = [ 'if (%s->%s_set && %s_complete(%s->%s_data) == -1)' % (
structname, self.Name(),
self._refname, structname, self.Name()),
' return (-1);' ]
else:
code = [ 'if (%s_complete(%s->%s_data) == -1)' % (
self._refname, structname, self.Name()),
' return (-1);' ]
return code
def CodeUnmarshal(self, buf, tag_name, var_name):
code = ['%s->%s_data = %s_new();' % (
var_name, self._name, self._refname),
'if (%s->%s_data == NULL)' % (var_name, self._name),
' return (-1);',
'if (evtag_unmarshal_%s(%s, %s, %s->%s_data) == -1) {' % (
self._refname, buf, tag_name, var_name, self._name),
' event_warnx("%%s: failed to unmarshal %s", __func__);' % (
self._name ),
' return (-1);',
'}'
]
return code
def CodeMarshal(self, buf, tag_name, var_name):
code = ['evtag_marshal_%s(%s, %s, %s->%s_data);' % (
self._refname, buf, tag_name, var_name, self._name)]
return code
def CodeClear(self, structname):
code = [ 'if (%s->%s_set == 1) {' % (structname, self.Name()),
' %s_free(%s->%s_data);' % (
self._refname, structname, self.Name()),
' %s->%s_data = NULL;' % (structname, self.Name()),
' %s->%s_set = 0;' % (structname, self.Name()),
'}'
]
return code
def CodeNew(self, name):
code = ['%s->%s_data = NULL;' % (name, self._name)]
return code
def CodeFree(self, name):
code = ['if (%s->%s_data != NULL)' % (name, self._name),
' %s_free(%s->%s_data); ' % (
self._refname, name, self._name)]
return code
def Declaration(self):
dcl = ['%s %s_data;' % (self._ctype, self._name)]
return dcl
class EntryVarBytes(Entry):
def __init__(self, type, name, tag):
# Init base class
Entry.__init__(self, type, name, tag)
self._ctype = 'ev_uint8_t *'
def GetDeclaration(self, funcname):
code = [ 'int %s(struct %s *, %s *, ev_uint32_t *);' % (
funcname, self._struct.Name(), self._ctype ) ]
return code
def AssignDeclaration(self, funcname):
code = [ 'int %s(struct %s *, const %s, ev_uint32_t);' % (
funcname, self._struct.Name(), self._ctype ) ]
return code
def CodeAssign(self):
name = self._name
code = [ 'int',
'%s_%s_assign(struct %s *msg, '
'const %s value, ev_uint32_t len)' % (
self._struct.Name(), name,
self._struct.Name(), self._ctype),
'{',
' if (msg->%s_data != NULL)' % name,
' free (msg->%s_data);' % name,
' msg->%s_data = malloc(len);' % name,
' if (msg->%s_data == NULL)' % name,
' return (-1);',
' msg->%s_set = 1;' % name,
' msg->%s_length = len;' % name,
' memcpy(msg->%s_data, value, len);' % name,
' return (0);',
'}' ]
return code
def CodeGet(self):
name = self._name
code = [ 'int',
'%s_%s_get(struct %s *msg, %s *value, ev_uint32_t *plen)' % (
self._struct.Name(), name,
self._struct.Name(), self._ctype),
'{',
' if (msg->%s_set != 1)' % name,
' return (-1);',
' *value = msg->%s_data;' % name,
' *plen = msg->%s_length;' % name,
' return (0);',
'}' ]
return code
def CodeUnmarshal(self, buf, tag_name, var_name):
code = ['if (evtag_payload_length(%s, &%s->%s_length) == -1)' % (
buf, var_name, self._name),
' return (-1);',
# We do not want DoS opportunities
'if (%s->%s_length > EVBUFFER_LENGTH(%s))' % (
var_name, self._name, buf),
' return (-1);',
'if ((%s->%s_data = malloc(%s->%s_length)) == NULL)' % (
var_name, self._name, var_name, self._name),
' return (-1);',
'if (evtag_unmarshal_fixed(%s, %s, %s->%s_data, '
'%s->%s_length) == -1) {' % (
buf, tag_name, var_name, self._name, var_name, self._name),
' event_warnx("%%s: failed to unmarshal %s", __func__);' % (
self._name ),
' return (-1);',
'}'
]
return code
def CodeMarshal(self, buf, tag_name, var_name):
code = ['evtag_marshal(%s, %s, %s->%s_data, %s->%s_length);' % (
buf, tag_name, var_name, self._name, var_name, self._name)]
return code
def CodeClear(self, structname):
code = [ 'if (%s->%s_set == 1) {' % (structname, self.Name()),
' free (%s->%s_data);' % (structname, self.Name()),
' %s->%s_data = NULL;' % (structname, self.Name()),
' %s->%s_length = 0;' % (structname, self.Name()),
' %s->%s_set = 0;' % (structname, self.Name()),
'}'
]
return code
def CodeNew(self, name):
code = ['%s->%s_data = NULL;' % (name, self._name),
'%s->%s_length = 0;' % (name, self._name) ]
return code
def CodeFree(self, name):
code = ['if (%s->%s_data != NULL)' % (name, self._name),
' free (%s->%s_data); ' % (name, self._name)]
return code
def Declaration(self):
dcl = ['ev_uint8_t *%s_data;' % self._name,
'ev_uint32_t %s_length;' % self._name]
return dcl
class EntryArray(Entry):
def __init__(self, entry):
# Init base class
Entry.__init__(self, entry._type, entry._name, entry._tag)
self._entry = entry
self._refname = entry._refname
self._ctype = 'struct %s *' % self._refname
def GetDeclaration(self, funcname):
"""Allows direct access to elements of the array."""
translate = self.GetTranslation()
translate["funcname"] = funcname
code = [
'int %(funcname)s(struct %(parent_name)s *, int, %(ctype)s *);' %
translate ]
return code
def AssignDeclaration(self, funcname):
code = [ 'int %s(struct %s *, int, const %s);' % (
funcname, self._struct.Name(), self._ctype ) ]
return code
def AddDeclaration(self, funcname):
code = [ '%s %s(struct %s *);' % (
self._ctype, funcname, self._struct.Name() ) ]
return code
def CodeGet(self):
code = """int
%(parent_name)s_%(name)s_get(struct %(parent_name)s *msg, int offset,
%(ctype)s *value)
{
if (!msg->%(name)s_set || offset < 0 || offset >= msg->%(name)s_length)
return (-1);
*value = msg->%(name)s_data[offset];
return (0);
}""" % self.GetTranslation()
return code.split('\n')
def CodeAssign(self):
code = """int
%(parent_name)s_%(name)s_assign(struct %(parent_name)s *msg, int off,
const %(ctype)s value)
{
struct evbuffer *tmp = NULL;
if (!msg->%(name)s_set || off < 0 || off >= msg->%(name)s_length)
return (-1);
%(refname)s_clear(msg->%(name)s_data[off]);
if ((tmp = evbuffer_new()) == NULL) {
event_warn("%%s: evbuffer_new()", __func__);
goto error;
}
%(refname)s_marshal(tmp, value);
if (%(refname)s_unmarshal(msg->%(name)s_data[off], tmp) == -1) {
event_warnx("%%s: %(refname)s_unmarshal", __func__);
goto error;
}
evbuffer_free(tmp);
return (0);
error:
if (tmp != NULL)
evbuffer_free(tmp);
%(refname)s_clear(msg->%(name)s_data[off]);
return (-1);
}""" % self.GetTranslation()
return code.split('\n')
def CodeAdd(self):
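        # The generated _add() grows the backing array geometrically (doubling the
        # allocation whenever it fills up) and returns a pointer to a freshly created
        # element, or NULL on failure.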
code = \
"""%(ctype)s
%(parent_name)s_%(name)s_add(struct %(parent_name)s *msg)
{
if (++msg->%(name)s_length >= msg->%(name)s_num_allocated) {
int tobe_allocated = msg->%(name)s_num_allocated;
%(ctype)s* new_data = NULL;
tobe_allocated = !tobe_allocated ? 1 : tobe_allocated << 1;
new_data = (%(ctype)s*) realloc(msg->%(name)s_data,
tobe_allocated * sizeof(%(ctype)s));
if (new_data == NULL)
goto error;
msg->%(name)s_data = new_data;
msg->%(name)s_num_allocated = tobe_allocated;
}
msg->%(name)s_data[msg->%(name)s_length - 1] = %(refname)s_new();
if (msg->%(name)s_data[msg->%(name)s_length - 1] == NULL)
goto error;
msg->%(name)s_set = 1;
return (msg->%(name)s_data[msg->%(name)s_length - 1]);
error:
--msg->%(name)s_length;
return (NULL);
}
""" % self.GetTranslation()
return code.split('\n')
def CodeComplete(self, structname):
code = []
translate = self.GetTranslation()
if self.Optional():
code.append( 'if (%(structname)s->%(name)s_set)' % translate)
translate["structname"] = structname
tmp = """{
int i;
for (i = 0; i < %(structname)s->%(name)s_length; ++i) {
if (%(refname)s_complete(%(structname)s->%(name)s_data[i]) == -1)
return (-1);
}
}""" % translate
code.extend(tmp.split('\n'))
return code
def CodeUnmarshal(self, buf, tag_name, var_name):
translate = self.GetTranslation()
translate["var_name"] = var_name
translate["buf"] = buf
translate["tag_name"] = tag_name
code = """if (%(parent_name)s_%(name)s_add(%(var_name)s) == NULL)
return (-1);
if (evtag_unmarshal_%(refname)s(%(buf)s, %(tag_name)s,
%(var_name)s->%(name)s_data[%(var_name)s->%(name)s_length - 1]) == -1) {
--%(var_name)s->%(name)s_length;
event_warnx("%%s: failed to unmarshal %(name)s", __func__);
return (-1);
}""" % translate
return code.split('\n')
def CodeMarshal(self, buf, tag_name, var_name):
code = ['{',
' int i;',
' for (i = 0; i < %s->%s_length; ++i) {' % (
var_name, self._name),
' evtag_marshal_%s(%s, %s, %s->%s_data[i]);' % (
self._refname, buf, tag_name, var_name, self._name),
' }',
'}'
]
return code
def CodeClear(self, structname):
code = [ 'if (%s->%s_set == 1) {' % (structname, self.Name()),
' int i;',
' for (i = 0; i < %s->%s_length; ++i) {' % (
structname, self.Name()),
' %s_free(%s->%s_data[i]);' % (
self._refname, structname, self.Name()),
' }',
' free(%s->%s_data);' % (structname, self.Name()),
' %s->%s_data = NULL;' % (structname, self.Name()),
' %s->%s_set = 0;' % (structname, self.Name()),
' %s->%s_length = 0;' % (structname, self.Name()),
' %s->%s_num_allocated = 0;' % (structname, self.Name()),
'}'
]
return code
def CodeNew(self, name):
code = ['%s->%s_data = NULL;' % (name, self._name),
'%s->%s_length = 0;' % (name, self._name),
'%s->%s_num_allocated = 0;' % (name, self._name)]
return code
def CodeFree(self, name):
code = ['if (%s->%s_data != NULL) {' % (name, self._name),
' int i;',
' for (i = 0; i < %s->%s_length; ++i) {' % (
name, self._name),
' %s_free(%s->%s_data[i]); ' % (
self._refname, name, self._name),
' %s->%s_data[i] = NULL;' % (name, self._name),
' }',
' free(%s->%s_data);' % (name, self._name),
' %s->%s_data = NULL;' % (name, self._name),
' %s->%s_length = 0;' % (name, self._name),
' %s->%s_num_allocated = 0;' % (name, self._name),
'}'
]
return code
def Declaration(self):
dcl = ['struct %s **%s_data;' % (self._refname, self._name),
'int %s_length;' % self._name,
'int %s_num_allocated;' % self._name ]
return dcl
def NormalizeLine(line):
global white
global cppcomment
line = cppcomment.sub('', line)
line = line.strip()
line = white.sub(' ', line)
return line
def ProcessOneEntry(newstruct, entry):
optional = 0
array = 0
entry_type = ''
name = ''
tag = ''
tag_set = None
separator = ''
fixed_length = ''
tokens = entry.split(' ')
while tokens:
token = tokens[0]
tokens = tokens[1:]
if not entry_type:
if not optional and token == 'optional':
optional = 1
continue
if not array and token == 'array':
array = 1
continue
if not entry_type:
entry_type = token
continue
if not name:
res = re.match(r'^([^\[\]]+)(\[.*\])?$', token)
if not res:
print >>sys.stderr, 'Cannot parse name: \"%s\" around %d' % (
entry, line_count)
sys.exit(1)
name = res.group(1)
fixed_length = res.group(2)
if fixed_length:
fixed_length = fixed_length[1:-1]
continue
if not separator:
separator = token
if separator != '=':
print >>sys.stderr, 'Expected "=" after name \"%s\" got %s' % (
name, token)
sys.exit(1)
continue
if not tag_set:
tag_set = 1
if not re.match(r'^(0x)?[0-9]+$', token):
print >>sys.stderr, 'Expected tag number: \"%s\"' % entry
sys.exit(1)
tag = int(token, 0)
continue
print >>sys.stderr, 'Cannot parse \"%s\"' % entry
sys.exit(1)
if not tag_set:
print >>sys.stderr, 'Need tag number: \"%s\"' % entry
sys.exit(1)
# Create the right entry
if entry_type == 'bytes':
if fixed_length:
newentry = EntryBytes(entry_type, name, tag, fixed_length)
else:
newentry = EntryVarBytes(entry_type, name, tag)
elif entry_type == 'int' and not fixed_length:
newentry = EntryInt(entry_type, name, tag)
elif entry_type == 'string' and not fixed_length:
newentry = EntryString(entry_type, name, tag)
else:
res = re.match(r'^struct\[(%s)\]$' % _STRUCT_RE,
entry_type, re.IGNORECASE)
if res:
# References another struct defined in our file
newentry = EntryStruct(entry_type, name, tag, res.group(1))
else:
print >>sys.stderr, 'Bad type: "%s" in "%s"' % (entry_type, entry)
sys.exit(1)
structs = []
if optional:
newentry.MakeOptional()
if array:
newentry.MakeArray()
newentry.SetStruct(newstruct)
newentry.SetLineCount(line_count)
newentry.Verify()
if array:
# We need to encapsulate this entry into a struct
newname = newentry.Name()+ '_array'
# Now borgify the new entry.
newentry = EntryArray(newentry)
newentry.SetStruct(newstruct)
newentry.SetLineCount(line_count)
newentry.MakeArray()
newstruct.AddEntry(newentry)
return structs
def ProcessStruct(data):
tokens = data.split(' ')
# First three tokens are: 'struct' 'name' '{'
newstruct = Struct(tokens[1])
inside = ' '.join(tokens[3:-1])
tokens = inside.split(';')
structs = []
for entry in tokens:
entry = NormalizeLine(entry)
if not entry:
continue
# It's possible that new structs get defined in here
structs.extend(ProcessOneEntry(newstruct, entry))
structs.append(newstruct)
return structs
def GetNextStruct(file):
global line_count
global cppdirect
got_struct = 0
processed_lines = []
have_c_comment = 0
data = ''
while 1:
line = file.readline()
if not line:
break
line_count += 1
line = line[:-1]
if not have_c_comment and re.search(r'/\*', line):
if re.search(r'/\*.*\*/', line):
line = re.sub(r'/\*.*\*/', '', line)
else:
line = re.sub(r'/\*.*$', '', line)
have_c_comment = 1
if have_c_comment:
if not re.search(r'\*/', line):
continue
have_c_comment = 0
line = re.sub(r'^.*\*/', '', line)
line = NormalizeLine(line)
if not line:
continue
if not got_struct:
if re.match(r'#include ["<].*[>"]', line):
cppdirect.append(line)
continue
if re.match(r'^#(if( |def)|endif)', line):
cppdirect.append(line)
continue
if re.match(r'^#define', line):
headerdirect.append(line)
continue
if not re.match(r'^struct %s {$' % _STRUCT_RE,
line, re.IGNORECASE):
print >>sys.stderr, 'Missing struct on line %d: %s' % (
line_count, line)
sys.exit(1)
else:
got_struct = 1
data += line
continue
# We are inside the struct
tokens = line.split('}')
if len(tokens) == 1:
data += ' ' + line
continue
if len(tokens[1]):
print >>sys.stderr, 'Trailing garbage after struct on line %d' % (
line_count )
sys.exit(1)
# We found the end of the struct
data += ' %s}' % tokens[0]
break
    # Remove any comments that might be in there
data = re.sub(r'/\*.*\*/', '', data)
return data
def Parse(file):
"""
Parses the input file and returns C code and corresponding header file.
"""
entities = []
while 1:
# Just gets the whole struct nicely formatted
data = GetNextStruct(file)
if not data:
break
entities.extend(ProcessStruct(data))
return entities
def GuardName(name):
name = '_'.join(name.split('.'))
name = '_'.join(name.split('/'))
guard = '_'+name.upper()+'_'
return guard
def HeaderPreamble(name):
guard = GuardName(name)
pre = (
'/*\n'
' * Automatically generated from %s\n'
' */\n\n'
'#ifndef %s\n'
'#define %s\n\n' ) % (
name, guard, guard)
# insert stdint.h - let's hope everyone has it
pre += (
'#include <event-config.h>\n'
'#ifdef _EVENT_HAVE_STDINT_H\n'
'#include <stdint.h>\n'
'#endif\n' )
for statement in headerdirect:
pre += '%s\n' % statement
if headerdirect:
pre += '\n'
pre += (
'#define EVTAG_HAS(msg, member) ((msg)->member##_set == 1)\n'
'#ifdef __GNUC__\n'
'#define EVTAG_ASSIGN(msg, member, args...) '
'(*(msg)->base->member##_assign)(msg, ## args)\n'
'#define EVTAG_GET(msg, member, args...) '
'(*(msg)->base->member##_get)(msg, ## args)\n'
'#else\n'
'#define EVTAG_ASSIGN(msg, member, ...) '
'(*(msg)->base->member##_assign)(msg, ## __VA_ARGS__)\n'
'#define EVTAG_GET(msg, member, ...) '
'(*(msg)->base->member##_get)(msg, ## __VA_ARGS__)\n'
'#endif\n'
'#define EVTAG_ADD(msg, member) (*(msg)->base->member##_add)(msg)\n'
'#define EVTAG_LEN(msg, member) ((msg)->member##_length)\n'
)
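    # Consumers of the generated code use these wrappers on a message, e.g.
    # EVTAG_HAS(msg, some_field) or EVTAG_ASSIGN(msg, some_field, value); the
    # ASSIGN/GET/ADD forms dispatch through the per-field function pointers in msg->base.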
return pre
def HeaderPostamble(name):
guard = GuardName(name)
return '#endif /* %s */' % guard
def BodyPreamble(name):
global _NAME
global _VERSION
header_file = '.'.join(name.split('.')[:-1]) + '.gen.h'
pre = ( '/*\n'
' * Automatically generated from %s\n'
' * by %s/%s. DO NOT EDIT THIS FILE.\n'
' */\n\n' ) % (name, _NAME, _VERSION)
pre += ( '#include <sys/types.h>\n'
'#ifdef _EVENT_HAVE_SYS_TIME_H\n'
'#include <sys/time.h>\n'
'#endif\n'
'#include <stdlib.h>\n'
'#include <string.h>\n'
'#include <assert.h>\n'
'#define EVENT_NO_STRUCT\n'
'#include <event.h>\n\n'
'#ifdef _EVENT___func__\n'
'#define __func__ _EVENT___func__\n'
'#endif\n' )
for statement in cppdirect:
pre += '%s\n' % statement
pre += '\n#include "%s"\n\n' % header_file
pre += 'void event_err(int eval, const char *fmt, ...);\n'
pre += 'void event_warn(const char *fmt, ...);\n'
pre += 'void event_errx(int eval, const char *fmt, ...);\n'
pre += 'void event_warnx(const char *fmt, ...);\n\n'
return pre
def main(argv):
if len(argv) < 2 or not argv[1]:
print >>sys.stderr, 'Need RPC description file as first argument.'
sys.exit(1)
filename = argv[1]
ext = filename.split('.')[-1]
if ext != 'rpc':
print >>sys.stderr, 'Unrecognized file extension: %s' % ext
sys.exit(1)
print >>sys.stderr, 'Reading \"%s\"' % filename
fp = open(filename, 'r')
entities = Parse(fp)
fp.close()
header_file = '.'.join(filename.split('.')[:-1]) + '.gen.h'
impl_file = '.'.join(filename.split('.')[:-1]) + '.gen.c'
print >>sys.stderr, '... creating "%s"' % header_file
header_fp = open(header_file, 'w')
print >>header_fp, HeaderPreamble(filename)
# Create forward declarations: allows other structs to reference
# each other
for entry in entities:
entry.PrintForwardDeclaration(header_fp)
print >>header_fp, ''
for entry in entities:
entry.PrintTags(header_fp)
entry.PrintDeclaration(header_fp)
print >>header_fp, HeaderPostamble(filename)
header_fp.close()
print >>sys.stderr, '... creating "%s"' % impl_file
impl_fp = open(impl_file, 'w')
print >>impl_fp, BodyPreamble(filename)
for entry in entities:
entry.PrintCode(impl_fp)
impl_fp.close()
if __name__ == '__main__':
main(sys.argv)
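# A minimal sketch of the kind of .rpc input this generator accepts (the struct,
# field names and tags below are illustrative only):
#
#   struct msg {
#       string from_name = 1;
#       optional int attack = 2;
#       array struct[kill] kills = 3;
#   }
#
# Running `event_rpcgen.py msg.rpc` would then emit msg.gen.h and msg.gen.c
# next to the input file.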
| bsd-3-clause |
leandrotoledo/python-telegram-bot | tests/test_helpers.py | 2 | 16200 | #!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2021
# Leandro Toledo de Souza <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
import os
import time
import datetime as dtm
from importlib import reload
from pathlib import Path
from unittest import mock
import pytest
from telegram import Sticker, InputFile, Animation
from telegram import Update
from telegram import User
from telegram import MessageEntity
from telegram.ext import Defaults
from telegram.message import Message
from telegram.utils import helpers
from telegram.utils.helpers import _datetime_to_float_timestamp
# sample time specification values categorised into absolute / delta / time-of-day
from tests.conftest import env_var_2_bool
ABSOLUTE_TIME_SPECS = [
dtm.datetime.now(tz=dtm.timezone(dtm.timedelta(hours=-7))),
dtm.datetime.utcnow(),
]
DELTA_TIME_SPECS = [dtm.timedelta(hours=3, seconds=42, milliseconds=2), 30, 7.5]
TIME_OF_DAY_TIME_SPECS = [
dtm.time(12, 42, tzinfo=dtm.timezone(dtm.timedelta(hours=-7))),
dtm.time(12, 42),
]
RELATIVE_TIME_SPECS = DELTA_TIME_SPECS + TIME_OF_DAY_TIME_SPECS
TIME_SPECS = ABSOLUTE_TIME_SPECS + RELATIVE_TIME_SPECS
"""
This part is here for ptb-raw, where we don't have pytz (unless the user installs it)
Because imports in pytest are intricate, we just run
pytest -k test_helpers.py
with the TEST_NO_PYTZ environment variable set in addition to the regular test suite.
Because actually uninstalling pytz would lead to errors in the test suite we just mock the
import to raise the expected exception.
Note that a fixture that just does this for every test that needs it is a nice idea, but for some
reason makes test_updater.py hang indefinitely on GitHub Actions (at least when Hinrich tried that)
"""
TEST_NO_PYTZ = env_var_2_bool(os.getenv('TEST_NO_PYTZ', False))
if TEST_NO_PYTZ:
orig_import = __import__
def import_mock(module_name, *args, **kwargs):
if module_name == 'pytz':
raise ModuleNotFoundError('We are testing without pytz here')
return orig_import(module_name, *args, **kwargs)
with mock.patch('builtins.__import__', side_effect=import_mock):
reload(helpers)
class TestHelpers:
def test_helpers_utc(self):
# Here we just test, that we got the correct UTC variant
if TEST_NO_PYTZ:
assert helpers.UTC is helpers.DTM_UTC
else:
assert helpers.UTC is not helpers.DTM_UTC
def test_escape_markdown(self):
test_str = '*bold*, _italic_, `code`, [text_link](http://github.com/)'
expected_str = r'\*bold\*, \_italic\_, \`code\`, \[text\_link](http://github.com/)'
assert expected_str == helpers.escape_markdown(test_str)
def test_escape_markdown_v2(self):
test_str = 'a_b*c[d]e (fg) h~I`>JK#L+MN -O=|p{qr}s.t! u'
expected_str = r'a\_b\*c\[d\]e \(fg\) h\~I\`\>JK\#L\+MN \-O\=\|p\{qr\}s\.t\! u'
assert expected_str == helpers.escape_markdown(test_str, version=2)
def test_escape_markdown_v2_monospaced(self):
test_str = r'mono/pre: `abc` \int (`\some \`stuff)'
expected_str = 'mono/pre: \\`abc\\` \\\\int (\\`\\\\some \\\\\\`stuff)'
assert expected_str == helpers.escape_markdown(
test_str, version=2, entity_type=MessageEntity.PRE
)
assert expected_str == helpers.escape_markdown(
test_str, version=2, entity_type=MessageEntity.CODE
)
def test_escape_markdown_v2_text_link(self):
test_str = 'https://url.containing/funny)cha)\\ra\\)cter\\s'
expected_str = 'https://url.containing/funny\\)cha\\)\\\\ra\\\\\\)cter\\\\s'
assert expected_str == helpers.escape_markdown(
test_str, version=2, entity_type=MessageEntity.TEXT_LINK
)
def test_markdown_invalid_version(self):
with pytest.raises(ValueError):
helpers.escape_markdown('abc', version=-1)
def test_to_float_timestamp_absolute_naive(self):
"""Conversion from timezone-naive datetime to timestamp.
Naive datetimes should be assumed to be in UTC.
"""
datetime = dtm.datetime(2019, 11, 11, 0, 26, 16, 10 ** 5)
assert helpers.to_float_timestamp(datetime) == 1573431976.1
def test_to_float_timestamp_absolute_naive_no_pytz(self, monkeypatch):
"""Conversion from timezone-naive datetime to timestamp.
Naive datetimes should be assumed to be in UTC.
"""
monkeypatch.setattr(helpers, 'UTC', helpers.DTM_UTC)
datetime = dtm.datetime(2019, 11, 11, 0, 26, 16, 10 ** 5)
assert helpers.to_float_timestamp(datetime) == 1573431976.1
def test_to_float_timestamp_absolute_aware(self, timezone):
"""Conversion from timezone-aware datetime to timestamp"""
# we're parametrizing this with two different UTC offsets to exclude the possibility
# of an xpass when the test is run in a timezone with the same UTC offset
test_datetime = dtm.datetime(2019, 11, 11, 0, 26, 16, 10 ** 5)
datetime = timezone.localize(test_datetime)
assert (
helpers.to_float_timestamp(datetime)
== 1573431976.1 - timezone.utcoffset(test_datetime).total_seconds()
)
def test_to_float_timestamp_absolute_no_reference(self):
"""A reference timestamp is only relevant for relative time specifications"""
with pytest.raises(ValueError):
helpers.to_float_timestamp(dtm.datetime(2019, 11, 11), reference_timestamp=123)
@pytest.mark.parametrize('time_spec', DELTA_TIME_SPECS, ids=str)
def test_to_float_timestamp_delta(self, time_spec):
"""Conversion from a 'delta' time specification to timestamp"""
reference_t = 0
delta = time_spec.total_seconds() if hasattr(time_spec, 'total_seconds') else time_spec
assert helpers.to_float_timestamp(time_spec, reference_t) == reference_t + delta
def test_to_float_timestamp_time_of_day(self):
"""Conversion from time-of-day specification to timestamp"""
hour, hour_delta = 12, 1
ref_t = _datetime_to_float_timestamp(dtm.datetime(1970, 1, 1, hour=hour))
# test for a time of day that is still to come, and one in the past
time_future, time_past = dtm.time(hour + hour_delta), dtm.time(hour - hour_delta)
assert helpers.to_float_timestamp(time_future, ref_t) == ref_t + 60 * 60 * hour_delta
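        # a time of day that has already passed today is taken to mean that time tomorrow,
        # hence the (24 - hour_delta) hour offset in the expected timestamp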
assert helpers.to_float_timestamp(time_past, ref_t) == ref_t + 60 * 60 * (24 - hour_delta)
def test_to_float_timestamp_time_of_day_timezone(self, timezone):
"""Conversion from timezone-aware time-of-day specification to timestamp"""
# we're parametrizing this with two different UTC offsets to exclude the possibility
# of an xpass when the test is run in a timezone with the same UTC offset
ref_datetime = dtm.datetime(1970, 1, 1, 12)
utc_offset = timezone.utcoffset(ref_datetime)
ref_t, time_of_day = _datetime_to_float_timestamp(ref_datetime), ref_datetime.time()
aware_time_of_day = timezone.localize(ref_datetime).timetz()
# first test that naive time is assumed to be utc:
assert helpers.to_float_timestamp(time_of_day, ref_t) == pytest.approx(ref_t)
# test that by setting the timezone the timestamp changes accordingly:
assert helpers.to_float_timestamp(aware_time_of_day, ref_t) == pytest.approx(
ref_t + (-utc_offset.total_seconds() % (24 * 60 * 60))
)
@pytest.mark.parametrize('time_spec', RELATIVE_TIME_SPECS, ids=str)
def test_to_float_timestamp_default_reference(self, time_spec):
"""The reference timestamp for relative time specifications should default to now"""
now = time.time()
assert helpers.to_float_timestamp(time_spec) == pytest.approx(
helpers.to_float_timestamp(time_spec, reference_timestamp=now)
)
def test_to_float_timestamp_error(self):
with pytest.raises(TypeError, match='Defaults'):
helpers.to_float_timestamp(Defaults())
@pytest.mark.parametrize('time_spec', TIME_SPECS, ids=str)
def test_to_timestamp(self, time_spec):
# delegate tests to `to_float_timestamp`
assert helpers.to_timestamp(time_spec) == int(helpers.to_float_timestamp(time_spec))
def test_to_timestamp_none(self):
        # this 'convenience' behaviour has been left for backwards compatibility
assert helpers.to_timestamp(None) is None
def test_from_timestamp_none(self):
assert helpers.from_timestamp(None) is None
def test_from_timestamp_naive(self):
datetime = dtm.datetime(2019, 11, 11, 0, 26, 16, tzinfo=None)
assert helpers.from_timestamp(1573431976, tzinfo=None) == datetime
def test_from_timestamp_aware(self, timezone):
# we're parametrizing this with two different UTC offsets to exclude the possibility
# of an xpass when the test is run in a timezone with the same UTC offset
test_datetime = dtm.datetime(2019, 11, 11, 0, 26, 16, 10 ** 5)
datetime = timezone.localize(test_datetime)
assert (
helpers.from_timestamp(
1573431976.1 - timezone.utcoffset(test_datetime).total_seconds()
)
== datetime
)
def test_create_deep_linked_url(self):
username = 'JamesTheMock'
payload = "hello"
expected = f"https://t.me/{username}?start={payload}"
actual = helpers.create_deep_linked_url(username, payload)
assert expected == actual
expected = f"https://t.me/{username}?startgroup={payload}"
actual = helpers.create_deep_linked_url(username, payload, group=True)
assert expected == actual
payload = ""
expected = f"https://t.me/{username}"
assert expected == helpers.create_deep_linked_url(username)
assert expected == helpers.create_deep_linked_url(username, payload)
payload = None
assert expected == helpers.create_deep_linked_url(username, payload)
with pytest.raises(ValueError):
helpers.create_deep_linked_url(username, 'text with spaces')
with pytest.raises(ValueError):
helpers.create_deep_linked_url(username, '0' * 65)
with pytest.raises(ValueError):
helpers.create_deep_linked_url(None, None)
with pytest.raises(ValueError): # too short username (4 is minimum)
helpers.create_deep_linked_url("abc", None)
def test_effective_message_type(self):
def build_test_message(**kwargs):
config = dict(
message_id=1,
from_user=None,
date=None,
chat=None,
)
config.update(**kwargs)
return Message(**config)
test_message = build_test_message(text='Test')
assert helpers.effective_message_type(test_message) == 'text'
test_message.text = None
test_message = build_test_message(
sticker=Sticker('sticker_id', 'unique_id', 50, 50, False)
)
assert helpers.effective_message_type(test_message) == 'sticker'
test_message.sticker = None
test_message = build_test_message(new_chat_members=[User(55, 'new_user', False)])
assert helpers.effective_message_type(test_message) == 'new_chat_members'
test_message = build_test_message(left_chat_member=[User(55, 'new_user', False)])
assert helpers.effective_message_type(test_message) == 'left_chat_member'
test_update = Update(1)
test_message = build_test_message(text='Test')
test_update.message = test_message
assert helpers.effective_message_type(test_update) == 'text'
empty_update = Update(2)
assert helpers.effective_message_type(empty_update) is None
def test_mention_html(self):
expected = '<a href="tg://user?id=1">the name</a>'
assert expected == helpers.mention_html(1, 'the name')
def test_mention_markdown(self):
expected = '[the name](tg://user?id=1)'
assert expected == helpers.mention_markdown(1, 'the name')
def test_mention_markdown_2(self):
expected = r'[the\_name](tg://user?id=1)'
assert expected == helpers.mention_markdown(1, 'the_name')
@pytest.mark.parametrize(
'string,expected',
[
('tests/data/game.gif', True),
('tests/data', False),
(str(Path.cwd() / 'tests' / 'data' / 'game.gif'), True),
(str(Path.cwd() / 'tests' / 'data'), False),
(Path.cwd() / 'tests' / 'data' / 'game.gif', True),
(Path.cwd() / 'tests' / 'data', False),
('https:/api.org/file/botTOKEN/document/file_3', False),
(None, False),
],
)
def test_is_local_file(self, string, expected):
assert helpers.is_local_file(string) == expected
@pytest.mark.parametrize(
'string,expected',
[
('tests/data/game.gif', (Path.cwd() / 'tests' / 'data' / 'game.gif').as_uri()),
('tests/data', 'tests/data'),
('file://foobar', 'file://foobar'),
(
str(Path.cwd() / 'tests' / 'data' / 'game.gif'),
(Path.cwd() / 'tests' / 'data' / 'game.gif').as_uri(),
),
(str(Path.cwd() / 'tests' / 'data'), str(Path.cwd() / 'tests' / 'data')),
(
Path.cwd() / 'tests' / 'data' / 'game.gif',
(Path.cwd() / 'tests' / 'data' / 'game.gif').as_uri(),
),
(Path.cwd() / 'tests' / 'data', Path.cwd() / 'tests' / 'data'),
(
'https:/api.org/file/botTOKEN/document/file_3',
'https:/api.org/file/botTOKEN/document/file_3',
),
],
)
def test_parse_file_input_string(self, string, expected):
assert helpers.parse_file_input(string) == expected
def test_parse_file_input_file_like(self):
with open('tests/data/game.gif', 'rb') as file:
parsed = helpers.parse_file_input(file)
assert isinstance(parsed, InputFile)
assert not parsed.attach
assert parsed.filename == 'game.gif'
with open('tests/data/game.gif', 'rb') as file:
parsed = helpers.parse_file_input(file, attach=True, filename='test_file')
assert isinstance(parsed, InputFile)
assert parsed.attach
assert parsed.filename == 'test_file'
def test_parse_file_input_bytes(self):
with open('tests/data/text_file.txt', 'rb') as file:
parsed = helpers.parse_file_input(file.read())
assert isinstance(parsed, InputFile)
assert not parsed.attach
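        # the expected default filename comes from the guessed mime type
        # ('application/octet-stream') with the slash replaced by a dot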
assert parsed.filename == 'application.octet-stream'
with open('tests/data/text_file.txt', 'rb') as file:
parsed = helpers.parse_file_input(file.read(), attach=True, filename='test_file')
assert isinstance(parsed, InputFile)
assert parsed.attach
assert parsed.filename == 'test_file'
def test_parse_file_input_tg_object(self):
animation = Animation('file_id', 'unique_id', 1, 1, 1)
assert helpers.parse_file_input(animation, Animation) == 'file_id'
assert helpers.parse_file_input(animation, MessageEntity) is animation
@pytest.mark.parametrize('obj', [{1: 2}, [1, 2], (1, 2)])
def test_parse_file_input_other(self, obj):
assert helpers.parse_file_input(obj) is obj
| lgpl-3.0 |
saurabh6790/med_new_app | stock/doctype/warehouse/warehouse.py | 18 | 5226 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import webnotes
from webnotes.utils import cint, validate_email_add
from webnotes import msgprint, _
class DocType:
def __init__(self, doc, doclist=[]):
self.doc = doc
self.doclist = doclist
def autoname(self):
suffix = " - " + webnotes.conn.get_value("Company", self.doc.company, "abbr")
if not self.doc.warehouse_name.endswith(suffix):
self.doc.name = self.doc.warehouse_name + suffix
def validate(self):
if self.doc.email_id and not validate_email_add(self.doc.email_id):
msgprint("Please enter valid Email Id", raise_exception=1)
self.update_parent_account()
def update_parent_account(self):
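        # if the parent account group was changed on an existing warehouse, move the
        # linked Account record under the newly selected parent account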
if not self.doc.__islocal and (self.doc.create_account_under !=
webnotes.conn.get_value("Warehouse", self.doc.name, "create_account_under")):
warehouse_account = webnotes.conn.get_value("Account",
{"account_type": "Warehouse", "company": self.doc.company,
"master_name": self.doc.name}, ["name", "parent_account"])
if warehouse_account and warehouse_account[1] != self.doc.create_account_under:
acc_bean = webnotes.bean("Account", warehouse_account[0])
acc_bean.doc.parent_account = self.doc.create_account_under
acc_bean.save()
def on_update(self):
self.create_account_head()
def create_account_head(self):
if cint(webnotes.defaults.get_global_default("auto_accounting_for_stock")):
if not webnotes.conn.get_value("Account", {"account_type": "Warehouse",
"master_name": self.doc.name}) and not webnotes.conn.get_value("Account",
{"account_name": self.doc.warehouse_name}):
if self.doc.fields.get("__islocal") or not webnotes.conn.get_value(
"Stock Ledger Entry", {"warehouse": self.doc.name}):
self.validate_parent_account()
ac_bean = webnotes.bean({
"doctype": "Account",
'account_name': self.doc.warehouse_name,
'parent_account': self.doc.create_account_under,
'group_or_ledger':'Ledger',
'company':self.doc.company,
"account_type": "Warehouse",
"master_name": self.doc.name,
"freeze_account": "No"
})
ac_bean.ignore_permissions = True
ac_bean.insert()
msgprint(_("Account Head") + ": " + ac_bean.doc.name + _(" created"))
def validate_parent_account(self):
if not self.doc.create_account_under:
parent_account = webnotes.conn.get_value("Account",
{"account_name": "Stock Assets", "company": self.doc.company})
if parent_account:
self.doc.create_account_under = parent_account
else:
webnotes.throw(_("Please enter account group under which account \
for warehouse ") + self.doc.name +_(" will be created"))
def on_trash(self):
# delete bin
bins = webnotes.conn.sql("select * from `tabBin` where warehouse = %s",
self.doc.name, as_dict=1)
for d in bins:
if d['actual_qty'] or d['reserved_qty'] or d['ordered_qty'] or \
d['indented_qty'] or d['projected_qty'] or d['planned_qty']:
msgprint("""Warehouse: %s can not be deleted as qty exists for item: %s"""
% (self.doc.name, d['item_code']), raise_exception=1)
else:
webnotes.conn.sql("delete from `tabBin` where name = %s", d['name'])
warehouse_account = webnotes.conn.get_value("Account",
{"account_type": "Warehouse", "master_name": self.doc.name})
if warehouse_account:
webnotes.delete_doc("Account", warehouse_account)
if webnotes.conn.sql("""select name from `tabStock Ledger Entry`
where warehouse = %s""", self.doc.name):
msgprint("""Warehouse can not be deleted as stock ledger entry
exists for this warehouse.""", raise_exception=1)
def before_rename(self, olddn, newdn, merge=False):
# Add company abbr if not provided
from setup.doctype.company.company import get_name_with_abbr
new_warehouse = get_name_with_abbr(newdn, self.doc.company)
if merge:
if not webnotes.conn.exists("Warehouse", newdn):
webnotes.throw(_("Warehouse ") + newdn +_(" does not exists"))
if self.doc.company != webnotes.conn.get_value("Warehouse", new_warehouse, "company"):
webnotes.throw(_("Both Warehouse must belong to same Company"))
webnotes.conn.sql("delete from `tabBin` where warehouse=%s", olddn)
from accounts.utils import rename_account_for
rename_account_for("Warehouse", olddn, new_warehouse, merge)
return new_warehouse
def after_rename(self, olddn, newdn, merge=False):
if merge:
self.recalculate_bin_qty(newdn)
def recalculate_bin_qty(self, newdn):
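        # after a merge, rebuild Bin quantities by reposting stock for every stock item
        # against the surviving warehouse; negative stock is allowed temporarily so the
        # intermediate postings do not fail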
from utilities.repost_stock import repost_stock
webnotes.conn.auto_commit_on_many_writes = 1
webnotes.conn.set_default("allow_negative_stock", 1)
for item in webnotes.conn.sql("""select distinct item_code from (
select name as item_code from `tabItem` where ifnull(is_stock_item, 'Yes')='Yes'
union
select distinct item_code from tabBin) a"""):
repost_stock(item[0], newdn)
webnotes.conn.set_default("allow_negative_stock",
webnotes.conn.get_value("Stock Settings", None, "allow_negative_stock"))
webnotes.conn.auto_commit_on_many_writes = 0 | agpl-3.0 |
prutseltje/ansible | lib/ansible/modules/packaging/language/yarn.py | 17 | 11746 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017 David Gunter <[email protected]>
# Copyright (c) 2017 Chris Hoffman <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: yarn
short_description: Manage node.js packages with Yarn
description:
- Manage node.js packages with the Yarn package manager (https://yarnpkg.com/)
version_added: "2.6"
author:
- "David Gunter (@verkaufer)"
- "Chris Hoffman (@chrishoffman, creator of NPM Ansible module)"
options:
name:
description:
- The name of a node.js library to install
- If omitted all packages in package.json are installed.
required: false
path:
description:
- The base path where Node.js libraries will be installed.
- This is where the node_modules folder lives.
required: false
version:
description:
- The version of the library to be installed.
- Must be in semver format. If "latest" is desired, use "state" arg instead
required: false
global:
description:
- Install the node.js library globally
required: false
default: no
type: bool
executable:
description:
- The executable location for yarn.
required: false
ignore_scripts:
description:
- Use the --ignore-scripts flag when installing.
required: false
type: bool
default: no
production:
description:
- Install dependencies in production mode.
- Yarn will ignore any dependencies under devDependencies in package.json
required: false
type: bool
default: no
registry:
description:
- The registry to install modules from.
required: false
state:
description:
- Installation state of the named node.js library
- If absent is selected, a name option must be provided
required: false
default: present
choices: [ "present", "absent", "latest" ]
requirements:
- Yarn installed in bin path (typically /usr/local/bin)
'''
EXAMPLES = '''
- name: Install "imagemin" node.js package.
yarn:
name: imagemin
path: /app/location
- name: Install "imagemin" node.js package on version 5.3.1
yarn:
name: imagemin
version: '5.3.1'
path: /app/location
- name: Install "imagemin" node.js package globally.
yarn:
name: imagemin
global: yes
- name: Remove the globally-installed package "imagemin".
yarn:
name: imagemin
global: yes
state: absent
- name: Install "imagemin" node.js package from custom registry.
yarn:
name: imagemin
registry: 'http://registry.mysite.com'
- name: Install packages based on package.json.
yarn:
path: /app/location
- name: Update all packages in package.json to their latest version.
yarn:
path: /app/location
state: latest
'''
RETURN = '''
changed:
description: Whether Yarn changed any package data
returned: always
type: boolean
sample: true
msg:
description: Provides an error message if Yarn syntax was incorrect
returned: failure
type: string
sample: "Package must be explicitly named when uninstalling."
invocation:
description: Parameters and values used during execution
returned: success
type: dictionary
sample: {
"module_args": {
"executable": null,
"globally": false,
"ignore_scripts": false,
"name": null,
"path": "/some/path/folder",
"production": false,
"registry": null,
"state": "present",
"version": null
}
}
out:
description: Output generated from Yarn with emojis removed.
returned: always
type: string
sample: "yarn add v0.16.1[1/4] Resolving packages...[2/4] Fetching packages...[3/4] Linking dependencies...[4/4]
Building fresh packages...success Saved lockfile.success Saved 1 new [email protected] Done in 0.59s."
'''
import os
import re
import json
from ansible.module_utils.basic import AnsibleModule
class Yarn(object):
DEFAULT_GLOBAL_INSTALLATION_PATH = '~/.config/yarn/global'
def __init__(self, module, **kwargs):
self.module = module
self.globally = kwargs['globally']
self.name = kwargs['name']
self.version = kwargs['version']
self.path = kwargs['path']
self.registry = kwargs['registry']
self.production = kwargs['production']
self.ignore_scripts = kwargs['ignore_scripts']
# Specify a version of package if version arg passed in
self.name_version = None
if kwargs['executable']:
self.executable = kwargs['executable'].split(' ')
else:
self.executable = [module.get_bin_path('yarn', True)]
if kwargs['version'] and self.name is not None:
self.name_version = self.name + '@' + str(self.version)
def _exec(self, args, run_in_check_mode=False, check_rc=True):
if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):
if self.globally:
# Yarn global arg is inserted before the command (e.g. `yarn global {some-command}`)
args.insert(0, 'global')
cmd = self.executable + args
if self.production:
cmd.append('--production')
if self.ignore_scripts:
cmd.append('--ignore-scripts')
if self.registry:
cmd.append('--registry')
cmd.append(self.registry)
# always run Yarn without emojis when called via Ansible
cmd.append('--no-emoji')
# If path is specified, cd into that path and run the command.
cwd = None
if self.path and not self.globally:
if not os.path.exists(self.path):
# Module will make directory if not exists.
os.makedirs(self.path)
if not os.path.isdir(self.path):
self.module.fail_json(msg="Path provided %s is not a directory" % self.path)
cwd = self.path
if not os.path.isfile(os.path.join(self.path, 'package.json')):
self.module.fail_json(msg="Package.json does not exist in provided path.")
rc, out, err = self.module.run_command(cmd, check_rc=check_rc, cwd=cwd)
return out, err
return ''
def list(self):
cmd = ['list', '--depth=0', '--json']
installed = list()
missing = list()
if not os.path.isfile(os.path.join(self.path, 'yarn.lock')):
missing.append(self.name)
return installed, missing
result, error = self._exec(cmd, True, False)
if error:
self.module.fail_json(msg=error)
data = json.loads(result)
try:
dependencies = data['data']['trees']
except KeyError:
missing.append(self.name)
return installed, missing
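        # each entry in the dependency tree is reported as '<name>@<version>'; keep only
        # the name so it can be compared against self.name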
for dep in dependencies:
name, version = dep['name'].split('@')
installed.append(name)
if self.name not in installed:
missing.append(self.name)
return installed, missing
def install(self):
if self.name_version:
# Yarn has a separate command for installing packages by name...
return self._exec(['add', self.name_version])
# And one for installing all packages in package.json
return self._exec(['install', '--non-interactive'])
def update(self):
return self._exec(['upgrade', '--latest'])
def uninstall(self):
return self._exec(['remove', self.name])
def list_outdated(self):
outdated = list()
if not os.path.isfile(os.path.join(self.path, 'yarn.lock')):
return outdated
cmd_result, err = self._exec(['outdated', '--json'], True, False)
if err:
self.module.fail_json(msg=err)
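        # `yarn outdated --json` emits one JSON object per line; the first line is an
        # informational record, so the table of outdated packages is expected on the second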
outdated_packages_data = cmd_result.splitlines()[1]
data = json.loads(outdated_packages_data)
try:
outdated_dependencies = data['data']['body']
except KeyError:
return outdated
for dep in outdated_dependencies:
# Outdated dependencies returned as a list of lists, where
# item at index 0 is the name of the dependency
outdated.append(dep[0])
return outdated
def main():
arg_spec = dict(
name=dict(default=None),
path=dict(default=None, type='path'),
version=dict(default=None),
production=dict(default='no', type='bool'),
executable=dict(default=None, type='path'),
registry=dict(default=None),
state=dict(default='present', choices=['present', 'absent', 'latest']),
ignore_scripts=dict(default=False, type='bool'),
)
arg_spec['global'] = dict(default='no', type='bool')
module = AnsibleModule(
argument_spec=arg_spec,
supports_check_mode=True
)
name = module.params['name']
path = module.params['path']
version = module.params['version']
globally = module.params['global']
production = module.params['production']
executable = module.params['executable']
registry = module.params['registry']
state = module.params['state']
ignore_scripts = module.params['ignore_scripts']
# When installing globally, users should not be able to define a path for installation.
# Require a path if global is False, though!
if path is None and globally is False:
module.fail_json(msg='Path must be specified when not using global arg')
elif path and globally is True:
module.fail_json(msg='Cannot specify path if doing global installation')
if state == 'absent' and not name:
module.fail_json(msg='Package must be explicitly named when uninstalling.')
if state == 'latest':
version = 'latest'
# When installing globally, use the defined path for global node_modules
if globally:
path = Yarn.DEFAULT_GLOBAL_INSTALLATION_PATH
yarn = Yarn(module,
name=name,
path=path,
version=version,
globally=globally,
production=production,
executable=executable,
registry=registry,
ignore_scripts=ignore_scripts)
changed = False
out = ''
err = ''
if state == 'present':
if not name:
changed = True
out, err = yarn.install()
else:
installed, missing = yarn.list()
if len(missing):
changed = True
out, err = yarn.install()
elif state == 'latest':
if not name:
changed = True
out, err = yarn.install()
else:
installed, missing = yarn.list()
outdated = yarn.list_outdated()
if len(missing):
changed = True
out, err = yarn.install()
if len(outdated):
changed = True
out, err = yarn.update()
else:
# state == absent
installed, missing = yarn.list()
if name in installed:
changed = True
out, err = yarn.uninstall()
module.exit_json(changed=changed, out=out, err=err)
if __name__ == '__main__':
main()
| gpl-3.0 |
tersmitten/ansible | lib/ansible/modules/system/cron.py | 14 | 25562 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Dane Summers <[email protected]>
# Copyright: (c) 2013, Mike Grozak <[email protected]>
# Copyright: (c) 2013, Patrick Callahan <[email protected]>
# Copyright: (c) 2015, Evan Kaufman <[email protected]>
# Copyright: (c) 2015, Luca Berruti <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: cron
short_description: Manage cron.d and crontab entries
description:
  - Use this module to manage crontab and environment variable entries. This module allows
you to create environment variables and named crontab entries, update, or delete them.
- 'When crontab jobs are managed: the module includes one line with the description of the
crontab entry C("#Ansible: <name>") corresponding to the "name" passed to the module,
which is used by future ansible/module calls to find/check the state. The "name"
parameter should be unique, and changing the "name" value will result in a new cron
task being created (or a different one being removed).'
  - When environment variables are managed, no comment line is added, but when the module
needs to find/check the state, it uses the "name" parameter to find the environment
variable definition line.
- When using symbols such as %, they must be properly escaped.
version_added: "0.9"
options:
name:
description:
- Description of a crontab entry or, if env is set, the name of environment variable.
- Required if C(state=absent).
- Note that if name is not set and C(state=present), then a
new crontab entry will always be created, regardless of existing ones.
- This parameter will always be required in future releases.
type: str
user:
description:
- The specific user whose crontab should be modified.
- When unset, this parameter defaults to using C(root).
type: str
job:
description:
- The command to execute or, if env is set, the value of environment variable.
- The command should not contain line breaks.
- Required if C(state=present).
type: str
aliases: [ value ]
state:
description:
- Whether to ensure the job or environment variable is present or absent.
type: str
choices: [ absent, present ]
default: present
cron_file:
description:
- If specified, uses this file instead of an individual user's crontab.
- If this is a relative path, it is interpreted with respect to I(/etc/cron.d).
- If it is absolute, it will typically be I(/etc/crontab).
      - Many Linux distros expect (and some require) the filename portion to consist solely
of upper- and lower-case letters, digits, underscores, and hyphens.
- To use the C(cron_file) parameter you must specify the C(user) as well.
type: str
backup:
description:
- If set, create a backup of the crontab before it is modified.
The location of the backup is returned in the C(backup_file) variable by this module.
type: bool
default: no
minute:
description:
- Minute when the job should run ( 0-59, *, */2, etc )
type: str
default: "*"
hour:
description:
- Hour when the job should run ( 0-23, *, */2, etc )
type: str
default: "*"
day:
description:
- Day of the month the job should run ( 1-31, *, */2, etc )
type: str
default: "*"
aliases: [ dom ]
month:
description:
- Month of the year the job should run ( 1-12, *, */2, etc )
type: str
default: "*"
weekday:
description:
- Day of the week that the job should run ( 0-6 for Sunday-Saturday, *, etc )
type: str
default: "*"
aliases: [ dow ]
reboot:
description:
- If the job should be run at reboot. This option is deprecated. Users should use special_time.
version_added: "1.0"
type: bool
default: no
special_time:
description:
- Special time specification nickname.
type: str
choices: [ annually, daily, hourly, monthly, reboot, weekly, yearly ]
version_added: "1.3"
disabled:
description:
- If the job should be disabled (commented out) in the crontab.
- Only has effect if C(state=present).
type: bool
default: no
version_added: "2.0"
env:
description:
- If set, manages a crontab's environment variable.
- New variables are added on top of crontab.
- C(name) and C(value) parameters are the name and the value of environment variable.
type: bool
default: no
version_added: "2.1"
insertafter:
description:
- Used with C(state=present) and C(env).
- If specified, the environment variable will be inserted after the declaration of specified environment variable.
type: str
version_added: "2.1"
insertbefore:
description:
- Used with C(state=present) and C(env).
- If specified, the environment variable will be inserted before the declaration of specified environment variable.
type: str
version_added: "2.1"
requirements:
- cron
author:
- Dane Summers (@dsummersl)
- Mike Grozak (@rhaido)
- Patrick Callahan (@dirtyharrycallahan)
- Evan Kaufman (@EvanK)
- Luca Berruti (@lberruti)
'''
EXAMPLES = r'''
- name: Ensure a job that runs at 2 and 5 exists. Creates an entry like "0 5,2 * * * ls -alh > /dev/null"
cron:
name: "check dirs"
minute: "0"
hour: "5,2"
job: "ls -alh > /dev/null"
- name: 'Ensure an old job is no longer present. Removes any job that is prefixed by "#Ansible: an old job" from the crontab'
cron:
name: "an old job"
state: absent
- name: Creates an entry like "@reboot /some/job.sh"
cron:
name: "a job for reboot"
special_time: reboot
job: "/some/job.sh"
- name: Creates an entry like "PATH=/opt/bin" on top of crontab
cron:
name: PATH
env: yes
job: /opt/bin
- name: Creates an entry like "APP_HOME=/srv/app" and insert it after PATH declaration
cron:
name: APP_HOME
env: yes
job: /srv/app
insertafter: PATH
- name: Creates a cron file under /etc/cron.d
cron:
name: yum autoupdate
weekday: 2
minute: 0
hour: 12
user: root
job: "YUMINTERACTIVE=0 /usr/sbin/yum-autoupdate"
cron_file: ansible_yum-autoupdate
- name: Removes a cron file from under /etc/cron.d
cron:
name: "yum autoupdate"
cron_file: ansible_yum-autoupdate
state: absent
- name: Removes "APP_HOME" environment variable from crontab
cron:
name: APP_HOME
env: yes
state: absent
'''
import os
import platform
import pipes
import pwd
import re
import sys
import tempfile
from ansible.module_utils.basic import AnsibleModule, get_platform
CRONCMD = "/usr/bin/crontab"
class CronTabError(Exception):
pass
class CronTab(object):
"""
CronTab object to write time based crontab file
user - the user of the crontab (defaults to root)
cron_file - a cron file under /etc/cron.d, or an absolute path
"""
def __init__(self, module, user=None, cron_file=None):
self.module = module
self.user = user
self.root = (os.getuid() == 0)
self.lines = None
self.ansible = "#Ansible: "
self.existing = ''
if cron_file:
if os.path.isabs(cron_file):
self.cron_file = cron_file
else:
self.cron_file = os.path.join('/etc/cron.d', cron_file)
else:
self.cron_file = None
self.read()
def read(self):
# Read in the crontab from the system
self.lines = []
if self.cron_file:
# read the cronfile
try:
f = open(self.cron_file, 'r')
self.existing = f.read()
self.lines = self.existing.splitlines()
f.close()
except IOError:
# cron file does not exist
return
except Exception:
raise CronTabError("Unexpected error:", sys.exc_info()[0])
else:
# using safely quoted shell for now, but this really should be two non-shell calls instead. FIXME
(rc, out, err) = self.module.run_command(self._read_user_execute(), use_unsafe_shell=True)
if rc != 0 and rc != 1: # 1 can mean that there are no jobs.
raise CronTabError("Unable to read crontab")
self.existing = out
lines = out.splitlines()
count = 0
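            # skip (and strip from the saved copy) up to three header lines that some
            # crontab implementations prepend: the 'DO NOT EDIT THIS FILE' banner, the
            # temporary-file note and the version note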
for l in lines:
if count > 2 or (not re.match(r'# DO NOT EDIT THIS FILE - edit the master and reinstall.', l) and
not re.match(r'# \(/tmp/.*installed on.*\)', l) and
not re.match(r'# \(.*version.*\)', l)):
self.lines.append(l)
else:
pattern = re.escape(l) + '[\r\n]?'
self.existing = re.sub(pattern, '', self.existing, 1)
count += 1
def is_empty(self):
if len(self.lines) == 0:
return True
else:
return False
def write(self, backup_file=None):
"""
Write the crontab to the system. Saves all information.
"""
if backup_file:
fileh = open(backup_file, 'w')
elif self.cron_file:
fileh = open(self.cron_file, 'w')
else:
filed, path = tempfile.mkstemp(prefix='crontab')
os.chmod(path, int('0644', 8))
fileh = os.fdopen(filed, 'w')
fileh.write(self.render())
fileh.close()
# return if making a backup
if backup_file:
return
# Add the entire crontab back to the user crontab
if not self.cron_file:
# quoting shell args for now but really this should be two non-shell calls. FIXME
(rc, out, err) = self.module.run_command(self._write_execute(path), use_unsafe_shell=True)
os.unlink(path)
if rc != 0:
self.module.fail_json(msg=err)
# set SELinux permissions
if self.module.selinux_enabled() and self.cron_file:
self.module.set_default_selinux_context(self.cron_file, False)
def do_comment(self, name):
return "%s%s" % (self.ansible, name)
def add_job(self, name, job):
# Add the comment
self.lines.append(self.do_comment(name))
# Add the job
self.lines.append("%s" % (job))
def update_job(self, name, job):
return self._update_job(name, job, self.do_add_job)
def do_add_job(self, lines, comment, job):
lines.append(comment)
lines.append("%s" % (job))
def remove_job(self, name):
return self._update_job(name, "", self.do_remove_job)
def do_remove_job(self, lines, comment, job):
return None
def add_env(self, decl, insertafter=None, insertbefore=None):
if not (insertafter or insertbefore):
self.lines.insert(0, decl)
return
if insertafter:
other_name = insertafter
elif insertbefore:
other_name = insertbefore
other_decl = self.find_env(other_name)
if len(other_decl) > 0:
if insertafter:
index = other_decl[0] + 1
elif insertbefore:
index = other_decl[0]
self.lines.insert(index, decl)
return
self.module.fail_json(msg="Variable named '%s' not found." % other_name)
def update_env(self, name, decl):
return self._update_env(name, decl, self.do_add_env)
def do_add_env(self, lines, decl):
lines.append(decl)
def remove_env(self, name):
return self._update_env(name, '', self.do_remove_env)
def do_remove_env(self, lines, decl):
return None
def remove_job_file(self):
try:
os.unlink(self.cron_file)
return True
except OSError:
# cron file does not exist
return False
except Exception:
raise CronTabError("Unexpected error:", sys.exc_info()[0])
def find_job(self, name, job=None):
# attempt to find job by 'Ansible:' header comment
comment = None
for l in self.lines:
if comment is not None:
if comment == name:
return [comment, l]
else:
comment = None
elif re.match(r'%s' % self.ansible, l):
comment = re.sub(r'%s' % self.ansible, '', l)
# failing that, attempt to find job by exact match
if job:
for i, l in enumerate(self.lines):
if l == job:
# if no leading ansible header, insert one
if not re.match(r'%s' % self.ansible, self.lines[i - 1]):
self.lines.insert(i, self.do_comment(name))
return [self.lines[i], l, True]
# if a leading blank ansible header AND job has a name, update header
elif name and self.lines[i - 1] == self.do_comment(None):
self.lines[i - 1] = self.do_comment(name)
return [self.lines[i - 1], l, True]
return []
def find_env(self, name):
for index, l in enumerate(self.lines):
if re.match(r'^%s=' % name, l):
return [index, l]
return []
def get_cron_job(self, minute, hour, day, month, weekday, job, special, disabled):
# normalize any leading/trailing newlines (ansible/ansible-modules-core#3791)
job = job.strip('\r\n')
if disabled:
disable_prefix = '#'
else:
disable_prefix = ''
if special:
if self.cron_file:
return "%s@%s %s %s" % (disable_prefix, special, self.user, job)
else:
return "%s@%s %s" % (disable_prefix, special, job)
else:
if self.cron_file:
return "%s%s %s %s %s %s %s %s" % (disable_prefix, minute, hour, day, month, weekday, self.user, job)
else:
return "%s%s %s %s %s %s %s" % (disable_prefix, minute, hour, day, month, weekday, job)
def get_jobnames(self):
jobnames = []
for l in self.lines:
if re.match(r'%s' % self.ansible, l):
jobnames.append(re.sub(r'%s' % self.ansible, '', l))
return jobnames
def get_envnames(self):
envnames = []
for l in self.lines:
if re.match(r'^\S+=', l):
envnames.append(l.split('=')[0])
return envnames
def _update_job(self, name, job, addlinesfunction):
ansiblename = self.do_comment(name)
newlines = []
comment = None
for l in self.lines:
if comment is not None:
addlinesfunction(newlines, comment, job)
comment = None
elif l == ansiblename:
comment = l
else:
newlines.append(l)
self.lines = newlines
if len(newlines) == 0:
return True
else:
return False # TODO add some more error testing
def _update_env(self, name, decl, addenvfunction):
newlines = []
for l in self.lines:
if re.match(r'^%s=' % name, l):
addenvfunction(newlines, decl)
else:
newlines.append(l)
self.lines = newlines
def render(self):
"""
        Render this crontab as it would appear in the crontab file.
"""
crons = []
for cron in self.lines:
crons.append(cron)
result = '\n'.join(crons)
if result:
result = result.rstrip('\r\n') + '\n'
return result
def _read_user_execute(self):
"""
Returns the command line for reading a crontab
"""
user = ''
if self.user:
if platform.system() == 'SunOS':
return "su %s -c '%s -l'" % (pipes.quote(self.user), pipes.quote(CRONCMD))
elif platform.system() == 'AIX':
return "%s -l %s" % (pipes.quote(CRONCMD), pipes.quote(self.user))
elif platform.system() == 'HP-UX':
return "%s %s %s" % (CRONCMD, '-l', pipes.quote(self.user))
elif pwd.getpwuid(os.getuid())[0] != self.user:
user = '-u %s' % pipes.quote(self.user)
return "%s %s %s" % (CRONCMD, user, '-l')
def _write_execute(self, path):
"""
Return the command line for writing a crontab
"""
user = ''
if self.user:
if platform.system() in ['SunOS', 'HP-UX', 'AIX']:
return "chown %s %s ; su '%s' -c '%s %s'" % (pipes.quote(self.user), pipes.quote(path), pipes.quote(self.user), CRONCMD, pipes.quote(path))
elif pwd.getpwuid(os.getuid())[0] != self.user:
user = '-u %s' % pipes.quote(self.user)
return "%s %s %s" % (CRONCMD, user, pipes.quote(path))
def main():
# The following example playbooks:
#
# - cron: name="check dirs" hour="5,2" job="ls -alh > /dev/null"
#
# - name: do the job
# cron: name="do the job" hour="5,2" job="/some/dir/job.sh"
#
# - name: no job
# cron: name="an old job" state=absent
#
# - name: sets env
# cron: name="PATH" env=yes value="/bin:/usr/bin"
#
# Would produce:
# PATH=/bin:/usr/bin
# # Ansible: check dirs
# * * 5,2 * * ls -alh > /dev/null
# # Ansible: do the job
# * * 5,2 * * /some/dir/job.sh
module = AnsibleModule(
argument_spec=dict(
name=dict(type='str'),
user=dict(type='str'),
job=dict(type='str', aliases=['value']),
cron_file=dict(type='str'),
state=dict(type='str', default='present', choices=['present', 'absent']),
backup=dict(type='bool', default=False),
minute=dict(type='str', default='*'),
hour=dict(type='str', default='*'),
day=dict(type='str', default='*', aliases=['dom']),
month=dict(type='str', default='*'),
weekday=dict(type='str', default='*', aliases=['dow']),
reboot=dict(type='bool', default=False),
special_time=dict(type='str', choices=["reboot", "yearly", "annually", "monthly", "weekly", "daily", "hourly"]),
disabled=dict(type='bool', default=False),
env=dict(type='bool'),
insertafter=dict(type='str'),
insertbefore=dict(type='str'),
),
supports_check_mode=True,
mutually_exclusive=[
['reboot', 'special_time'],
['insertafter', 'insertbefore'],
],
required_by=dict(
cron_file=('user',),
),
required_if=(
('state', 'present', ('job',)),
),
)
name = module.params['name']
user = module.params['user']
job = module.params['job']
cron_file = module.params['cron_file']
state = module.params['state']
backup = module.params['backup']
minute = module.params['minute']
hour = module.params['hour']
day = module.params['day']
month = module.params['month']
weekday = module.params['weekday']
reboot = module.params['reboot']
special_time = module.params['special_time']
disabled = module.params['disabled']
env = module.params['env']
insertafter = module.params['insertafter']
insertbefore = module.params['insertbefore']
do_install = state == 'present'
changed = False
res_args = dict()
warnings = list()
if cron_file:
cron_file_basename = os.path.basename(cron_file)
if not re.search(r'^[A-Z0-9_-]+$', cron_file_basename, re.I):
warnings.append('Filename portion of cron_file ("%s") should consist' % cron_file_basename +
' solely of upper- and lower-case letters, digits, underscores, and hyphens')
# Ensure all files generated are only writable by the owning user. Primarily relevant for the cron_file option.
os.umask(int('022', 8))
crontab = CronTab(module, user, cron_file)
module.debug('cron instantiated - name: "%s"' % name)
if not name:
module.deprecate(
msg="The 'name' parameter will be required in future releases.",
version='2.12'
)
if reboot:
module.deprecate(
msg="The 'reboot' parameter will be removed in future releases. Use 'special_time' option instead.",
version='2.12'
)
if module._diff:
diff = dict()
diff['before'] = crontab.existing
if crontab.cron_file:
diff['before_header'] = crontab.cron_file
else:
if crontab.user:
diff['before_header'] = 'crontab for user "%s"' % crontab.user
else:
diff['before_header'] = 'crontab'
# --- user input validation ---
if (special_time or reboot) and \
(True in [(x != '*') for x in [minute, hour, day, month, weekday]]):
module.fail_json(msg="You must specify time and date fields or special time.")
# cannot support special_time on solaris
if (special_time or reboot) and get_platform() == 'SunOS':
module.fail_json(msg="Solaris does not support special_time=... or @reboot")
if (insertafter or insertbefore) and not env and do_install:
module.fail_json(msg="Insertafter and insertbefore parameters are valid only with env=yes")
if reboot:
special_time = "reboot"
# if requested make a backup before making a change
if backup and not module.check_mode:
(backuph, backup_file) = tempfile.mkstemp(prefix='crontab')
crontab.write(backup_file)
if crontab.cron_file and not name and not do_install:
if module._diff:
diff['after'] = ''
diff['after_header'] = '/dev/null'
else:
diff = dict()
if module.check_mode:
changed = os.path.isfile(crontab.cron_file)
else:
changed = crontab.remove_job_file()
module.exit_json(changed=changed, cron_file=cron_file, state=state, diff=diff)
if env:
if ' ' in name:
module.fail_json(msg="Invalid name for environment variable")
decl = '%s="%s"' % (name, job)
old_decl = crontab.find_env(name)
if do_install:
if len(old_decl) == 0:
crontab.add_env(decl, insertafter, insertbefore)
changed = True
if len(old_decl) > 0 and old_decl[1] != decl:
crontab.update_env(name, decl)
changed = True
else:
if len(old_decl) > 0:
crontab.remove_env(name)
changed = True
else:
if do_install:
for char in ['\r', '\n']:
if char in job.strip('\r\n'):
warnings.append('Job should not contain line breaks')
break
job = crontab.get_cron_job(minute, hour, day, month, weekday, job, special_time, disabled)
old_job = crontab.find_job(name, job)
if len(old_job) == 0:
crontab.add_job(name, job)
changed = True
if len(old_job) > 0 and old_job[1] != job:
crontab.update_job(name, job)
changed = True
if len(old_job) > 2:
crontab.update_job(name, job)
changed = True
else:
old_job = crontab.find_job(name)
if len(old_job) > 0:
crontab.remove_job(name)
changed = True
# no changes to env/job, but existing crontab needs a terminating newline
if not changed and crontab.existing != '':
if not (crontab.existing.endswith('\r') or crontab.existing.endswith('\n')):
changed = True
res_args = dict(
jobs=crontab.get_jobnames(),
envs=crontab.get_envnames(),
warnings=warnings,
changed=changed
)
if changed:
if not module.check_mode:
crontab.write()
if module._diff:
diff['after'] = crontab.render()
if crontab.cron_file:
diff['after_header'] = crontab.cron_file
else:
if crontab.user:
diff['after_header'] = 'crontab for user "%s"' % crontab.user
else:
diff['after_header'] = 'crontab'
res_args['diff'] = diff
# retain the backup only if crontab or cron file have changed
if backup and not module.check_mode:
if changed:
res_args['backup_file'] = backup_file
else:
os.unlink(backup_file)
if cron_file:
res_args['cron_file'] = cron_file
module.exit_json(**res_args)
# --- should never get here
module.exit_json(msg="Unable to execute cron task.")
if __name__ == '__main__':
main()
| gpl-3.0 |
Adai0808/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting.py | 127 | 37672 | """
Testing for the gradient boosting module (sklearn.ensemble.gradient_boosting).
"""
import warnings
import numpy as np
from sklearn import datasets
from sklearn.base import clone
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.gradient_boosting import ZeroEstimator
from sklearn.metrics import mean_squared_error
from sklearn.utils import check_random_state, tosequence
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.validation import DataConversionWarning
from sklearn.utils.validation import NotFittedError
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
rng = np.random.RandomState(0)
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
# Check classification on a toy dataset.
for loss in ('deviance', 'exponential'):
clf = GradientBoostingClassifier(loss=loss, n_estimators=10,
random_state=1)
assert_raises(ValueError, clf.predict, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf.estimators_))
deviance_decrease = (clf.train_score_[:-1] - clf.train_score_[1:])
assert np.any(deviance_decrease >= 0.0), \
"Train deviance does not monotonically decrease."
def test_parameter_checks():
# Check input parameter validation.
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=-1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='foobar').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=-1.).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=-1.).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=0.6).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=1.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(init={}).fit, X, y)
# test fit before feature importance
assert_raises(ValueError,
lambda: GradientBoostingClassifier().feature_importances_)
# deviance requires ``n_classes >= 2``.
assert_raises(ValueError,
lambda X, y: GradientBoostingClassifier(
loss='deviance').fit(X, y),
X, [0, 0, 0, 0])
def test_loss_function():
assert_raises(ValueError,
GradientBoostingClassifier(loss='ls').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='lad').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='quantile').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='huber').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='deviance').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='exponential').fit, X, y)
def test_classification_synthetic():
# Test GradientBoostingClassifier on synthetic dataset used by
# Hastie et al. in ESLII Example 12.7.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
for loss in ('deviance', 'exponential'):
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=1,
max_depth=1, loss=loss,
learning_rate=1.0, random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert error_rate < 0.09, \
"GB(loss={}) failed with error {}".format(loss, error_rate)
gbrt = GradientBoostingClassifier(n_estimators=200, min_samples_split=1,
max_depth=1,
learning_rate=1.0, subsample=0.5,
random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert error_rate < 0.08, ("Stochastic GradientBoostingClassifier(loss={}) "
"failed with error {}".format(loss, error_rate))
def test_boston():
# Check consistency on dataset boston house prices with least squares
# and least absolute deviation.
for loss in ("ls", "lad", "huber"):
for subsample in (1.0, 0.5):
last_y_pred = None
for i, sample_weight in enumerate(
(None, np.ones(len(boston.target)),
2 * np.ones(len(boston.target)))):
clf = GradientBoostingRegressor(n_estimators=100, loss=loss,
max_depth=4, subsample=subsample,
min_samples_split=1,
random_state=1)
assert_raises(ValueError, clf.predict, boston.data)
clf.fit(boston.data, boston.target,
sample_weight=sample_weight)
y_pred = clf.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert mse < 6.0, "Failed with loss %s and " \
"mse = %.4f" % (loss, mse)
if last_y_pred is not None:
np.testing.assert_array_almost_equal(
last_y_pred, y_pred,
                    err_msg='pred_%d does not match last pred_%d for loss %r and subsample %r. '
% (i, i - 1, loss, subsample))
last_y_pred = y_pred
def test_iris():
# Check consistency on dataset iris.
for subsample in (1.0, 0.5):
for sample_weight in (None, np.ones(len(iris.target))):
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=subsample)
clf.fit(iris.data, iris.target, sample_weight=sample_weight)
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with subsample %.1f " \
"and score = %f" % (subsample, score)
def test_regression_synthetic():
# Test on synthetic regression datasets used in Leo Breiman,
    # `Bagging Predictors`. Machine Learning 24(2): 123-140 (1996).
random_state = check_random_state(1)
regression_params = {'n_estimators': 100, 'max_depth': 4,
'min_samples_split': 1, 'learning_rate': 0.1,
'loss': 'ls'}
# Friedman1
X, y = datasets.make_friedman1(n_samples=1200,
random_state=random_state, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor()
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 5.0, "Failed on Friedman1 with mse = %.4f" % mse
# Friedman2
X, y = datasets.make_friedman2(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 1700.0, "Failed on Friedman2 with mse = %.4f" % mse
# Friedman3
X, y = datasets.make_friedman3(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 0.015, "Failed on Friedman3 with mse = %.4f" % mse
def test_feature_importances():
X = np.array(boston.data, dtype=np.float32)
y = np.array(boston.target, dtype=np.float32)
clf = GradientBoostingRegressor(n_estimators=100, max_depth=5,
min_samples_split=1, random_state=1)
clf.fit(X, y)
#feature_importances = clf.feature_importances_
assert_true(hasattr(clf, 'feature_importances_'))
X_new = clf.transform(X, threshold="mean")
assert_less(X_new.shape[1], X.shape[1])
feature_mask = clf.feature_importances_ > clf.feature_importances_.mean()
assert_array_almost_equal(X_new, X[:, feature_mask])
# true feature importance ranking
# true_ranking = np.array([3, 1, 8, 2, 10, 9, 4, 11, 0, 6, 7, 5, 12])
# assert_array_equal(true_ranking, feature_importances.argsort())
def test_probability_log():
# Predict probabilities.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert np.all(y_proba >= 0.0)
assert np.all(y_proba <= 1.0)
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_check_inputs():
# Test input checks (shape and type of X and y).
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y + [0, 1])
from scipy import sparse
X_sparse = sparse.csr_matrix(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(TypeError, clf.fit, X_sparse, y)
clf = GradientBoostingClassifier().fit(X, y)
assert_raises(TypeError, clf.predict, X_sparse)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y,
sample_weight=([1] * len(y)) + [0, 1])
def test_check_inputs_predict():
# X has wrong shape
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, rng.rand(len(X)))
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
def test_check_max_features():
# test if max_features is valid.
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=0)
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=(len(X[0]) + 1))
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=-0.1)
assert_raises(ValueError, clf.fit, X, y)
def test_max_feature_regression():
# Test to make sure random state is set properly.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=5,
max_depth=2, learning_rate=.1,
max_features=2, random_state=1)
gbrt.fit(X_train, y_train)
deviance = gbrt.loss_(y_test, gbrt.decision_function(X_test))
assert_true(deviance < 0.5, "GB failed with deviance %.4f" % deviance)
def test_max_feature_auto():
# Test if max features is set properly for floats and str.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
_, n_features = X.shape
X_train = X[:2000]
y_train = y[:2000]
gbrt = GradientBoostingClassifier(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, n_features)
gbrt = GradientBoostingRegressor(n_estimators=1, max_features=0.3)
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(n_features * 0.3))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='sqrt')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='log2')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.log2(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1,
max_features=0.01 / X.shape[1])
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, 1)
def test_staged_predict():
# Test whether staged decision function eventually gives
# the same prediction.
X, y = datasets.make_friedman1(n_samples=1200,
random_state=1, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test = X[200:]
clf = GradientBoostingRegressor()
# test raise ValueError if not fitted
assert_raises(ValueError, lambda X: np.fromiter(
clf.staged_predict(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# test if prediction for last stage equals ``predict``
for y in clf.staged_predict(X_test):
assert_equal(y.shape, y_pred.shape)
assert_array_equal(y_pred, y)
def test_staged_predict_proba():
# Test whether staged predict proba eventually gives
# the same prediction.
X, y = datasets.make_hastie_10_2(n_samples=1200,
random_state=1)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingClassifier(n_estimators=20)
# test raise NotFittedError if not fitted
assert_raises(NotFittedError, lambda X: np.fromiter(
clf.staged_predict_proba(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
# test if prediction for last stage equals ``predict``
for y_pred in clf.staged_predict(X_test):
assert_equal(y_test.shape, y_pred.shape)
assert_array_equal(clf.predict(X_test), y_pred)
# test if prediction for last stage equals ``predict_proba``
for staged_proba in clf.staged_predict_proba(X_test):
assert_equal(y_test.shape[0], staged_proba.shape[0])
assert_equal(2, staged_proba.shape[1])
assert_array_equal(clf.predict_proba(X_test), staged_proba)
def test_staged_functions_defensive():
# test that staged_functions make defensive copies
rng = np.random.RandomState(0)
X = rng.uniform(size=(10, 3))
y = (4 * X[:, 0]).astype(np.int) + 1 # don't predict zeros
for estimator in [GradientBoostingRegressor(),
GradientBoostingClassifier()]:
estimator.fit(X, y)
for func in ['predict', 'decision_function', 'predict_proba']:
staged_func = getattr(estimator, "staged_" + func, None)
if staged_func is None:
# regressor has no staged_predict_proba
continue
with warnings.catch_warnings(record=True):
staged_result = list(staged_func(X))
staged_result[1][:] = 0
assert_true(np.all(staged_result[0] != 0))
def test_serialization():
# Check model serialization.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
try:
import cPickle as pickle
except ImportError:
import pickle
serialized_clf = pickle.dumps(clf, protocol=pickle.HIGHEST_PROTOCOL)
clf = None
clf = pickle.loads(serialized_clf)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_degenerate_targets():
# Check if we can fit even though all targets are equal.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
# classifier should raise exception
assert_raises(ValueError, clf.fit, X, np.ones(len(X)))
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, np.ones(len(X)))
clf.predict(rng.rand(2))
assert_array_equal(np.ones((1,), dtype=np.float64),
clf.predict(rng.rand(2)))
def test_quantile_loss():
# Check if quantile loss with alpha=0.5 equals lad.
clf_quantile = GradientBoostingRegressor(n_estimators=100, loss='quantile',
max_depth=4, alpha=0.5,
random_state=7)
clf_quantile.fit(boston.data, boston.target)
y_quantile = clf_quantile.predict(boston.data)
clf_lad = GradientBoostingRegressor(n_estimators=100, loss='lad',
max_depth=4, random_state=7)
clf_lad.fit(boston.data, boston.target)
y_lad = clf_lad.predict(boston.data)
assert_array_almost_equal(y_quantile, y_lad, decimal=4)
def test_symbol_labels():
# Test with non-integer class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
symbol_y = tosequence(map(str, y))
clf.fit(X, symbol_y)
assert_array_equal(clf.predict(T), tosequence(map(str, true_result)))
assert_equal(100, len(clf.estimators_))
def test_float_class_labels():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
float_y = np.asarray(y, dtype=np.float32)
clf.fit(X, float_y)
assert_array_equal(clf.predict(T),
np.asarray(true_result, dtype=np.float32))
assert_equal(100, len(clf.estimators_))
def test_shape_y():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
y_ = np.asarray(y, dtype=np.int32)
y_ = y_[:, np.newaxis]
# This will raise a DataConversionWarning that we want to
# "always" raise, elsewhere the warnings gets ignored in the
# later tests, and the tests that check for this warning fail
assert_warns(DataConversionWarning, clf.fit, X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_mem_layout():
# Test with different memory layouts of X and y
X_ = np.asfortranarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
X_ = np.ascontiguousarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.ascontiguousarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.asfortranarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_oob_improvement():
# Test if oob improvement has correct shape and regression test.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=0.5)
clf.fit(X, y)
assert clf.oob_improvement_.shape[0] == 100
# hard-coded regression test - change if modification in OOB computation
assert_array_almost_equal(clf.oob_improvement_[:5],
np.array([0.19, 0.15, 0.12, -0.12, -0.11]),
decimal=2)
def test_oob_improvement_raise():
    # Test that accessing oob_improvement_ raises when subsample=1.0 (no OOB data).
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=1.0)
clf.fit(X, y)
assert_raises(AttributeError, lambda: clf.oob_improvement_)
def test_oob_multiclass_iris():
# Check OOB improvement on multi-class dataset.
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=0.5)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with subsample %.1f " \
"and score = %f" % (0.5, score)
assert clf.oob_improvement_.shape[0] == clf.n_estimators
# hard-coded regression test - change if modification in OOB computation
# FIXME: the following snippet does not yield the same results on 32 bits
# assert_array_almost_equal(clf.oob_improvement_[:5],
# np.array([12.68, 10.45, 8.18, 6.43, 5.13]),
# decimal=2)
def test_verbose_output():
# Check verbose=1 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=1, subsample=0.8)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# with OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 3) % (
'Iter', 'Train Loss', 'OOB Improve', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# one for 1-10 and then 9 for 20-100
assert_equal(10 + 9, n_lines)
def test_more_verbose_output():
# Check verbose=2 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=2)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# no OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 2) % (
'Iter', 'Train Loss', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# 100 lines for n_estimators==100
assert_equal(100, n_lines)
def test_warm_start():
# Test if warm start equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_n_estimators():
# Test if warm start equals fit - set n_estimators.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=300, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=300)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_max_depth():
# Test if possible to fit trees of different depth in ensemble.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, max_depth=2)
est.fit(X, y)
# last 10 trees have different depth
assert est.estimators_[0, 0].max_depth == 1
for i in range(1, 11):
assert est.estimators_[-i, 0].max_depth == 2
def test_warm_start_clear():
# Test if fit clears state.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est_2 = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_2.fit(X, y) # inits state
est_2.set_params(warm_start=False)
est_2.fit(X, y) # clears old state and equals est
assert_array_almost_equal(est_2.predict(X), est.predict(X))
def test_warm_start_zero_n_estimators():
# Test if warm start with zero n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=0)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_smaller_n_estimators():
# Test if warm start with smaller n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=99)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test if warm start with equal n_estimators does nothing
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est2 = clone(est)
est2.set_params(n_estimators=est.n_estimators, warm_start=True)
est2.fit(X, y)
assert_array_almost_equal(est2.predict(X), est.predict(X))
def test_warm_start_oob_switch():
# Test if oob can be turned on during warm start.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, subsample=0.5)
est.fit(X, y)
assert_array_equal(est.oob_improvement_[:100], np.zeros(100))
# the last 10 are not zeros
assert_array_equal(est.oob_improvement_[-10:] == 0.0,
np.zeros(10, dtype=np.bool))
def test_warm_start_oob():
# Test if warm start OOB equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1, subsample=0.5,
random_state=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, subsample=0.5,
random_state=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.oob_improvement_[:100],
est.oob_improvement_[:100])
def early_stopping_monitor(i, est, locals):
"""Returns True on the 10th iteration. """
if i == 9:
return True
else:
return False
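# A monitor callable receives the stage index, the estimator and the local
# variables of ``fit`` after each boosting stage, and fitting stops as soon as
# it returns a true value; the test below relies on stopping after 10 stages.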
def test_monitor_early_stopping():
# Test if monitor return value works.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20) # this is not altered
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.train_score_.shape[0], 30)
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5,
warm_start=True)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20)
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30, warm_start=False)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.train_score_.shape[0], 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.oob_improvement_.shape[0], 30)
def test_complete_classification():
# Test greedy trees with max_depth + 1 leafs.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
est = GradientBoostingClassifier(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, k)
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_complete_regression():
# Test greedy trees with max_depth + 1 leafs.
from sklearn.tree._tree import TREE_LEAF
k = 4
est = GradientBoostingRegressor(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(boston.data, boston.target)
tree = est.estimators_[-1, 0].tree_
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_zero_estimator_reg():
# Test if ZeroEstimator works for regression.
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, boston.data, boston.target)
def test_zero_estimator_clf():
# Test if ZeroEstimator works for classification.
X = iris.data
y = np.array(iris.target)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(X, y)
assert est.score(X, y) > 0.96
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert est.score(X, y) > 0.96
# binary clf
mask = y != 0
y[mask] = 1
y[~mask] = 0
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert est.score(X, y) > 0.96
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
    # Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
all_estimators = [GradientBoostingRegressor,
GradientBoostingClassifier]
k = 4
for GBEstimator in all_estimators:
est = GBEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_greater(tree.max_depth, 1)
est = GBEstimator(max_depth=1).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, 1)
def test_warm_start_wo_nestimators_change():
# Test if warm_start does nothing if n_estimators is not changed.
# Regression test for #3513.
clf = GradientBoostingClassifier(n_estimators=10, warm_start=True)
clf.fit([[0, 1], [2, 3]], [0, 1])
assert clf.estimators_.shape[0] == 10
clf.fit([[0, 1], [2, 3]], [0, 1])
assert clf.estimators_.shape[0] == 10
def test_probability_exponential():
# Predict probabilities.
clf = GradientBoostingClassifier(loss='exponential',
n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert np.all(y_proba >= 0.0)
assert np.all(y_proba <= 1.0)
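    # For the exponential loss the positive-class probability is expected to be
    # the logistic transform of twice the decision function,
    # p(y=1|x) = 1 / (1 + exp(-2 * f(x))), which is what the next assertion checks.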
score = clf.decision_function(T).ravel()
assert_array_almost_equal(y_proba[:, 1],
1.0 / (1.0 + np.exp(-2 * score)))
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_non_uniform_weights_toy_edge_case_reg():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('huber', 'ls', 'lad', 'quantile'):
gb = GradientBoostingRegressor(learning_rate=1.0, n_estimators=2, loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert_greater(gb.predict([[1, 0]])[0], 0.5)
def test_non_uniform_weights_toy_min_weight_leaf():
# Regression test for issue #4447
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1],
]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
gb = GradientBoostingRegressor(n_estimators=5, min_weight_fraction_leaf=0.1)
gb.fit(X, y, sample_weight=sample_weight)
assert_true(gb.predict([[1, 0]])[0] > 0.5)
assert_almost_equal(gb.estimators_[0, 0].splitter.min_weight_leaf, 0.2)
def test_non_uniform_weights_toy_edge_case_clf():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('deviance', 'exponential'):
        gb = GradientBoostingClassifier(n_estimators=5, loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert_array_equal(gb.predict([[1, 0]]), [1])
| bsd-3-clause |
sodexis/odoo | openerp/addons/base/tests/test_ir_actions.py | 291 | 20121 | import unittest2
from openerp.osv.orm import except_orm
import openerp.tests.common as common
from openerp.tools import mute_logger
class TestServerActionsBase(common.TransactionCase):
def setUp(self):
super(TestServerActionsBase, self).setUp()
cr, uid = self.cr, self.uid
# Models
self.ir_actions_server = self.registry('ir.actions.server')
self.ir_actions_client = self.registry('ir.actions.client')
self.ir_values = self.registry('ir.values')
self.ir_model = self.registry('ir.model')
self.ir_model_fields = self.registry('ir.model.fields')
self.res_partner = self.registry('res.partner')
self.res_country = self.registry('res.country')
# Data on which we will run the server action
self.test_country_id = self.res_country.create(cr, uid, {
'name': 'TestingCountry',
'code': 'TY',
'address_format': 'SuperFormat',
})
self.test_country = self.res_country.browse(cr, uid, self.test_country_id)
self.test_partner_id = self.res_partner.create(cr, uid, {
'name': 'TestingPartner',
'city': 'OrigCity',
'country_id': self.test_country_id,
})
self.test_partner = self.res_partner.browse(cr, uid, self.test_partner_id)
self.context = {
'active_id': self.test_partner_id,
'active_model': 'res.partner',
}
# Model data
self.res_partner_model_id = self.ir_model.search(cr, uid, [('model', '=', 'res.partner')])[0]
self.res_partner_name_field_id = self.ir_model_fields.search(cr, uid, [('model', '=', 'res.partner'), ('name', '=', 'name')])[0]
self.res_partner_city_field_id = self.ir_model_fields.search(cr, uid, [('model', '=', 'res.partner'), ('name', '=', 'city')])[0]
self.res_partner_country_field_id = self.ir_model_fields.search(cr, uid, [('model', '=', 'res.partner'), ('name', '=', 'country_id')])[0]
self.res_partner_parent_field_id = self.ir_model_fields.search(cr, uid, [('model', '=', 'res.partner'), ('name', '=', 'parent_id')])[0]
self.res_country_model_id = self.ir_model.search(cr, uid, [('model', '=', 'res.country')])[0]
self.res_country_name_field_id = self.ir_model_fields.search(cr, uid, [('model', '=', 'res.country'), ('name', '=', 'name')])[0]
self.res_country_code_field_id = self.ir_model_fields.search(cr, uid, [('model', '=', 'res.country'), ('name', '=', 'code')])[0]
        # create a server action to run on the test partner in the tests below
self.act_id = self.ir_actions_server.create(cr, uid, {
'name': 'TestAction',
'condition': 'True',
'model_id': self.res_partner_model_id,
'state': 'code',
'code': 'obj.write({"comment": "MyComment"})',
})
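        # The action above simply writes a fixed comment on the active partner;
        # the test cases below reconfigure its ``state`` (code, trigger,
        # client_action, object_create, object_write, multi) to exercise each
        # server action type.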
class TestServerActions(TestServerActionsBase):
def test_00_action(self):
cr, uid = self.cr, self.uid
# Do: eval 'True' condition
self.ir_actions_server.run(cr, uid, [self.act_id], self.context)
self.test_partner.refresh()
self.assertEqual(self.test_partner.comment, 'MyComment', 'ir_actions_server: invalid condition check')
self.test_partner.write({'comment': False})
# Do: eval False condition, that should be considered as True (void = True)
self.ir_actions_server.write(cr, uid, [self.act_id], {'condition': False})
self.ir_actions_server.run(cr, uid, [self.act_id], self.context)
self.test_partner.refresh()
self.assertEqual(self.test_partner.comment, 'MyComment', 'ir_actions_server: invalid condition check')
# Do: create contextual action
self.ir_actions_server.create_action(cr, uid, [self.act_id])
# Test: ir_values created
ir_values_ids = self.ir_values.search(cr, uid, [('name', '=', 'Run TestAction')])
self.assertEqual(len(ir_values_ids), 1, 'ir_actions_server: create_action should have created an entry in ir_values')
ir_value = self.ir_values.browse(cr, uid, ir_values_ids[0])
self.assertEqual(ir_value.value, 'ir.actions.server,%s' % self.act_id, 'ir_actions_server: created ir_values should reference the server action')
self.assertEqual(ir_value.model, 'res.partner', 'ir_actions_server: created ir_values should be linked to the action base model')
# Do: remove contextual action
self.ir_actions_server.unlink_action(cr, uid, [self.act_id])
# Test: ir_values removed
ir_values_ids = self.ir_values.search(cr, uid, [('name', '=', 'Run TestAction')])
self.assertEqual(len(ir_values_ids), 0, 'ir_actions_server: unlink_action should remove the ir_values record')
def test_10_code(self):
cr, uid = self.cr, self.uid
self.ir_actions_server.write(cr, uid, self.act_id, {
'state': 'code',
'code': """partner_name = obj.name + '_code'
self.pool["res.partner"].create(cr, uid, {"name": partner_name}, context=context)
workflow"""
})
run_res = self.ir_actions_server.run(cr, uid, [self.act_id], context=self.context)
self.assertFalse(run_res, 'ir_actions_server: code server action correctly finished should return False')
pids = self.res_partner.search(cr, uid, [('name', 'ilike', 'TestingPartner_code')])
self.assertEqual(len(pids), 1, 'ir_actions_server: 1 new partner should have been created')
def test_20_trigger(self):
cr, uid = self.cr, self.uid
# Data: code server action (at this point code-based actions should work)
act_id2 = self.ir_actions_server.create(cr, uid, {
'name': 'TestAction2',
'type': 'ir.actions.server',
'condition': 'True',
'model_id': self.res_partner_model_id,
'state': 'code',
'code': 'obj.write({"comment": "MyComment"})',
})
act_id3 = self.ir_actions_server.create(cr, uid, {
'name': 'TestAction3',
'type': 'ir.actions.server',
'condition': 'True',
'model_id': self.res_country_model_id,
'state': 'code',
'code': 'obj.write({"code": "ZZ"})',
})
# Data: create workflows
partner_wf_id = self.registry('workflow').create(cr, uid, {
'name': 'TestWorkflow',
'osv': 'res.partner',
'on_create': True,
})
partner_act1_id = self.registry('workflow.activity').create(cr, uid, {
'name': 'PartnerStart',
'wkf_id': partner_wf_id,
'flow_start': True
})
partner_act2_id = self.registry('workflow.activity').create(cr, uid, {
'name': 'PartnerTwo',
'wkf_id': partner_wf_id,
'kind': 'function',
'action': 'True',
'action_id': act_id2,
})
partner_trs1_id = self.registry('workflow.transition').create(cr, uid, {
'signal': 'partner_trans',
'act_from': partner_act1_id,
'act_to': partner_act2_id
})
country_wf_id = self.registry('workflow').create(cr, uid, {
'name': 'TestWorkflow',
'osv': 'res.country',
'on_create': True,
})
country_act1_id = self.registry('workflow.activity').create(cr, uid, {
'name': 'CountryStart',
'wkf_id': country_wf_id,
'flow_start': True
})
country_act2_id = self.registry('workflow.activity').create(cr, uid, {
'name': 'CountryTwo',
'wkf_id': country_wf_id,
'kind': 'function',
'action': 'True',
'action_id': act_id3,
})
country_trs1_id = self.registry('workflow.transition').create(cr, uid, {
'signal': 'country_trans',
'act_from': country_act1_id,
'act_to': country_act2_id
})
# Data: re-create country and partner to benefit from the workflows
self.test_country_id = self.res_country.create(cr, uid, {
'name': 'TestingCountry2',
'code': 'T2',
})
self.test_country = self.res_country.browse(cr, uid, self.test_country_id)
self.test_partner_id = self.res_partner.create(cr, uid, {
'name': 'TestingPartner2',
'country_id': self.test_country_id,
})
self.test_partner = self.res_partner.browse(cr, uid, self.test_partner_id)
self.context = {
'active_id': self.test_partner_id,
'active_model': 'res.partner',
}
# Run the action on partner object itself ('base')
self.ir_actions_server.write(cr, uid, [self.act_id], {
'state': 'trigger',
'use_relational_model': 'base',
'wkf_model_id': self.res_partner_model_id,
'wkf_model_name': 'res.partner',
'wkf_transition_id': partner_trs1_id,
})
self.ir_actions_server.run(cr, uid, [self.act_id], self.context)
self.test_partner.refresh()
self.assertEqual(self.test_partner.comment, 'MyComment', 'ir_actions_server: incorrect signal trigger')
# Run the action on related country object ('relational')
self.ir_actions_server.write(cr, uid, [self.act_id], {
'use_relational_model': 'relational',
'wkf_model_id': self.res_country_model_id,
'wkf_model_name': 'res.country',
'wkf_field_id': self.res_partner_country_field_id,
'wkf_transition_id': country_trs1_id,
})
self.ir_actions_server.run(cr, uid, [self.act_id], self.context)
self.test_country.refresh()
self.assertEqual(self.test_country.code, 'ZZ', 'ir_actions_server: incorrect signal trigger')
        # Clear the workflow cache, otherwise openerp will try to create workflows even if they have been deleted
from openerp.workflow import clear_cache
clear_cache(cr, uid)
def test_30_client(self):
cr, uid = self.cr, self.uid
client_action_id = self.registry('ir.actions.client').create(cr, uid, {
'name': 'TestAction2',
'tag': 'Test',
})
self.ir_actions_server.write(cr, uid, [self.act_id], {
'state': 'client_action',
'action_id': client_action_id,
})
res = self.ir_actions_server.run(cr, uid, [self.act_id], context=self.context)
self.assertEqual(res['name'], 'TestAction2', 'ir_actions_server: incorrect return result for a client action')
def test_40_crud_create(self):
cr, uid = self.cr, self.uid
_city = 'TestCity'
_name = 'TestNew'
# Do: create a new record in the same model and link it
self.ir_actions_server.write(cr, uid, [self.act_id], {
'state': 'object_create',
'use_create': 'new',
'link_new_record': True,
'link_field_id': self.res_partner_parent_field_id,
'fields_lines': [(0, 0, {'col1': self.res_partner_name_field_id, 'value': _name}),
(0, 0, {'col1': self.res_partner_city_field_id, 'value': _city})],
})
run_res = self.ir_actions_server.run(cr, uid, [self.act_id], context=self.context)
self.assertFalse(run_res, 'ir_actions_server: create record action correctly finished should return False')
# Test: new partner created
pids = self.res_partner.search(cr, uid, [('name', 'ilike', _name)])
self.assertEqual(len(pids), 1, 'ir_actions_server: TODO')
partner = self.res_partner.browse(cr, uid, pids[0])
self.assertEqual(partner.city, _city, 'ir_actions_server: TODO')
# Test: new partner linked
self.test_partner.refresh()
self.assertEqual(self.test_partner.parent_id.id, pids[0], 'ir_actions_server: TODO')
# Do: copy current record
self.ir_actions_server.write(cr, uid, [self.act_id], {'fields_lines': [[5]]})
self.ir_actions_server.write(cr, uid, [self.act_id], {
'state': 'object_create',
'use_create': 'copy_current',
'link_new_record': False,
'fields_lines': [(0, 0, {'col1': self.res_partner_name_field_id, 'value': 'TestCopyCurrent'}),
(0, 0, {'col1': self.res_partner_city_field_id, 'value': 'TestCity'})],
})
run_res = self.ir_actions_server.run(cr, uid, [self.act_id], context=self.context)
self.assertFalse(run_res, 'ir_actions_server: create record action correctly finished should return False')
# Test: new partner created
pids = self.res_partner.search(cr, uid, [('name', 'ilike', 'TestingPartner (copy)')]) # currently res_partner overrides default['name'] whatever its value
self.assertEqual(len(pids), 1, 'ir_actions_server: TODO')
partner = self.res_partner.browse(cr, uid, pids[0])
self.assertEqual(partner.city, 'TestCity', 'ir_actions_server: TODO')
self.assertEqual(partner.country_id.id, self.test_partner.country_id.id, 'ir_actions_server: TODO')
# Do: create a new record in another model
self.ir_actions_server.write(cr, uid, [self.act_id], {'fields_lines': [[5]]})
self.ir_actions_server.write(cr, uid, [self.act_id], {
'state': 'object_create',
'use_create': 'new_other',
'crud_model_id': self.res_country_model_id,
'link_new_record': False,
'fields_lines': [(0, 0, {'col1': self.res_country_name_field_id, 'value': 'obj.name', 'type': 'equation'}),
(0, 0, {'col1': self.res_country_code_field_id, 'value': 'obj.name[0:2]', 'type': 'equation'})],
})
run_res = self.ir_actions_server.run(cr, uid, [self.act_id], context=self.context)
self.assertFalse(run_res, 'ir_actions_server: create record action correctly finished should return False')
# Test: new country created
cids = self.res_country.search(cr, uid, [('name', 'ilike', 'TestingPartner')])
self.assertEqual(len(cids), 1, 'ir_actions_server: TODO')
country = self.res_country.browse(cr, uid, cids[0])
self.assertEqual(country.code, 'TE', 'ir_actions_server: TODO')
# Do: copy a record in another model
self.ir_actions_server.write(cr, uid, [self.act_id], {'fields_lines': [[5]]})
self.ir_actions_server.write(cr, uid, [self.act_id], {
'state': 'object_create',
'use_create': 'copy_other',
'crud_model_id': self.res_country_model_id,
'link_new_record': False,
'ref_object': 'res.country,%s' % self.test_country_id,
'fields_lines': [(0, 0, {'col1': self.res_country_name_field_id, 'value': 'NewCountry', 'type': 'value'}),
(0, 0, {'col1': self.res_country_code_field_id, 'value': 'NY', 'type': 'value'})],
})
run_res = self.ir_actions_server.run(cr, uid, [self.act_id], context=self.context)
self.assertFalse(run_res, 'ir_actions_server: create record action correctly finished should return False')
# Test: new country created
cids = self.res_country.search(cr, uid, [('name', 'ilike', 'NewCountry')])
self.assertEqual(len(cids), 1, 'ir_actions_server: TODO')
country = self.res_country.browse(cr, uid, cids[0])
self.assertEqual(country.code, 'NY', 'ir_actions_server: TODO')
self.assertEqual(country.address_format, 'SuperFormat', 'ir_actions_server: TODO')
def test_50_crud_write(self):
cr, uid = self.cr, self.uid
_name = 'TestNew'
# Do: create a new record in the same model and link it
self.ir_actions_server.write(cr, uid, [self.act_id], {
'state': 'object_write',
'use_write': 'current',
'fields_lines': [(0, 0, {'col1': self.res_partner_name_field_id, 'value': _name})],
})
run_res = self.ir_actions_server.run(cr, uid, [self.act_id], context=self.context)
self.assertFalse(run_res, 'ir_actions_server: create record action correctly finished should return False')
# Test: new partner created
pids = self.res_partner.search(cr, uid, [('name', 'ilike', _name)])
self.assertEqual(len(pids), 1, 'ir_actions_server: TODO')
partner = self.res_partner.browse(cr, uid, pids[0])
self.assertEqual(partner.city, 'OrigCity', 'ir_actions_server: TODO')
# Do: copy current record
self.ir_actions_server.write(cr, uid, [self.act_id], {'fields_lines': [[5]]})
self.ir_actions_server.write(cr, uid, [self.act_id], {
'use_write': 'other',
'crud_model_id': self.res_country_model_id,
'ref_object': 'res.country,%s' % self.test_country_id,
'fields_lines': [(0, 0, {'col1': self.res_country_name_field_id, 'value': 'obj.name', 'type': 'equation'})],
})
run_res = self.ir_actions_server.run(cr, uid, [self.act_id], context=self.context)
self.assertFalse(run_res, 'ir_actions_server: create record action correctly finished should return False')
# Test: new country created
cids = self.res_country.search(cr, uid, [('name', 'ilike', 'TestNew')])
self.assertEqual(len(cids), 1, 'ir_actions_server: TODO')
# Do: copy a record in another model
self.ir_actions_server.write(cr, uid, [self.act_id], {'fields_lines': [[5]]})
self.ir_actions_server.write(cr, uid, [self.act_id], {
'use_write': 'expression',
'crud_model_id': self.res_country_model_id,
'write_expression': 'object.country_id',
'fields_lines': [(0, 0, {'col1': self.res_country_name_field_id, 'value': 'NewCountry', 'type': 'value'})],
})
run_res = self.ir_actions_server.run(cr, uid, [self.act_id], context=self.context)
self.assertFalse(run_res, 'ir_actions_server: create record action correctly finished should return False')
# Test: new country created
cids = self.res_country.search(cr, uid, [('name', 'ilike', 'NewCountry')])
self.assertEqual(len(cids), 1, 'ir_actions_server: TODO')
@mute_logger('openerp.addons.base.ir.ir_model', 'openerp.models')
def test_60_multi(self):
cr, uid = self.cr, self.uid
# Data: 2 server actions that will be nested
act1_id = self.ir_actions_server.create(cr, uid, {
'name': 'Subaction1',
'sequence': 1,
'model_id': self.res_partner_model_id,
'state': 'code',
'code': 'action = {"type": "ir.actions.act_window"}',
})
act2_id = self.ir_actions_server.create(cr, uid, {
'name': 'Subaction2',
'sequence': 2,
'model_id': self.res_partner_model_id,
'state': 'object_create',
'use_create': 'copy_current',
})
act3_id = self.ir_actions_server.create(cr, uid, {
'name': 'Subaction3',
'sequence': 3,
'model_id': self.res_partner_model_id,
'state': 'code',
'code': 'action = {"type": "ir.actions.act_url"}',
})
self.ir_actions_server.write(cr, uid, [self.act_id], {
'state': 'multi',
'child_ids': [(6, 0, [act1_id, act2_id, act3_id])],
})
# Do: run the action
res = self.ir_actions_server.run(cr, uid, [self.act_id], context=self.context)
# Test: new partner created
        pids = self.res_partner.search(cr, uid, [('name', 'ilike', 'TestingPartner (copy)')])  # res_partner currently overrides default['name'] regardless of its value
        self.assertEqual(len(pids), 1, 'ir_actions_server: exactly one copied partner should exist')
# Test: action returned
self.assertEqual(res.get('type'), 'ir.actions.act_url')
# Test loops
with self.assertRaises(except_orm):
self.ir_actions_server.write(cr, uid, [self.act_id], {
'child_ids': [(6, 0, [self.act_id])]
})
if __name__ == '__main__':
unittest2.main()
| agpl-3.0 |
HKUST-SING/tensorflow | tensorflow/python/training/server_lib_sparse_job_test.py | 133 | 1605 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.GrpcServer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.training import server_lib
class SparseJobTest(test.TestCase):
# TODO(b/34465411): Starting multiple servers with different configurations
# in the same test is flaky. Move this test case back into
# "server_lib_test.py" when this is no longer the case.
def testSparseJob(self):
server = server_lib.Server({"local": {37: "localhost:0"}})
with ops.device("/job:local/task:37"):
a = constant_op.constant(1.0)
with session.Session(server.target) as sess:
self.assertEqual(1.0, sess.run(a))
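    # "Sparse" means the job defines only task index 37 instead of a dense
    # 0..37 task list; a rough equivalent via ClusterSpec (hypothetical port) is:
    #   cluster = server_lib.ClusterSpec({"local": {37: "localhost:2222"}})
    #   server = server_lib.Server(cluster, job_name="local", task_index=37)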
if __name__ == "__main__":
test.main()
| apache-2.0 |
epam-mooc/edx-platform | common/djangoapps/track/contexts.py | 78 | 1504 | """Generates common contexts"""
import logging
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from opaque_keys.edx.keys import CourseKey
from opaque_keys import InvalidKeyError
from util.request import COURSE_REGEX
log = logging.getLogger(__name__)
def course_context_from_url(url):
"""
Extracts the course_context from the given `url` and passes it on to
`course_context_from_course_id()`.
"""
url = url or ''
match = COURSE_REGEX.match(url)
course_id = None
if match:
course_id_string = match.group('course_id')
try:
course_id = SlashSeparatedCourseKey.from_deprecated_string(course_id_string)
except InvalidKeyError:
log.warning(
'unable to parse course_id "{course_id}"'.format(
course_id=course_id_string
),
exc_info=True
)
return course_context_from_course_id(course_id)
def course_context_from_course_id(course_id):
"""
Creates a course context from a `course_id`.
Example Returned Context::
{
'course_id': 'org/course/run',
'org_id': 'org'
}
"""
if course_id is None:
return {'course_id': '', 'org_id': ''}
# TODO: Make this accept any CourseKey, and serialize it using .to_string
assert(isinstance(course_id, CourseKey))
return {
'course_id': course_id.to_deprecated_string(),
'org_id': course_id.org,
}
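# Illustrative usage of the helpers above (a sketch with a hypothetical course id):
#   key = SlashSeparatedCourseKey.from_deprecated_string('edX/DemoX/2014')
#   course_context_from_course_id(key)
#   -> {'course_id': 'edX/DemoX/2014', 'org_id': 'edX'}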
| agpl-3.0 |
vitan/hue | desktop/core/ext-py/Django-1.6.10/tests/admin_views/customadmin.py | 58 | 2166 | """
A second, custom AdminSite -- see tests.CustomAdminSiteTests.
"""
from __future__ import absolute_import
from django.conf.urls import patterns
from django.contrib import admin
from django.http import HttpResponse
from django.contrib.auth.models import User
from django.contrib.auth.admin import UserAdmin
from . import models, forms, admin as base_admin
class Admin2(admin.AdminSite):
login_form = forms.CustomAdminAuthenticationForm
login_template = 'custom_admin/login.html'
logout_template = 'custom_admin/logout.html'
index_template = ['custom_admin/index.html'] # a list, to test fix for #18697
password_change_template = 'custom_admin/password_change_form.html'
password_change_done_template = 'custom_admin/password_change_done.html'
# A custom index view.
def index(self, request, extra_context=None):
return super(Admin2, self).index(request, {'foo': '*bar*'})
def get_urls(self):
return patterns('',
(r'^my_view/$', self.admin_view(self.my_view)),
) + super(Admin2, self).get_urls()
def my_view(self, request):
return HttpResponse("Django is a magical pony!")
class UserLimitedAdmin(UserAdmin):
# used for testing password change on a user not in queryset
def get_queryset(self, request):
qs = super(UserLimitedAdmin, self).get_queryset(request)
return qs.filter(is_superuser=False)
class CustomPwdTemplateUserAdmin(UserAdmin):
change_user_password_template = ['admin/auth/user/change_password.html'] # a list, to test fix for #18697
site = Admin2(name="admin2")
site.register(models.Article, base_admin.ArticleAdmin)
site.register(models.Section, inlines=[base_admin.ArticleInline])
site.register(models.Thing, base_admin.ThingAdmin)
site.register(models.Fabric, base_admin.FabricAdmin)
site.register(models.ChapterXtra1, base_admin.ChapterXtra1Admin)
site.register(User, UserLimitedAdmin)
site.register(models.UndeletableObject, base_admin.UndeletableObjectAdmin)
site.register(models.Simple, base_admin.AttributeErrorRaisingAdmin)
simple_site = Admin2(name='admin4')
simple_site.register(User, CustomPwdTemplateUserAdmin)
| apache-2.0 |
missionpinball/mpf-mc | mpfmc/tests/MpfIntegrationTestCase.py | 1 | 8859 | import logging
import os
import sys
import mpf.core
os.environ['KIVY_NO_FILELOG'] = '1'
os.environ['KIVY_NO_CONSOLELOG'] = '1'
os.environ["KIVY_NO_ARGS"] = "1"
from queue import Queue
import time
from kivy.config import Config
from kivy.logger import Logger
from kivy.base import runTouchApp, EventLoop
from kivy.clock import Clock
from kivy.uix.widget import Widget as KivyWidget
for handler in Logger.handlers:
Logger.removeHandler(handler)
sys.stderr = sys.__stderr__
import mpfmc
import mpfmc.core
from mpf.tests.MpfBcpTestCase import MockBcpClient
from mpf.tests.MpfTestCase import MpfTestCase, patch, UnitTestConfigLoader
class TestBcpClient(MockBcpClient):
def __init__(self, machine, name, bcp):
super().__init__(machine, name, bcp)
self.queue = Queue()
self.exit_on_close = False
self.fps = 30
self._start_time = time.time()
Clock._start_tick = self._start_time
Clock._last_tick = self._start_time
Clock.time = self._mc_time
Clock._events = [[] for i in range(256)]
with patch("mpfmc.core.bcp_processor.BCPServer"):
self._start_mc()
self.mc_task = self.machine.clock.schedule_interval(self._run_mc, 1 / self.fps)
bcp_mc = self.mc.bcp_processor
bcp_mc.send = self.receive
self.queue = bcp_mc.receive_queue
self.mc.bcp_processor.enabled = True
self.mc.bcp_client_connected = True
self.mc.events.post("client_connected")
def get_absolute_machine_path(self):
# creates an absolute path based on machine_path
return self.machine.machine_path
def get_options(self):
return dict(machine_path=self.get_absolute_machine_path(),
mcconfigfile='mcconfig.yaml',
production=False,
configfile=self.machine.options['configfile'],
no_load_cache=False,
create_config_cache=True,
bcp=False)
def preprocess_config(self, config):
        # TODO: this method is copied from the mc.py launcher; there is probably a better way to share it
kivy_config = config['kivy_config']
try:
kivy_config['graphics'].update(config['displays']['window'])
except KeyError:
pass
try:
kivy_config['graphics'].update(config['window'])
except KeyError:
pass
if 'top' in kivy_config['graphics'] and 'left' in kivy_config['graphics']:
kivy_config['graphics']['position'] = 'custom'
for section, settings in kivy_config.items():
for k, v in settings.items():
try:
if k in Config[section]:
Config.set(section, k, v)
except KeyError:
continue
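        # Shape of the config dict this method expects (hypothetical values):
        #   {'kivy_config': {'graphics': {'width': 800, 'height': 600}},
        #    'displays': {'window': {'fullscreen': False}},
        #    'window': {'borderless': False}}
        # Matching options are merged section by section into kivy.config.Config.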
def _start_app_as_slave(self):
# from app::run
if not self.mc.built:
self.mc.load_config()
self.mc.load_kv(filename=self.mc.kv_file)
root = self.mc.build()
if root:
self.mc.root = root
if self.mc.root:
if not isinstance(self.mc.root, KivyWidget):
Logger.critical('App.root must be an _instance_ of Kivy Widget')
raise Exception('Invalid instance in App.root')
from kivy.core.window import Window
Window.add_widget(self.mc.root)
# Check if the window is already created
from kivy.base import EventLoop
window = EventLoop.window
if window:
self.mc._app_window = window
window.set_title(self.mc.get_application_name())
icon = self.mc.get_application_icon()
if icon:
window.set_icon(icon)
self.mc._install_settings_keys(window)
else:
Logger.critical("Application: No window is created."
" Terminating application run.")
return
self.mc.dispatch('on_start')
runTouchApp(embedded=True) # change is here
while not self.mc.is_init_done.is_set():
EventLoop.idle()
def _start_mc(self):
from mpfmc.core.mc import MpfMc
# prevent sleep in clock
Clock._max_fps = 0
machine_path = self.get_absolute_machine_path()
config_loader = UnitTestConfigLoader(machine_path, self.machine.options['configfile'], {}, {}, {})
config = config_loader.load_mc_config()
self.mc = MpfMc(config=config, options=self.get_options())
from kivy.core.window import Window
Window.create_window()
Window.canvas.clear()
self._start_app_as_slave()
def _mc_time(self):
return self._start_time + self.machine.clock.loop._time
def _run_mc(self):
EventLoop.idle()
def stop(self):
self.mc.stop()
self.machine.clock.unschedule(self.mc_task)
def send(self, bcp_command, kwargs):
self.queue.put((bcp_command, kwargs))
def receive(self, bcp_command, callback=None, rawbytes=None, **kwargs):
if rawbytes:
kwargs['rawbytes'] = rawbytes
self.receive_queue.put_nowait((bcp_command, kwargs))
if callback:
callback()
class MpfIntegrationTestCase(MpfTestCase):
fps = 30
def get_use_bcp(self):
return True
def get_absolute_machine_path(self):
# creates an absolute path based on machine_path
return os.path.abspath(os.path.join(
mpfmc.core.__path__[0], os.pardir, self.get_machine_path()))
def get_enable_plugins(self):
return True
def mock_mc_event(self, event_name):
"""Configure an event to be mocked.
Same as mock_event but for mc in integration test.
"""
self._mc_events[event_name] = 0
self.mc.events.remove_handler_by_event(event=event_name, handler=self._mock_mc_event_handler)
self.mc.events.add_handler(event=event_name,
handler=self._mock_mc_event_handler,
event_name=event_name)
def _mock_mc_event_handler(self, event_name, **kwargs):
self._last_mc_event_kwargs[event_name] = kwargs
self._mc_events[event_name] += 1
def assertMcEventNotCalled(self, event_name):
"""Assert that event was not called.
Same as mock_event but for mc in integration test.
"""
if event_name not in self._mc_events:
raise AssertionError("Event {} not mocked.".format(event_name))
if self._mc_events[event_name] != 0:
raise AssertionError("Event {} was called {} times.".format(event_name, self._mc_events[event_name]))
def assertMcEventCalled(self, event_name, times=None):
"""Assert that event was called.
Same as mock_event but for mc in integration test.
"""
if event_name not in self._mc_events:
raise AssertionError("Event {} not mocked.".format(event_name))
if self._mc_events[event_name] == 0 and times != 0:
raise AssertionError("Event {} was not called.".format(event_name))
if times is not None and self._mc_events[event_name] != times:
raise AssertionError("Event {} was called {} instead of {}.".format(
event_name, self._mc_events[event_name], times))
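    # Typical use from a test method (a sketch; the event name is hypothetical):
    #   self.mock_mc_event("slide_shown")
    #   self.post_event("start_mode1")
    #   self.advance_time_and_run()
    #   self.assertMcEventCalled("slide_shown", times=1)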
def __init__(self, methodName):
super().__init__(methodName)
self._mc_events = {}
self._last_mc_event_kwargs = {}
self.console_logger = None
try:
del self.machine_config_patches['mpf']['plugins']
except KeyError:
pass
self.machine_config_patches['bcp'] = \
{"connections": {"local_display": {"type": "mpfmc.tests.MpfIntegrationTestCase.TestBcpClient"}}}
self.machine_config_patches['bcp']['servers'] = []
self.expected_duration = 60
def setUp(self):
if self.unittest_verbosity() > 1:
self.console_logger = logging.StreamHandler()
self.console_logger.setLevel(logging.DEBUG)
# set a format which is simpler for console use
formatter = logging.Formatter('%(name)s: %(message)s')
# tell the handler to use this format
self.console_logger.setFormatter(formatter)
# add the handler to the root logger
logging.getLogger('').addHandler(self.console_logger)
super().setUp()
client = self.machine.bcp.transport.get_named_client("local_display")
self.mc = client.mc
self.advance_time_and_run()
def tearDown(self):
super().tearDown()
EventLoop.close()
if self.console_logger:
            logging.getLogger('').removeHandler(self.console_logger)
self.console_logger = None
| mit |
KanchanChauhan/erpnext | erpnext/stock/doctype/delivery_note/test_delivery_note.py | 5 | 19129 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import unittest
import frappe
import json
import frappe.defaults
from frappe.utils import cint, nowdate, nowtime, cstr, add_days, flt, today
from erpnext.stock.stock_ledger import get_previous_sle
from erpnext.accounts.utils import get_balance_on
from erpnext.stock.doctype.purchase_receipt.test_purchase_receipt \
import get_gl_entries, set_perpetual_inventory
from erpnext.stock.doctype.delivery_note.delivery_note import make_sales_invoice
from erpnext.stock.doctype.stock_entry.test_stock_entry \
import make_stock_entry, make_serialized_item, get_qty_after_transaction
from erpnext.stock.doctype.serial_no.serial_no import get_serial_nos, SerialNoWarehouseError
from erpnext.stock.doctype.stock_reconciliation.test_stock_reconciliation \
import create_stock_reconciliation, set_valuation_method
from erpnext.selling.doctype.sales_order.test_sales_order import make_sales_order, create_dn_against_so
class TestDeliveryNote(unittest.TestCase):
def test_over_billing_against_dn(self):
frappe.db.set_value("Stock Settings", None, "allow_negative_stock", 1)
dn = create_delivery_note(do_not_submit=True)
self.assertRaises(frappe.ValidationError, make_sales_invoice, dn.name)
dn.submit()
si = make_sales_invoice(dn.name)
self.assertEquals(len(si.get("items")), len(dn.get("items")))
# modify amount
si.get("items")[0].rate = 200
self.assertRaises(frappe.ValidationError, frappe.get_doc(si).insert)
def test_delivery_note_no_gl_entry(self):
set_perpetual_inventory(0)
self.assertEqual(cint(frappe.defaults.get_global_default("auto_accounting_for_stock")), 0)
make_stock_entry(target="_Test Warehouse - _TC", qty=5, basic_rate=100)
stock_queue = json.loads(get_previous_sle({
"item_code": "_Test Item",
"warehouse": "_Test Warehouse - _TC",
"posting_date": nowdate(),
"posting_time": nowtime()
}).stock_queue or "[]")
dn = create_delivery_note()
sle = frappe.get_doc("Stock Ledger Entry", {"voucher_type": "Delivery Note", "voucher_no": dn.name})
self.assertEqual(sle.stock_value_difference, -1*stock_queue[0][1])
self.assertFalse(get_gl_entries("Delivery Note", dn.name))
def test_delivery_note_gl_entry(self):
set_perpetual_inventory()
self.assertEqual(cint(frappe.defaults.get_global_default("auto_accounting_for_stock")), 1)
set_valuation_method("_Test Item", "FIFO")
make_stock_entry(target="_Test Warehouse - _TC", qty=5, basic_rate=100)
stock_in_hand_account = frappe.db.get_value("Account", {"warehouse": "_Test Warehouse - _TC"})
prev_bal = get_balance_on(stock_in_hand_account)
dn = create_delivery_note()
gl_entries = get_gl_entries("Delivery Note", dn.name)
self.assertTrue(gl_entries)
stock_value_difference = abs(frappe.db.get_value("Stock Ledger Entry",
{"voucher_type": "Delivery Note", "voucher_no": dn.name}, "stock_value_difference"))
expected_values = {
stock_in_hand_account: [0.0, stock_value_difference],
"Cost of Goods Sold - _TC": [stock_value_difference, 0.0]
}
for i, gle in enumerate(gl_entries):
self.assertEquals([gle.debit, gle.credit], expected_values.get(gle.account))
# check stock in hand balance
bal = get_balance_on(stock_in_hand_account)
self.assertEquals(bal, prev_bal - stock_value_difference)
# back dated incoming entry
make_stock_entry(posting_date=add_days(nowdate(), -2), target="_Test Warehouse - _TC",
qty=5, basic_rate=100)
gl_entries = get_gl_entries("Delivery Note", dn.name)
self.assertTrue(gl_entries)
stock_value_difference = abs(frappe.db.get_value("Stock Ledger Entry",
{"voucher_type": "Delivery Note", "voucher_no": dn.name}, "stock_value_difference"))
expected_values = {
stock_in_hand_account: [0.0, stock_value_difference],
"Cost of Goods Sold - _TC": [stock_value_difference, 0.0]
}
for i, gle in enumerate(gl_entries):
self.assertEquals([gle.debit, gle.credit], expected_values.get(gle.account))
dn.cancel()
self.assertFalse(get_gl_entries("Delivery Note", dn.name))
set_perpetual_inventory(0)
def test_delivery_note_gl_entry_packing_item(self):
set_perpetual_inventory()
make_stock_entry(item_code="_Test Item", target="_Test Warehouse - _TC", qty=10, basic_rate=100)
make_stock_entry(item_code="_Test Item Home Desktop 100",
target="_Test Warehouse - _TC", qty=10, basic_rate=100)
stock_in_hand_account = frappe.db.get_value("Account", {"warehouse": "_Test Warehouse - _TC"})
prev_bal = get_balance_on(stock_in_hand_account)
dn = create_delivery_note(item_code="_Test Product Bundle Item")
stock_value_diff_rm1 = abs(frappe.db.get_value("Stock Ledger Entry",
{"voucher_type": "Delivery Note", "voucher_no": dn.name, "item_code": "_Test Item"},
"stock_value_difference"))
stock_value_diff_rm2 = abs(frappe.db.get_value("Stock Ledger Entry",
{"voucher_type": "Delivery Note", "voucher_no": dn.name,
"item_code": "_Test Item Home Desktop 100"}, "stock_value_difference"))
stock_value_diff = stock_value_diff_rm1 + stock_value_diff_rm2
gl_entries = get_gl_entries("Delivery Note", dn.name)
self.assertTrue(gl_entries)
expected_values = {
stock_in_hand_account: [0.0, stock_value_diff],
"Cost of Goods Sold - _TC": [stock_value_diff, 0.0]
}
for i, gle in enumerate(gl_entries):
self.assertEquals([gle.debit, gle.credit], expected_values.get(gle.account))
# check stock in hand balance
bal = get_balance_on(stock_in_hand_account)
self.assertEquals(flt(bal, 2), flt(prev_bal - stock_value_diff, 2))
dn.cancel()
self.assertFalse(get_gl_entries("Delivery Note", dn.name))
set_perpetual_inventory(0)
def test_serialized(self):
se = make_serialized_item()
serial_no = get_serial_nos(se.get("items")[0].serial_no)[0]
dn = create_delivery_note(item_code="_Test Serialized Item With Series", serial_no=serial_no)
self.check_serial_no_values(serial_no, {
"warehouse": "",
"delivery_document_no": dn.name
})
dn.cancel()
self.check_serial_no_values(serial_no, {
"warehouse": "_Test Warehouse - _TC",
"delivery_document_no": ""
})
def test_serialize_status(self):
from frappe.model.naming import make_autoname
serial_no = frappe.get_doc({
"doctype": "Serial No",
"item_code": "_Test Serialized Item With Series",
"serial_no": make_autoname("SR", "Serial No")
})
serial_no.save()
dn = create_delivery_note(item_code="_Test Serialized Item With Series",
serial_no=serial_no.name, do_not_submit=True)
self.assertRaises(SerialNoWarehouseError, dn.submit)
def check_serial_no_values(self, serial_no, field_values):
serial_no = frappe.get_doc("Serial No", serial_no)
for field, value in field_values.items():
self.assertEquals(cstr(serial_no.get(field)), value)
def test_sales_return_for_non_bundled_items(self):
set_perpetual_inventory()
make_stock_entry(item_code="_Test Item", target="_Test Warehouse - _TC", qty=50, basic_rate=100)
actual_qty_0 = get_qty_after_transaction()
dn = create_delivery_note(qty=5, rate=500)
actual_qty_1 = get_qty_after_transaction()
self.assertEquals(actual_qty_0 - 5, actual_qty_1)
# outgoing_rate
outgoing_rate = frappe.db.get_value("Stock Ledger Entry", {"voucher_type": "Delivery Note",
"voucher_no": dn.name}, "stock_value_difference") / 5
# return entry
dn1 = create_delivery_note(is_return=1, return_against=dn.name, qty=-2, rate=500)
actual_qty_2 = get_qty_after_transaction()
self.assertEquals(actual_qty_1 + 2, actual_qty_2)
incoming_rate, stock_value_difference = frappe.db.get_value("Stock Ledger Entry",
{"voucher_type": "Delivery Note", "voucher_no": dn1.name},
["incoming_rate", "stock_value_difference"])
self.assertEquals(flt(incoming_rate, 3), abs(flt(outgoing_rate, 3)))
gle_warehouse_amount = frappe.db.get_value("GL Entry", {"voucher_type": "Delivery Note",
"voucher_no": dn1.name, "account": "_Test Warehouse - _TC"}, "debit")
self.assertEquals(gle_warehouse_amount, stock_value_difference)
set_perpetual_inventory(0)
def test_return_single_item_from_bundled_items(self):
set_perpetual_inventory()
create_stock_reconciliation(item_code="_Test Item", target="_Test Warehouse - _TC", qty=50, rate=100)
create_stock_reconciliation(item_code="_Test Item Home Desktop 100", target="_Test Warehouse - _TC",
qty=50, rate=100)
dn = create_delivery_note(item_code="_Test Product Bundle Item", qty=5, rate=500)
# Qty after delivery
actual_qty_1 = get_qty_after_transaction()
self.assertEquals(actual_qty_1, 25)
# outgoing_rate
outgoing_rate = frappe.db.get_value("Stock Ledger Entry", {"voucher_type": "Delivery Note",
"voucher_no": dn.name, "item_code": "_Test Item"}, "stock_value_difference") / 25
# return 'test item' from packed items
dn1 = create_delivery_note(is_return=1, return_against=dn.name, qty=-10, rate=500)
# qty after return
actual_qty_2 = get_qty_after_transaction()
self.assertEquals(actual_qty_2, 35)
# Check incoming rate for return entry
incoming_rate, stock_value_difference = frappe.db.get_value("Stock Ledger Entry",
{"voucher_type": "Delivery Note", "voucher_no": dn1.name},
["incoming_rate", "stock_value_difference"])
self.assertEquals(flt(incoming_rate, 3), abs(flt(outgoing_rate, 3)))
# Check gl entry for warehouse
gle_warehouse_amount = frappe.db.get_value("GL Entry", {"voucher_type": "Delivery Note",
"voucher_no": dn1.name, "account": "_Test Warehouse - _TC"}, "debit")
self.assertEquals(gle_warehouse_amount, stock_value_difference)
set_perpetual_inventory(0)
def test_return_entire_bundled_items(self):
set_perpetual_inventory()
create_stock_reconciliation(item_code="_Test Item", target="_Test Warehouse - _TC", qty=50, rate=100)
create_stock_reconciliation(item_code="_Test Item Home Desktop 100", target="_Test Warehouse - _TC",
qty=50, rate=100)
dn = create_delivery_note(item_code="_Test Product Bundle Item", qty=5, rate=500)
# return bundled item
dn1 = create_delivery_note(item_code='_Test Product Bundle Item', is_return=1,
return_against=dn.name, qty=-2, rate=500)
# qty after return
actual_qty = get_qty_after_transaction()
self.assertEquals(actual_qty, 35)
# Check incoming rate for return entry
incoming_rate, stock_value_difference = frappe.db.get_value("Stock Ledger Entry",
{"voucher_type": "Delivery Note", "voucher_no": dn1.name},
["incoming_rate", "stock_value_difference"])
self.assertEquals(incoming_rate, 100)
# Check gl entry for warehouse
gle_warehouse_amount = frappe.db.get_value("GL Entry", {"voucher_type": "Delivery Note",
"voucher_no": dn1.name, "account": "_Test Warehouse - _TC"}, "debit")
self.assertEquals(gle_warehouse_amount, 1400)
set_perpetual_inventory(0)
def test_return_for_serialized_items(self):
se = make_serialized_item()
serial_no = get_serial_nos(se.get("items")[0].serial_no)[0]
dn = create_delivery_note(item_code="_Test Serialized Item With Series", rate=500, serial_no=serial_no)
self.check_serial_no_values(serial_no, {
"warehouse": "",
"delivery_document_no": dn.name
})
# return entry
dn1 = create_delivery_note(item_code="_Test Serialized Item With Series",
is_return=1, return_against=dn.name, qty=-1, rate=500, serial_no=serial_no)
self.check_serial_no_values(serial_no, {
"warehouse": "_Test Warehouse - _TC",
"delivery_document_no": ""
})
dn1.cancel()
self.check_serial_no_values(serial_no, {
"warehouse": "",
"delivery_document_no": dn.name
})
dn.cancel()
self.check_serial_no_values(serial_no, {
"warehouse": "_Test Warehouse - _TC",
"delivery_document_no": "",
"purchase_document_no": se.name
})
def test_delivery_of_bundled_items_to_target_warehouse(self):
set_perpetual_inventory()
set_valuation_method("_Test Item", "FIFO")
set_valuation_method("_Test Item Home Desktop 100", "FIFO")
for warehouse in ("_Test Warehouse - _TC", "_Test Warehouse 1 - _TC"):
create_stock_reconciliation(item_code="_Test Item", target=warehouse,
qty=100, rate=100)
create_stock_reconciliation(item_code="_Test Item Home Desktop 100",
target=warehouse, qty=100, rate=100)
opening_qty_test_warehouse_1 = get_qty_after_transaction(warehouse="_Test Warehouse 1 - _TC")
dn = create_delivery_note(item_code="_Test Product Bundle Item",
qty=5, rate=500, target_warehouse="_Test Warehouse 1 - _TC", do_not_submit=True)
dn.submit()
# qty after delivery
actual_qty = get_qty_after_transaction(warehouse="_Test Warehouse - _TC")
self.assertEquals(actual_qty, 75)
actual_qty = get_qty_after_transaction(warehouse="_Test Warehouse 1 - _TC")
self.assertEquals(actual_qty, opening_qty_test_warehouse_1 + 25)
# stock value diff for source warehouse
# for "_Test Item"
stock_value_difference = frappe.db.get_value("Stock Ledger Entry",
{"voucher_type": "Delivery Note", "voucher_no": dn.name,
"item_code": "_Test Item", "warehouse": "_Test Warehouse - _TC"},
"stock_value_difference")
# stock value diff for target warehouse
stock_value_difference1 = frappe.db.get_value("Stock Ledger Entry",
{"voucher_type": "Delivery Note", "voucher_no": dn.name,
"item_code": "_Test Item", "warehouse": "_Test Warehouse 1 - _TC"},
"stock_value_difference")
self.assertEquals(abs(stock_value_difference), stock_value_difference1)
# for "_Test Item Home Desktop 100"
stock_value_difference = frappe.db.get_value("Stock Ledger Entry",
{"voucher_type": "Delivery Note", "voucher_no": dn.name,
"item_code": "_Test Item Home Desktop 100", "warehouse": "_Test Warehouse - _TC"},
"stock_value_difference")
# stock value diff for target warehouse
stock_value_difference1 = frappe.db.get_value("Stock Ledger Entry",
{"voucher_type": "Delivery Note", "voucher_no": dn.name,
"item_code": "_Test Item Home Desktop 100", "warehouse": "_Test Warehouse 1 - _TC"},
"stock_value_difference")
self.assertEquals(abs(stock_value_difference), stock_value_difference1)
# Check gl entries
gl_entries = get_gl_entries("Delivery Note", dn.name)
self.assertTrue(gl_entries)
stock_value_difference = abs(frappe.db.sql("""select sum(stock_value_difference)
from `tabStock Ledger Entry` where voucher_type='Delivery Note' and voucher_no=%s
and warehouse='_Test Warehouse - _TC'""", dn.name)[0][0])
expected_values = {
"_Test Warehouse - _TC": [0.0, stock_value_difference],
"_Test Warehouse 1 - _TC": [stock_value_difference, 0.0]
}
for i, gle in enumerate(gl_entries):
self.assertEquals([gle.debit, gle.credit], expected_values.get(gle.account))
set_perpetual_inventory(0)
def test_closed_delivery_note(self):
from erpnext.stock.doctype.delivery_note.delivery_note import update_delivery_note_status
dn = create_delivery_note(do_not_submit=True)
dn.submit()
update_delivery_note_status(dn.name, "Closed")
self.assertEquals(frappe.db.get_value("Delivery Note", dn.name, "Status"), "Closed")
def test_dn_billing_status_case1(self):
# SO -> DN -> SI
so = make_sales_order()
dn = create_dn_against_so(so.name, delivered_qty=2)
self.assertEqual(dn.status, "To Bill")
self.assertEqual(dn.per_billed, 0)
si = make_sales_invoice(dn.name)
si.submit()
dn.load_from_db()
self.assertEqual(dn.get("items")[0].billed_amt, 200)
self.assertEqual(dn.per_billed, 100)
self.assertEqual(dn.status, "Completed")
def test_dn_billing_status_case2(self):
# SO -> SI and SO -> DN1, DN2
from erpnext.selling.doctype.sales_order.sales_order import make_delivery_note, make_sales_invoice
so = make_sales_order()
si = make_sales_invoice(so.name)
si.get("items")[0].qty = 5
si.insert()
si.submit()
frappe.db.set_value("Stock Settings", None, "allow_negative_stock", 1)
dn1 = make_delivery_note(so.name)
dn1.set_posting_time = 1
dn1.posting_time = "10:00"
dn1.get("items")[0].qty = 2
dn1.submit()
self.assertEqual(dn1.get("items")[0].billed_amt, 200)
self.assertEqual(dn1.per_billed, 100)
self.assertEqual(dn1.status, "Completed")
dn2 = make_delivery_note(so.name)
dn2.set_posting_time = 1
dn2.posting_time = "08:00"
dn2.get("items")[0].qty = 4
dn2.submit()
dn1.load_from_db()
self.assertEqual(dn1.get("items")[0].billed_amt, 100)
self.assertEqual(dn1.per_billed, 50)
self.assertEqual(dn1.status, "To Bill")
self.assertEqual(dn2.get("items")[0].billed_amt, 400)
self.assertEqual(dn2.per_billed, 100)
self.assertEqual(dn2.status, "Completed")
def test_dn_billing_status_case3(self):
# SO -> DN1 -> SI and SO -> SI and SO -> DN2
from erpnext.selling.doctype.sales_order.sales_order \
import make_delivery_note, make_sales_invoice as make_sales_invoice_from_so
frappe.db.set_value("Stock Settings", None, "allow_negative_stock", 1)
so = make_sales_order()
dn1 = make_delivery_note(so.name)
dn1.set_posting_time = 1
dn1.posting_time = "10:00"
dn1.get("items")[0].qty = 2
dn1.submit()
si1 = make_sales_invoice(dn1.name)
si1.submit()
dn1.load_from_db()
self.assertEqual(dn1.per_billed, 100)
si2 = make_sales_invoice_from_so(so.name)
si2.get("items")[0].qty = 4
si2.submit()
dn2 = make_delivery_note(so.name)
dn2.posting_time = "08:00"
dn2.get("items")[0].qty = 5
dn2.submit()
dn1.load_from_db()
self.assertEqual(dn1.get("items")[0].billed_amt, 200)
self.assertEqual(dn1.per_billed, 100)
self.assertEqual(dn1.status, "Completed")
self.assertEqual(dn2.get("items")[0].billed_amt, 400)
self.assertEqual(dn2.per_billed, 80)
self.assertEqual(dn2.status, "To Bill")
def test_dn_billing_status_case4(self):
# SO -> SI -> DN
from erpnext.selling.doctype.sales_order.sales_order import make_sales_invoice
from erpnext.accounts.doctype.sales_invoice.sales_invoice import make_delivery_note
so = make_sales_order()
si = make_sales_invoice(so.name)
si.submit()
dn = make_delivery_note(si.name)
dn.submit()
self.assertEqual(dn.get("items")[0].billed_amt, 1000)
self.assertEqual(dn.per_billed, 100)
self.assertEqual(dn.status, "Completed")
def create_delivery_note(**args):
dn = frappe.new_doc("Delivery Note")
args = frappe._dict(args)
dn.posting_date = args.posting_date or today()
if args.posting_time:
dn.posting_time = args.posting_time
dn.company = args.company or "_Test Company"
dn.customer = args.customer or "_Test Customer"
dn.currency = args.currency or "INR"
dn.is_return = args.is_return
dn.return_against = args.return_against
dn.append("items", {
"item_code": args.item or args.item_code or "_Test Item",
"warehouse": args.warehouse or "_Test Warehouse - _TC",
"qty": args.qty or 1,
"rate": args.rate or 100,
"conversion_factor": 1.0,
"expense_account": "Cost of Goods Sold - _TC",
"cost_center": "_Test Cost Center - _TC",
"serial_no": args.serial_no,
"target_warehouse": args.target_warehouse
})
if not args.do_not_save:
dn.insert()
if not args.do_not_submit:
dn.submit()
return dn
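# A minimal sketch of how the helper above is typically called from a test
# (argument values are hypothetical):
#   dn = create_delivery_note(item_code="_Test Item", qty=5, rate=500, do_not_submit=True)
#   dn.submit()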
test_dependencies = ["Product Bundle"]
| gpl-3.0 |
EttusResearch/gnuradio | gr-digital/examples/narrowband/benchmark_add_channel.py | 41 | 4246 | #!/usr/bin/env python
#
# Copyright 2010,2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, filter
from gnuradio import blocks
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import random, math, sys
class my_top_block(gr.top_block):
def __init__(self, ifile, ofile, options):
gr.top_block.__init__(self)
SNR = 10.0**(options.snr/10.0)
frequency_offset = options.frequency_offset
time_offset = options.time_offset
phase_offset = options.phase_offset*(math.pi/180.0)
# calculate noise voltage from SNR
power_in_signal = abs(options.tx_amplitude)**2
noise_power = power_in_signal/SNR
noise_voltage = math.sqrt(noise_power)
self.src = blocks.file_source(gr.sizeof_gr_complex, ifile)
#self.throttle = blocks.throttle(gr.sizeof_gr_complex, options.sample_rate)
self.channel = filter.channel_model(noise_voltage, frequency_offset,
time_offset, noise_seed=-random.randint(0,100000))
self.phase = blocks.multiply_const_cc(complex(math.cos(phase_offset),
math.sin(phase_offset)))
self.snk = blocks.file_sink(gr.sizeof_gr_complex, ofile)
self.connect(self.src, self.channel, self.phase, self.snk)
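        # Worked example of the SNR math above (hypothetical numbers): with
        # tx_amplitude = 1.0 and snr = 30 dB, SNR = 10**(30/10) = 1000, so
        # noise_power = 1/1000 and noise_voltage = sqrt(0.001) ~= 0.0316.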
# /////////////////////////////////////////////////////////////////////////////
# main
# /////////////////////////////////////////////////////////////////////////////
def main():
# Create Options Parser:
usage = "benchmack_add_channel.py [options] <input file> <output file>"
parser = OptionParser (usage=usage, option_class=eng_option, conflict_handler="resolve")
parser.add_option("-n", "--snr", type="eng_float", default=30,
help="set the SNR of the channel in dB [default=%default]")
parser.add_option("", "--seed", action="store_true", default=False,
help="use a random seed for AWGN noise [default=%default]")
parser.add_option("-f", "--frequency-offset", type="eng_float", default=0,
help="set frequency offset introduced by channel [default=%default]")
parser.add_option("-t", "--time-offset", type="eng_float", default=1.0,
help="set timing offset between Tx and Rx [default=%default]")
parser.add_option("-p", "--phase-offset", type="eng_float", default=0,
help="set phase offset (in degrees) between Tx and Rx [default=%default]")
parser.add_option("-m", "--use-multipath", action="store_true", default=False,
help="Use a multipath channel [default=%default]")
parser.add_option("", "--tx-amplitude", type="eng_float",
default=1.0,
help="tell the simulator the signal amplitude [default=%default]")
(options, args) = parser.parse_args ()
if len(args) != 2:
parser.print_help(sys.stderr)
sys.exit(1)
ifile = args[0]
ofile = args[1]
# build the graph
tb = my_top_block(ifile, ofile, options)
r = gr.enable_realtime_scheduling()
if r != gr.RT_OK:
print "Warning: Failed to enable realtime scheduling."
tb.start() # start flow graph
tb.wait() # wait for it to finish
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
ChristinaZografou/sympy | sympy/functions/elementary/hyperbolic.py | 6 | 35314 | from __future__ import print_function, division
from sympy.core import S, sympify, cacheit
from sympy.core.function import Function, ArgumentIndexError, _coeff_isneg
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.elementary.exponential import exp, log
from sympy.functions.combinatorial.factorials import factorial, RisingFactorial
def _rewrite_hyperbolics_as_exp(expr):
expr = sympify(expr)
return expr.xreplace(dict([(h, h.rewrite(exp))
for h in expr.atoms(HyperbolicFunction)]))
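# Illustrative use of the helper above (a sketch):
#   from sympy.abc import x
#   _rewrite_hyperbolics_as_exp(tanh(x))
# returns an expression equivalent to (exp(x) - exp(-x))/(exp(x) + exp(-x)).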
###############################################################################
########################### HYPERBOLIC FUNCTIONS ##############################
###############################################################################
class HyperbolicFunction(Function):
"""
Base class for hyperbolic functions.
See Also
========
sinh, cosh, tanh, coth
"""
unbranched = True
class sinh(HyperbolicFunction):
r"""
The hyperbolic sine function, `\frac{e^x - e^{-x}}{2}`.
* sinh(x) -> Returns the hyperbolic sine of x
See Also
========
cosh, tanh, asinh
"""
def fdiff(self, argindex=1):
"""
Returns the first derivative of this function.
"""
if argindex == 1:
return cosh(self.args[0])
else:
raise ArgumentIndexError(self, argindex)
def inverse(self, argindex=1):
"""
Returns the inverse of this function.
"""
return asinh
@classmethod
def eval(cls, arg):
from sympy import sin
arg = sympify(arg)
if arg.is_Number:
if arg is S.NaN:
return S.NaN
elif arg is S.Infinity:
return S.Infinity
elif arg is S.NegativeInfinity:
return S.NegativeInfinity
elif arg is S.Zero:
return S.Zero
elif arg.is_negative:
return -cls(-arg)
else:
if arg is S.ComplexInfinity:
return S.NaN
i_coeff = arg.as_coefficient(S.ImaginaryUnit)
if i_coeff is not None:
return S.ImaginaryUnit * sin(i_coeff)
else:
if _coeff_isneg(arg):
return -cls(-arg)
if arg.func == asinh:
return arg.args[0]
if arg.func == acosh:
x = arg.args[0]
return sqrt(x - 1) * sqrt(x + 1)
if arg.func == atanh:
x = arg.args[0]
return x/sqrt(1 - x**2)
if arg.func == acoth:
x = arg.args[0]
return 1/(sqrt(x - 1) * sqrt(x + 1))
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
"""
Returns the next term in the Taylor series expansion.
"""
if n < 0 or n % 2 == 0:
return S.Zero
else:
x = sympify(x)
if len(previous_terms) > 2:
p = previous_terms[-2]
return p * x**2 / (n*(n - 1))
else:
return x**(n) / factorial(n)
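        # First few terms this produces (a sketch): n=1 -> x, n=3 -> x**3/6,
        # n=5 -> x**5/120, matching the series x + x**3/3! + x**5/5! + ...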
def _eval_conjugate(self):
return self.func(self.args[0].conjugate())
def as_real_imag(self, deep=True, **hints):
"""
Returns this function as a complex coordinate.
"""
from sympy import cos, sin
if self.args[0].is_real:
if deep:
hints['complex'] = False
return (self.expand(deep, **hints), S.Zero)
else:
return (self, S.Zero)
if deep:
re, im = self.args[0].expand(deep, **hints).as_real_imag()
else:
re, im = self.args[0].as_real_imag()
return (sinh(re)*cos(im), cosh(re)*sin(im))
def _eval_expand_complex(self, deep=True, **hints):
re_part, im_part = self.as_real_imag(deep=deep, **hints)
return re_part + im_part*S.ImaginaryUnit
def _eval_expand_trig(self, deep=True, **hints):
if deep:
arg = self.args[0].expand(deep, **hints)
else:
arg = self.args[0]
x = None
if arg.is_Add: # TODO, implement more if deep stuff here
x, y = arg.as_two_terms()
else:
coeff, terms = arg.as_coeff_Mul(rational=True)
if coeff is not S.One and coeff.is_Integer and terms is not S.One:
x = terms
y = (coeff - 1)*x
if x is not None:
return (sinh(x)*cosh(y) + sinh(y)*cosh(x)).expand(trig=True)
return sinh(arg)
def _eval_rewrite_as_tractable(self, arg):
return (exp(arg) - exp(-arg)) / 2
def _eval_rewrite_as_exp(self, arg):
return (exp(arg) - exp(-arg)) / 2
def _eval_rewrite_as_cosh(self, arg):
return -S.ImaginaryUnit*cosh(arg + S.Pi*S.ImaginaryUnit/2)
def _eval_rewrite_as_tanh(self, arg):
tanh_half = tanh(S.Half*arg)
return 2*tanh_half/(1 - tanh_half**2)
def _eval_rewrite_as_coth(self, arg):
coth_half = coth(S.Half*arg)
return 2*coth_half/(coth_half**2 - 1)
def _eval_as_leading_term(self, x):
from sympy import Order
arg = self.args[0].as_leading_term(x)
if x in arg.free_symbols and Order(1, x).contains(arg):
return arg
else:
return self.func(arg)
def _eval_is_real(self):
return self.args[0].is_real
def _eval_is_finite(self):
arg = self.args[0]
if arg.is_imaginary:
return True
class cosh(HyperbolicFunction):
r"""
The hyperbolic cosine function, `\frac{e^x + e^{-x}}{2}`.
* cosh(x) -> Returns the hyperbolic cosine of x
See Also
========
sinh, tanh, acosh
"""
def fdiff(self, argindex=1):
if argindex == 1:
return sinh(self.args[0])
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, arg):
from sympy import cos
arg = sympify(arg)
if arg.is_Number:
if arg is S.NaN:
return S.NaN
elif arg is S.Infinity:
return S.Infinity
elif arg is S.NegativeInfinity:
return S.Infinity
elif arg is S.Zero:
return S.One
elif arg.is_negative:
return cls(-arg)
else:
if arg is S.ComplexInfinity:
return S.NaN
i_coeff = arg.as_coefficient(S.ImaginaryUnit)
if i_coeff is not None:
return cos(i_coeff)
else:
if _coeff_isneg(arg):
return cls(-arg)
if arg.func == asinh:
return sqrt(1 + arg.args[0]**2)
if arg.func == acosh:
return arg.args[0]
if arg.func == atanh:
return 1/sqrt(1 - arg.args[0]**2)
if arg.func == acoth:
x = arg.args[0]
return x/(sqrt(x - 1) * sqrt(x + 1))
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
if n < 0 or n % 2 == 1:
return S.Zero
else:
x = sympify(x)
if len(previous_terms) > 2:
p = previous_terms[-2]
return p * x**2 / (n*(n - 1))
else:
return x**(n)/factorial(n)
def _eval_conjugate(self):
return self.func(self.args[0].conjugate())
def as_real_imag(self, deep=True, **hints):
from sympy import cos, sin
if self.args[0].is_real:
if deep:
hints['complex'] = False
return (self.expand(deep, **hints), S.Zero)
else:
return (self, S.Zero)
if deep:
re, im = self.args[0].expand(deep, **hints).as_real_imag()
else:
re, im = self.args[0].as_real_imag()
return (cosh(re)*cos(im), sinh(re)*sin(im))
def _eval_expand_complex(self, deep=True, **hints):
re_part, im_part = self.as_real_imag(deep=deep, **hints)
return re_part + im_part*S.ImaginaryUnit
def _eval_expand_trig(self, deep=True, **hints):
if deep:
arg = self.args[0].expand(deep, **hints)
else:
arg = self.args[0]
x = None
if arg.is_Add: # TODO, implement more if deep stuff here
x, y = arg.as_two_terms()
else:
coeff, terms = arg.as_coeff_Mul(rational=True)
if coeff is not S.One and coeff.is_Integer and terms is not S.One:
x = terms
y = (coeff - 1)*x
if x is not None:
return (cosh(x)*cosh(y) + sinh(x)*sinh(y)).expand(trig=True)
return cosh(arg)
def _eval_rewrite_as_tractable(self, arg):
return (exp(arg) + exp(-arg)) / 2
def _eval_rewrite_as_exp(self, arg):
return (exp(arg) + exp(-arg)) / 2
def _eval_rewrite_as_sinh(self, arg):
return -S.ImaginaryUnit*sinh(arg + S.Pi*S.ImaginaryUnit/2)
def _eval_rewrite_as_tanh(self, arg):
tanh_half = tanh(S.Half*arg)**2
return (1 + tanh_half)/(1 - tanh_half)
def _eval_rewrite_as_coth(self, arg):
coth_half = coth(S.Half*arg)**2
return (coth_half + 1)/(coth_half - 1)
def _eval_as_leading_term(self, x):
from sympy import Order
arg = self.args[0].as_leading_term(x)
if x in arg.free_symbols and Order(1, x).contains(arg):
return S.One
else:
return self.func(arg)
def _eval_is_real(self):
return self.args[0].is_real
def _eval_is_finite(self):
arg = self.args[0]
if arg.is_imaginary:
return True
class tanh(HyperbolicFunction):
r"""
The hyperbolic tangent function, `\frac{\sinh(x)}{\cosh(x)}`.
* tanh(x) -> Returns the hyperbolic tangent of x
See Also
========
sinh, cosh, atanh
"""
def fdiff(self, argindex=1):
if argindex == 1:
return S.One - tanh(self.args[0])**2
else:
raise ArgumentIndexError(self, argindex)
def inverse(self, argindex=1):
"""
Returns the inverse of this function.
"""
return atanh
@classmethod
def eval(cls, arg):
from sympy import tan
arg = sympify(arg)
if arg.is_Number:
if arg is S.NaN:
return S.NaN
elif arg is S.Infinity:
return S.One
elif arg is S.NegativeInfinity:
return S.NegativeOne
elif arg is S.Zero:
return S.Zero
elif arg.is_negative:
return -cls(-arg)
else:
if arg is S.ComplexInfinity:
return S.NaN
i_coeff = arg.as_coefficient(S.ImaginaryUnit)
if i_coeff is not None:
if _coeff_isneg(i_coeff):
return -S.ImaginaryUnit * tan(-i_coeff)
return S.ImaginaryUnit * tan(i_coeff)
else:
if _coeff_isneg(arg):
return -cls(-arg)
if arg.func == asinh:
x = arg.args[0]
return x/sqrt(1 + x**2)
if arg.func == acosh:
x = arg.args[0]
return sqrt(x - 1) * sqrt(x + 1) / x
if arg.func == atanh:
return arg.args[0]
if arg.func == acoth:
return 1/arg.args[0]
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
from sympy import bernoulli
if n < 0 or n % 2 == 0:
return S.Zero
else:
x = sympify(x)
a = 2**(n + 1)
B = bernoulli(n + 1)
F = factorial(n + 1)
return a*(a - 1) * B/F * x**n
def _eval_conjugate(self):
return self.func(self.args[0].conjugate())
def as_real_imag(self, deep=True, **hints):
from sympy import cos, sin
if self.args[0].is_real:
if deep:
hints['complex'] = False
return (self.expand(deep, **hints), S.Zero)
else:
return (self, S.Zero)
if deep:
re, im = self.args[0].expand(deep, **hints).as_real_imag()
else:
re, im = self.args[0].as_real_imag()
denom = sinh(re)**2 + cos(im)**2
return (sinh(re)*cosh(re)/denom, sin(im)*cos(im)/denom)
def _eval_rewrite_as_tractable(self, arg):
neg_exp, pos_exp = exp(-arg), exp(arg)
return (pos_exp - neg_exp)/(pos_exp + neg_exp)
def _eval_rewrite_as_exp(self, arg):
neg_exp, pos_exp = exp(-arg), exp(arg)
return (pos_exp - neg_exp)/(pos_exp + neg_exp)
def _eval_rewrite_as_sinh(self, arg):
return S.ImaginaryUnit*sinh(arg)/sinh(S.Pi*S.ImaginaryUnit/2 - arg)
def _eval_rewrite_as_cosh(self, arg):
return S.ImaginaryUnit*cosh(S.Pi*S.ImaginaryUnit/2 - arg)/cosh(arg)
def _eval_rewrite_as_coth(self, arg):
return 1/coth(arg)
def _eval_as_leading_term(self, x):
from sympy import Order
arg = self.args[0].as_leading_term(x)
if x in arg.free_symbols and Order(1, x).contains(arg):
return arg
else:
return self.func(arg)
def _eval_is_real(self):
return self.args[0].is_real
def _eval_is_finite(self):
arg = self.args[0]
if arg.is_real:
return True
class coth(HyperbolicFunction):
r"""
The hyperbolic cotangent function, `\frac{\cosh(x)}{\sinh(x)}`.
* coth(x) -> Returns the hyperbolic cotangent of x
"""
def fdiff(self, argindex=1):
if argindex == 1:
return -1/sinh(self.args[0])**2
else:
raise ArgumentIndexError(self, argindex)
def inverse(self, argindex=1):
"""
Returns the inverse of this function.
"""
return acoth
@classmethod
def eval(cls, arg):
from sympy import cot
arg = sympify(arg)
if arg.is_Number:
if arg is S.NaN:
return S.NaN
elif arg is S.Infinity:
return S.One
elif arg is S.NegativeInfinity:
return S.NegativeOne
elif arg is S.Zero:
return S.ComplexInfinity
elif arg.is_negative:
return -cls(-arg)
else:
if arg is S.ComplexInfinity:
return S.NaN
i_coeff = arg.as_coefficient(S.ImaginaryUnit)
if i_coeff is not None:
if _coeff_isneg(i_coeff):
return S.ImaginaryUnit * cot(-i_coeff)
return -S.ImaginaryUnit * cot(i_coeff)
else:
if _coeff_isneg(arg):
return -cls(-arg)
if arg.func == asinh:
x = arg.args[0]
return sqrt(1 + x**2)/x
if arg.func == acosh:
x = arg.args[0]
return x/(sqrt(x - 1) * sqrt(x + 1))
if arg.func == atanh:
return 1/arg.args[0]
if arg.func == acoth:
return arg.args[0]
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
from sympy import bernoulli
if n == 0:
return 1 / sympify(x)
elif n < 0 or n % 2 == 0:
return S.Zero
else:
x = sympify(x)
B = bernoulli(n + 1)
F = factorial(n + 1)
return 2**(n + 1) * B/F * x**n
def _eval_conjugate(self):
return self.func(self.args[0].conjugate())
def as_real_imag(self, deep=True, **hints):
from sympy import cos, sin
if self.args[0].is_real:
if deep:
hints['complex'] = False
return (self.expand(deep, **hints), S.Zero)
else:
return (self, S.Zero)
if deep:
re, im = self.args[0].expand(deep, **hints).as_real_imag()
else:
re, im = self.args[0].as_real_imag()
denom = sinh(re)**2 + sin(im)**2
return (sinh(re)*cosh(re)/denom, -sin(im)*cos(im)/denom)
def _eval_rewrite_as_tractable(self, arg):
neg_exp, pos_exp = exp(-arg), exp(arg)
return (pos_exp + neg_exp)/(pos_exp - neg_exp)
def _eval_rewrite_as_exp(self, arg):
neg_exp, pos_exp = exp(-arg), exp(arg)
return (pos_exp + neg_exp)/(pos_exp - neg_exp)
def _eval_rewrite_as_sinh(self, arg):
return -S.ImaginaryUnit*sinh(S.Pi*S.ImaginaryUnit/2 - arg)/sinh(arg)
def _eval_rewrite_as_cosh(self, arg):
return -S.ImaginaryUnit*cosh(arg)/cosh(S.Pi*S.ImaginaryUnit/2 - arg)
def _eval_rewrite_as_tanh(self, arg):
return 1/tanh(arg)
def _eval_as_leading_term(self, x):
from sympy import Order
arg = self.args[0].as_leading_term(x)
if x in arg.free_symbols and Order(1, x).contains(arg):
return 1/arg
else:
return self.func(arg)
class ReciprocalHyperbolicFunction(HyperbolicFunction):
"""Base class for reciprocal functions of hyperbolic functions. """
#To be defined in class
_reciprocal_of = None
_is_even = None
_is_odd = None
@classmethod
def eval(cls, arg):
if arg.could_extract_minus_sign():
if cls._is_even:
return cls(-arg)
if cls._is_odd:
return -cls(-arg)
t = cls._reciprocal_of.eval(arg)
if hasattr(arg, 'inverse') and arg.inverse() == cls:
return arg.args[0]
        return 1/t if t is not None else t
def _call_reciprocal(self, method_name, *args, **kwargs):
# Calls method_name on _reciprocal_of
o = self._reciprocal_of(self.args[0])
return getattr(o, method_name)(*args, **kwargs)
def _calculate_reciprocal(self, method_name, *args, **kwargs):
# If calling method_name on _reciprocal_of returns a value != None
# then return the reciprocal of that value
t = self._call_reciprocal(method_name, *args, **kwargs)
        return 1/t if t is not None else t
def _rewrite_reciprocal(self, method_name, arg):
# Special handling for rewrite functions. If reciprocal rewrite returns
# unmodified expression, then return None
t = self._call_reciprocal(method_name, arg)
        if t is not None and t != self._reciprocal_of(arg):
return 1/t
def _eval_rewrite_as_exp(self, arg):
return self._rewrite_reciprocal("_eval_rewrite_as_exp", arg)
def _eval_rewrite_as_tractable(self, arg):
return self._rewrite_reciprocal("_eval_rewrite_as_tractable", arg)
def _eval_rewrite_as_tanh(self, arg):
return self._rewrite_reciprocal("_eval_rewrite_as_tanh", arg)
def _eval_rewrite_as_coth(self, arg):
return self._rewrite_reciprocal("_eval_rewrite_as_coth", arg)
def as_real_imag(self, deep = True, **hints):
return (1 / self._reciprocal_of(self.args[0])).as_real_imag(deep, **hints)
def _eval_conjugate(self):
return self.func(self.args[0].conjugate())
def _eval_expand_complex(self, deep=True, **hints):
re_part, im_part = self.as_real_imag(deep=True, **hints)
return re_part + S.ImaginaryUnit*im_part
def _eval_as_leading_term(self, x):
return (1/self._reciprocal_of(self.args[0]))._eval_as_leading_term(x)
def _eval_is_real(self):
return self._reciprocal_of(self.args[0]).is_real
def _eval_is_finite(self):
return (1/self._reciprocal_of(self.args[0])).is_finite
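# How the reciprocal dispatch above plays out for the subclasses below (a sketch):
# csch(x).rewrite(exp) calls sinh(x)._eval_rewrite_as_exp(x) and returns its
# reciprocal, i.e. 2/(exp(x) - exp(-x)).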
class csch(ReciprocalHyperbolicFunction):
r"""
The hyperbolic cosecant function, `\frac{2}{e^x - e^{-x}}`
* csch(x) -> Returns the hyperbolic cosecant of x
See Also
========
sinh, cosh, tanh, sech, asinh, acosh
"""
_reciprocal_of = sinh
_is_odd = True
def fdiff(self, argindex=1):
"""
Returns the first derivative of this function
"""
if argindex == 1:
return -coth(self.args[0]) * csch(self.args[0])
else:
raise ArgumentIndexError(self, argindex)
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
"""
Returns the next term in the Taylor series expansion
"""
from sympy import bernoulli
if n == 0:
return 1/sympify(x)
elif n < 0 or n % 2 == 0:
return S.Zero
else:
x = sympify(x)
B = bernoulli(n + 1)
F = factorial(n + 1)
return 2 * (1 - 2**n) * B/F * x**n
def _eval_rewrite_as_cosh(self, arg):
return S.ImaginaryUnit / cosh(arg + S.ImaginaryUnit * S.Pi / 2)
def _sage_(self):
import sage.all as sage
return sage.csch(self.args[0]._sage_())
class sech(ReciprocalHyperbolicFunction):
r"""
The hyperbolic secant function, `\frac{2}{e^x + e^{-x}}`
* sech(x) -> Returns the hyperbolic secant of x
See Also
========
sinh, cosh, tanh, coth, csch, asinh, acosh
"""
_reciprocal_of = cosh
_is_even = True
def fdiff(self, argindex=1):
if argindex == 1:
return - tanh(self.args[0])*sech(self.args[0])
else:
raise ArgumentIndexError(self, argindex)
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
from sympy.functions.combinatorial.numbers import euler
if n < 0 or n % 2 == 1:
return S.Zero
else:
x = sympify(x)
return euler(n) / factorial(n) * x**(n)
def _eval_rewrite_as_sinh(self, arg):
return S.ImaginaryUnit / sinh(arg + S.ImaginaryUnit * S.Pi /2)
def _sage_(self):
import sage.all as sage
return sage.sech(self.args[0]._sage_())
###############################################################################
############################# HYPERBOLIC INVERSES #############################
###############################################################################
class asinh(Function):
"""
The inverse hyperbolic sine function.
* asinh(x) -> Returns the inverse hyperbolic sine of x
See Also
========
acosh, atanh, sinh
"""
def fdiff(self, argindex=1):
if argindex == 1:
return 1/sqrt(self.args[0]**2 + 1)
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, arg):
from sympy import asin
arg = sympify(arg)
if arg.is_Number:
if arg is S.NaN:
return S.NaN
elif arg is S.Infinity:
return S.Infinity
elif arg is S.NegativeInfinity:
return S.NegativeInfinity
elif arg is S.Zero:
return S.Zero
elif arg is S.One:
return log(sqrt(2) + 1)
elif arg is S.NegativeOne:
return log(sqrt(2) - 1)
elif arg.is_negative:
return -cls(-arg)
else:
if arg is S.ComplexInfinity:
return S.ComplexInfinity
i_coeff = arg.as_coefficient(S.ImaginaryUnit)
if i_coeff is not None:
return S.ImaginaryUnit * asin(i_coeff)
else:
if _coeff_isneg(arg):
return -cls(-arg)
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
if n < 0 or n % 2 == 0:
return S.Zero
else:
x = sympify(x)
if len(previous_terms) >= 2 and n > 2:
p = previous_terms[-2]
return -p * (n - 2)**2/(n*(n - 1)) * x**2
else:
k = (n - 1) // 2
R = RisingFactorial(S.Half, k)
F = factorial(k)
return (-1)**k * R / F * x**n / n
def _eval_as_leading_term(self, x):
from sympy import Order
arg = self.args[0].as_leading_term(x)
if x in arg.free_symbols and Order(1, x).contains(arg):
return arg
else:
return self.func(arg)
def _eval_rewrite_as_log(self, x):
"""
Rewrites asinh as log function.
"""
return log(x + sqrt(x**2 + 1))
def inverse(self, argindex=1):
"""
Returns the inverse of this function.
"""
return sinh
class acosh(Function):
"""
The inverse hyperbolic cosine function.
* acosh(x) -> Returns the inverse hyperbolic cosine of x
See Also
========
asinh, atanh, cosh
"""
def fdiff(self, argindex=1):
if argindex == 1:
return 1/sqrt(self.args[0]**2 - 1)
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, arg):
arg = sympify(arg)
if arg.is_Number:
if arg is S.NaN:
return S.NaN
elif arg is S.Infinity:
return S.Infinity
elif arg is S.NegativeInfinity:
return S.Infinity
elif arg is S.Zero:
return S.Pi*S.ImaginaryUnit / 2
elif arg is S.One:
return S.Zero
elif arg is S.NegativeOne:
return S.Pi*S.ImaginaryUnit
if arg.is_number:
cst_table = {
S.ImaginaryUnit: log(S.ImaginaryUnit*(1 + sqrt(2))),
-S.ImaginaryUnit: log(-S.ImaginaryUnit*(1 + sqrt(2))),
S.Half: S.Pi/3,
-S.Half: 2*S.Pi/3,
sqrt(2)/2: S.Pi/4,
-sqrt(2)/2: 3*S.Pi/4,
1/sqrt(2): S.Pi/4,
-1/sqrt(2): 3*S.Pi/4,
sqrt(3)/2: S.Pi/6,
-sqrt(3)/2: 5*S.Pi/6,
(sqrt(3) - 1)/sqrt(2**3): 5*S.Pi/12,
-(sqrt(3) - 1)/sqrt(2**3): 7*S.Pi/12,
sqrt(2 + sqrt(2))/2: S.Pi/8,
-sqrt(2 + sqrt(2))/2: 7*S.Pi/8,
sqrt(2 - sqrt(2))/2: 3*S.Pi/8,
-sqrt(2 - sqrt(2))/2: 5*S.Pi/8,
(1 + sqrt(3))/(2*sqrt(2)): S.Pi/12,
-(1 + sqrt(3))/(2*sqrt(2)): 11*S.Pi/12,
(sqrt(5) + 1)/4: S.Pi/5,
-(sqrt(5) + 1)/4: 4*S.Pi/5
}
if arg in cst_table:
if arg.is_real:
return cst_table[arg]*S.ImaginaryUnit
return cst_table[arg]
if arg.is_infinite:
return S.Infinity
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
if n == 0:
return S.Pi*S.ImaginaryUnit / 2
elif n < 0 or n % 2 == 0:
return S.Zero
else:
x = sympify(x)
if len(previous_terms) >= 2 and n > 2:
p = previous_terms[-2]
return p * (n - 2)**2/(n*(n - 1)) * x**2
else:
k = (n - 1) // 2
R = RisingFactorial(S.Half, k)
F = factorial(k)
return -R / F * S.ImaginaryUnit * x**n / n
def _eval_as_leading_term(self, x):
from sympy import Order
arg = self.args[0].as_leading_term(x)
if x in arg.free_symbols and Order(1, x).contains(arg):
return S.ImaginaryUnit*S.Pi/2
else:
return self.func(arg)
def inverse(self, argindex=1):
"""
Returns the inverse of this function.
"""
return cosh
class atanh(Function):
"""
The inverse hyperbolic tangent function.
* atanh(x) -> Returns the inverse hyperbolic tangent of x
See Also
========
asinh, acosh, tanh
"""
def fdiff(self, argindex=1):
if argindex == 1:
return 1/(1 - self.args[0]**2)
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, arg):
from sympy import atan
arg = sympify(arg)
if arg.is_Number:
if arg is S.NaN:
return S.NaN
elif arg is S.Zero:
return S.Zero
elif arg is S.One:
return S.Infinity
elif arg is S.NegativeOne:
return S.NegativeInfinity
elif arg is S.Infinity:
return -S.ImaginaryUnit * atan(arg)
elif arg is S.NegativeInfinity:
return S.ImaginaryUnit * atan(-arg)
elif arg.is_negative:
return -cls(-arg)
else:
if arg is S.ComplexInfinity:
return S.NaN
i_coeff = arg.as_coefficient(S.ImaginaryUnit)
if i_coeff is not None:
return S.ImaginaryUnit * atan(i_coeff)
else:
if _coeff_isneg(arg):
return -cls(-arg)
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
if n < 0 or n % 2 == 0:
return S.Zero
else:
x = sympify(x)
return x**n / n
def _eval_as_leading_term(self, x):
from sympy import Order
arg = self.args[0].as_leading_term(x)
if x in arg.free_symbols and Order(1, x).contains(arg):
return arg
else:
return self.func(arg)
def inverse(self, argindex=1):
"""
Returns the inverse of this function.
"""
return tanh
class acoth(Function):
"""
The inverse hyperbolic cotangent function.
* acoth(x) -> Returns the inverse hyperbolic cotangent of x
"""
def fdiff(self, argindex=1):
if argindex == 1:
return 1/(1 - self.args[0]**2)
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, arg):
from sympy import acot
arg = sympify(arg)
if arg.is_Number:
if arg is S.NaN:
return S.NaN
elif arg is S.Infinity:
return S.Zero
elif arg is S.NegativeInfinity:
return S.Zero
elif arg is S.Zero:
return S.Pi*S.ImaginaryUnit / 2
elif arg is S.One:
return S.Infinity
elif arg is S.NegativeOne:
return S.NegativeInfinity
elif arg.is_negative:
return -cls(-arg)
else:
if arg is S.ComplexInfinity:
                return S.Zero
i_coeff = arg.as_coefficient(S.ImaginaryUnit)
if i_coeff is not None:
return -S.ImaginaryUnit * acot(i_coeff)
else:
if _coeff_isneg(arg):
return -cls(-arg)
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
if n == 0:
return S.Pi*S.ImaginaryUnit / 2
elif n < 0 or n % 2 == 0:
return S.Zero
else:
x = sympify(x)
return x**n / n
def _eval_as_leading_term(self, x):
from sympy import Order
arg = self.args[0].as_leading_term(x)
if x in arg.free_symbols and Order(1, x).contains(arg):
return S.ImaginaryUnit*S.Pi/2
else:
return self.func(arg)
def inverse(self, argindex=1):
"""
Returns the inverse of this function.
"""
return coth
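
# Illustrative note (added, not part of the original source): values produced by
# acoth.eval above.  A minimal sketch assuming a standard SymPy environment;
# illustrative only.
#
#   >>> from sympy import acoth, I, oo
#   >>> acoth(oo)
#   0
#   >>> acoth(0)
#   I*pi/2
#   >>> acoth(I)          # pure-imaginary arguments reduce to -I*acot(...)
#   -I*pi/4
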
class asech(Function):
"""
The inverse hyperbolic secant function.
* asech(x) -> Returns the inverse hyperbolic secant of x
Examples
========
>>> from sympy import asech, sqrt, S
>>> from sympy.abc import x
>>> asech(x).diff(x)
-1/(x*sqrt(-x**2 + 1))
>>> asech(1).diff(x)
0
>>> asech(1)
0
>>> asech(S(2))
I*pi/3
>>> asech(-sqrt(2))
3*I*pi/4
>>> asech((sqrt(6) - sqrt(2)))
I*pi/12
See Also
========
asinh, atanh, cosh, acoth
References
==========
.. [1] http://en.wikipedia.org/wiki/Hyperbolic_function
.. [2] http://dlmf.nist.gov/4.37
.. [3] http://functions.wolfram.com/ElementaryFunctions/ArcSech/
"""
def fdiff(self, argindex=1):
if argindex == 1:
z = self.args[0]
return -1/(z*sqrt(1 - z**2))
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, arg):
arg = sympify(arg)
if arg.is_Number:
if arg is S.NaN:
return S.NaN
elif arg is S.Infinity:
return S.Pi*S.ImaginaryUnit / 2
elif arg is S.NegativeInfinity:
return S.Pi*S.ImaginaryUnit / 2
elif arg is S.Zero:
return S.Infinity
elif arg is S.One:
return S.Zero
elif arg is S.NegativeOne:
return S.Pi*S.ImaginaryUnit
if arg.is_number:
cst_table = {
S.ImaginaryUnit: - (S.Pi*S.ImaginaryUnit / 2) + log(1 + sqrt(2)),
-S.ImaginaryUnit: (S.Pi*S.ImaginaryUnit / 2) + log(1 + sqrt(2)),
(sqrt(6) - sqrt(2)): S.Pi / 12,
(sqrt(2) - sqrt(6)): 11*S.Pi / 12,
sqrt(2 - 2/sqrt(5)): S.Pi / 10,
-sqrt(2 - 2/sqrt(5)): 9*S.Pi / 10,
2 / sqrt(2 + sqrt(2)): S.Pi / 8,
-2 / sqrt(2 + sqrt(2)): 7*S.Pi / 8,
2 / sqrt(3): S.Pi / 6,
-2 / sqrt(3): 5*S.Pi / 6,
(sqrt(5) - 1): S.Pi / 5,
(1 - sqrt(5)): 4*S.Pi / 5,
sqrt(2): S.Pi / 4,
-sqrt(2): 3*S.Pi / 4,
sqrt(2 + 2/sqrt(5)): 3*S.Pi / 10,
-sqrt(2 + 2/sqrt(5)): 7*S.Pi / 10,
S(2): S.Pi / 3,
-S(2): 2*S.Pi / 3,
sqrt(2*(2 + sqrt(2))): 3*S.Pi / 8,
-sqrt(2*(2 + sqrt(2))): 5*S.Pi / 8,
(1 + sqrt(5)): 2*S.Pi / 5,
(-1 - sqrt(5)): 3*S.Pi / 5,
(sqrt(6) + sqrt(2)): 5*S.Pi / 12,
(-sqrt(6) - sqrt(2)): 7*S.Pi / 12,
}
if arg in cst_table:
if arg.is_real:
return cst_table[arg]*S.ImaginaryUnit
return cst_table[arg]
if arg is S.ComplexInfinity:
return S.NaN
@staticmethod
@cacheit
def expansion_term(n, x, *previous_terms):
if n == 0:
return log(2 / x)
elif n < 0 or n % 2 == 1:
return S.Zero
else:
x = sympify(x)
if len(previous_terms) > 2 and n > 2:
p = previous_terms[-2]
return p * (n - 1)**2 // (n // 2)**2 * x**2 / 4
else:
k = n // 2
                R = RisingFactorial(S.Half, k) * n
F = factorial(k) * n // 2 * n // 2
return -1 * R / F * x**n / 4
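    # Note (added comment, not in the original source): for even n the terms
    # above give the logarithmic expansion
    #     asech(x) = log(2/x) - x**2/4 - 3*x**4/32 - ...
    # around x = 0 (the series is not a plain Taylor series because of the
    # log(2/x) leading term).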
def inverse(self, argindex=1):
"""
Returns the inverse of this function.
"""
return sech
def _eval_rewrite_as_log(self, arg):
return log(1/arg + sqrt(1/arg**2 - 1))
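
# Illustrative note (added, not part of the original source): the log rewrite
# above can be exercised as follows.  A minimal sketch assuming a standard SymPy
# environment; the printed form of the rewrite may vary between versions.
#
#   >>> from sympy import asech, log, sqrt, simplify
#   >>> from sympy.abc import x
#   >>> simplify(asech(x).rewrite(log) - log(1/x + sqrt(1/x**2 - 1)))
#   0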
| bsd-3-clause |