repo_name
stringlengths 5
100
| path
stringlengths 4
375
| copies
stringclasses 991
values | size
stringlengths 4
7
| content
stringlengths 666
1M
| license
stringclasses 15
values |
---|---|---|---|---|---|
xiaojunwu/crosswalk-test-suite
|
webapi/tct-content-tizen-tests/inst.xpk.py
|
1
|
5746
|
#!/usr/bin/env python
import os
import shutil
import glob
import time
import sys
import subprocess
import string
from optparse import OptionParser, make_option
# Directory containing this script; the package name is derived from it.
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PKG_NAME = os.path.basename(SCRIPT_DIR)
# Parsed command-line options; populated by main().
PARAMETERS = None
# Shell prefix so xwalkctl can reach the 'app' user's D-Bus session bus.
XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/5000/dbus/user_bus_socket"
# Content directory on the target device.
SRC_DIR = "/home/app/content"
# Where this package's test content is installed on the device.
PKG_SRC_DIR = "%s/tct/opt/%s" % (SRC_DIR, PKG_NAME)
def doCMD(cmd):
    """Run *cmd* in a shell, echoing its output line by line.

    Blocks until the command exits and returns a tuple
    (return_code, output_lines).
    """
    # Do not need handle timeout in this short script, let tool do it
    print "-->> \"%s\"" % cmd
    output = []
    cmd_return_code = 1
    cmd_proc = subprocess.Popen(
        cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
    while True:
        output_line = cmd_proc.stdout.readline().strip("\r\n")
        # poll() returns None while the child is still running.
        cmd_return_code = cmd_proc.poll()
        # Stop only when the stream is drained AND the process has exited,
        # so trailing output after exit is not lost.
        if output_line == '' and cmd_return_code != None:
            break
        sys.stdout.write("%s\n" % output_line)
        sys.stdout.flush()
        output.append(output_line)
    return (cmd_return_code, output)
def updateCMD(cmd=None):
    """Wrap xwalkctl commands so they run as the 'app' user with D-Bus set up."""
    if "xwalkctl" not in cmd:
        return cmd
    return "su - app -c '%s;%s'" % (XW_ENV, cmd)
def getPKGID(pkg_name=None):
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell %s" % (
PARAMETERS.device, updateCMD('xwalkctl'))
else:
cmd = "ssh %s \"%s\"" % (
PARAMETERS.device, updateCMD('xwalkctl'))
(return_code, output) = doCMD(cmd)
if return_code != 0:
return None
test_app_id = None
for line in output:
pkg_infos = line.split()
if len(pkg_infos) == 1:
continue
name = pkg_infos[1]
if pkg_name == name:
test_app_id = pkg_infos[0]
print test_app_id
break
return test_app_id
def doRemoteCMD(cmd=None):
    """Execute *cmd* on the target device via sdb shell or ssh."""
    wrapped = updateCMD(cmd)
    if PARAMETERS.mode == "SDB":
        remote = "sdb -s %s shell %s" % (PARAMETERS.device, wrapped)
    else:
        remote = "ssh %s \"%s\"" % (PARAMETERS.device, wrapped)
    return doCMD(remote)
def doRemoteCopy(src=None, dest=None):
    """Copy *src* to *dest* on the target device (sdb push or scp -r).

    Returns True on success and False on failure, matching the callers'
    ``if not doRemoteCopy(...)`` error checks.  The previous version
    returned True when the copy command FAILED, so every successful copy
    was flagged as an error and every failed copy passed silently.
    """
    if PARAMETERS.mode == "SDB":
        cmd_prefix = "sdb -s %s push" % PARAMETERS.device
        cmd = "%s %s %s" % (cmd_prefix, src, dest)
    else:
        cmd = "scp -r %s %s:/%s" % (src, PARAMETERS.device, dest)
    (return_code, output) = doCMD(cmd)
    # Flush device filesystem buffers regardless of the copy result.
    doRemoteCMD("sync")
    # Zero exit status from the copy command means success.
    return return_code == 0
def uninstPKGs():
    """Uninstall every .xpk app found under SCRIPT_DIR and remove device files.

    Returns True when every step succeeded, False otherwise.
    """
    ok = True
    for root, dirs, files in os.walk(SCRIPT_DIR):
        for file in files:
            if not file.endswith(".xpk"):
                continue
            pkg_id = getPKGID(os.path.basename(os.path.splitext(file)[0]))
            if not pkg_id:
                ok = False
                continue
            (return_code, output) = doRemoteCMD(
                "xwalkctl -u %s" % pkg_id)
            for line in output:
                if "Failure" in line:
                    ok = False
                    break
    # Clean up the package content directories on the device.
    (return_code, output) = doRemoteCMD("rm -rf %s" % PKG_SRC_DIR)
    if return_code != 0:
        ok = False
    (return_code, output) = doRemoteCMD("rm -rf %s/%s" % (SRC_DIR, PKG_NAME))
    if return_code != 0:
        ok = False
    return ok
def instPKGs():
    """Push each .xpk to the device, install it, and copy the media directory.

    Returns True when every step succeeded, False otherwise.
    """
    ok = True
    (return_code, output) = doRemoteCMD("mkdir -p %s" % PKG_SRC_DIR)
    if return_code != 0:
        ok = False
    for root, dirs, files in os.walk(SCRIPT_DIR):
        for file in files:
            if not file.endswith(".xpk"):
                continue
            remote_path = "%s/%s" % (SRC_DIR, file)
            if not doRemoteCopy(os.path.join(root, file), remote_path):
                ok = False
            (return_code, output) = doRemoteCMD(
                "xwalkctl -i %s" % remote_path)
            # The pushed package file is no longer needed once installed.
            doRemoteCMD("rm -rf %s" % remote_path)
            for line in output:
                if "Failure" in line:
                    ok = False
                    break
    if not doRemoteCopy("%s/media" % SCRIPT_DIR, "%s/%s" % (SRC_DIR, PKG_NAME)):
        ok = False
    return ok
def main():
try:
usage = "usage: inst.py -i"
opts_parser = OptionParser(usage=usage)
opts_parser.add_option(
"-m", dest="mode", action="store", help="Specify mode")
opts_parser.add_option(
"-s", dest="device", action="store", help="Specify device")
opts_parser.add_option(
"-i", dest="binstpkg", action="store_true", help="Install package")
opts_parser.add_option(
"-u", dest="buninstpkg", action="store_true", help="Uninstall package")
global PARAMETERS
(PARAMETERS, args) = opts_parser.parse_args()
except Exception, e:
print "Got wrong option: %s, exit ..." % e
sys.exit(1)
if not PARAMETERS.mode:
PARAMETERS.mode = "SDB"
if PARAMETERS.mode == "SDB":
if not PARAMETERS.device:
(return_code, output) = doCMD("sdb devices")
for line in output:
if str.find(line, "\tdevice") != -1:
PARAMETERS.device = line.split("\t")[0]
break
else:
PARAMETERS.mode = "SSH"
if not PARAMETERS.device:
print "No device provided"
sys.exit(1)
if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
print "-i and -u are conflict"
sys.exit(1)
if PARAMETERS.buninstpkg:
if not uninstPKGs():
sys.exit(1)
else:
if not instPKGs():
sys.exit(1)
# Script entry point; exits 0 on success (main() calls sys.exit(1) itself
# on any failure).
if __name__ == "__main__":
    main()
    sys.exit(0)
|
bsd-3-clause
|
LabAdvComp/indexd
|
indexd/alias/driver.py
|
1
|
1749
|
import abc
from ..driver_base import SQLAlchemyDriverBase
class AliasDriverABC(SQLAlchemyDriverBase, metaclass=abc.ABCMeta):
    """
    Alias Driver Abstract Base Class.

    Driver interface for interacting with alias backends.  Concrete
    subclasses implement the storage operations below.
    """
    def __init__(self, conn, **config):
        # Connection setup is inherited unchanged from SQLAlchemyDriverBase.
        super().__init__(conn, **config)
    @abc.abstractmethod
    def aliases(self, limit=100, start="", size=None, urls=None, hashes=None):
        """
        Returns a list of aliases.

        limit/start look like pagination controls and size/urls/hashes
        like record filters -- exact semantics are defined by the concrete
        backend; confirm against an implementation.
        """
        raise NotImplementedError("TODO")
    @abc.abstractmethod
    def upsert(
        self,
        name,
        rev=None,
        size=None,
        hashes=None,
        release=None,
        metastring=None,
        host_authorities=None,
        keeper_authority=None,
    ):
        """
        Update or insert alias record.
        """
        raise NotImplementedError("TODO")
    @abc.abstractmethod
    def get(self, did):
        """
        Gets a record given the record id.
        """
        raise NotImplementedError("TODO")
    @abc.abstractmethod
    def delete(self, did, rev):
        """
        Deletes record.

        NOTE(review): *rev* is presumably a revision token guarding against
        concurrent modification -- verify with a concrete backend.
        """
        raise NotImplementedError("TODO")
    @abc.abstractmethod
    def __contains__(self, did):
        """
        Returns True if record is stored by backend.
        Returns False otherwise.
        """
        raise NotImplementedError("TODO")
    @abc.abstractmethod
    def __iter__(self):
        """
        Returns an iterator over unique records stored by backend.
        """
        raise NotImplementedError("TODO")
    @abc.abstractmethod
    def __len__(self):
        """
        Returns the number of unique records stored by backend.
        """
        raise NotImplementedError("TODO")
|
apache-2.0
|
leighpauls/k2cro4
|
third_party/WebKit/Tools/Scripts/webkitpy/common/system/path.py
|
191
|
5046
|
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""generic routines to convert platform-specific paths to URIs."""
import atexit
import subprocess
import sys
import threading
import urllib
def abspath_to_uri(platform, path):
    """Convert a platform-specific absolute path to a file: URL."""
    converted = _convert_path(platform, path)
    return "file:" + _escape(converted)
def cygpath(path):
    """Converts an absolute cygwin path to an absolute Windows path.

    Delegates to a shared long-running cygpath subprocess; locking is
    handled inside _CygPath.convert_using_singleton.
    """
    return _CygPath.convert_using_singleton(path)
# Note that this object is not threadsafe and must only be called
# from multiple threads under protection of a lock (as is done in cygpath())
class _CygPath(object):
    """Manages a long-running 'cygpath' process for file conversion."""
    _lock = None        # created lazily on first convert_using_singleton() call
    _singleton = None   # shared instance, guarded by _lock
    @staticmethod
    def stop_cygpath_subprocess():
        # atexit hook: shut down the child process cleanly if one was started.
        if not _CygPath._lock:
            return
        with _CygPath._lock:
            if _CygPath._singleton:
                _CygPath._singleton.stop()
    @staticmethod
    def convert_using_singleton(path):
        # NOTE(review): creation of _lock itself is unguarded, so two threads
        # racing on the very first call could each create a lock -- confirm
        # callers serialize first use.
        if not _CygPath._lock:
            _CygPath._lock = threading.Lock()
        with _CygPath._lock:
            if not _CygPath._singleton:
                _CygPath._singleton = _CygPath()
                # Make sure the cygpath subprocess always gets shutdown cleanly.
                atexit.register(_CygPath.stop_cygpath_subprocess)
            return _CygPath._singleton.convert(path)
    def __init__(self):
        # Child process is started lazily by convert().
        self._child_process = None
    def start(self):
        assert(self._child_process is None)
        # '-f -' makes cygpath read paths from stdin; '-wa' requests
        # absolute Windows paths.
        args = ['cygpath', '-f', '-', '-wa']
        self._child_process = subprocess.Popen(args,
                                               stdin=subprocess.PIPE,
                                               stdout=subprocess.PIPE)
    def is_running(self):
        if not self._child_process:
            return False
        # returncode stays None until the child has exited.
        return self._child_process.returncode is None
    def stop(self):
        if self._child_process:
            # Closing stdin gives the child EOF so it can exit; then reap it.
            self._child_process.stdin.close()
            self._child_process.wait()
            self._child_process = None
    def convert(self, path):
        """Send one path to the child process and read back its conversion."""
        if not self.is_running():
            self.start()
        self._child_process.stdin.write("%s\r\n" % path)
        self._child_process.stdin.flush()
        windows_path = self._child_process.stdout.readline().rstrip()
        # Some versions of cygpath use lowercase drive letters while others
        # use uppercase. We always convert to uppercase for consistency.
        windows_path = '%s%s' % (windows_path[0].upper(), windows_path[1:])
        return windows_path
def _escape(path):
    """Percent-escape characters in *path* for use in a file: URL."""
    # FIXME: web browsers don't appear to blindly quote every character
    # when converting filenames to files. Instead of using urllib's default
    # rules, we allow a small list of other characters through un-escaped.
    # It's unclear if this is the best possible solution.
    allowed_unescaped = '/+:'
    return urllib.quote(path, safe=allowed_unescaped)
def _convert_path(platform, path):
    """Handles any os-specific path separators, mappings, etc."""
    if platform.is_cygwin():
        # Translate the cygwin path to a native Windows path first.
        path = cygpath(path)
        return _winpath_to_uri(path)
    if platform.is_win():
        return _winpath_to_uri(path)
    return _unixypath_to_uri(path)
def _winpath_to_uri(path):
"""Converts a window absolute path to a file: URL."""
return "///" + path.replace("\\", "/")
def _unixypath_to_uri(path):
"""Converts a unix-style path to a file: URL."""
return "//" + path
|
bsd-3-clause
|
neoareslinux/neutron
|
neutron/plugins/cisco/common/cisco_credentials_v2.py
|
50
|
2067
|
# Copyright 2012 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.plugins.cisco.common import cisco_constants as const
from neutron.plugins.cisco.common import cisco_exceptions as cexc
from neutron.plugins.cisco.common import config
from neutron.plugins.cisco.db import network_db_v2 as cdb
class Store(object):
    """Credential Store backed by the Cisco network DB."""
    @staticmethod
    def initialize():
        """Seed the credential DB from the configured device dictionary."""
        dev_dict = config.get_device_dictionary()
        for dev_id, dev_ip, dev_key in dev_dict:
            # Only the USERNAME entries trigger a credential insert; the
            # matching PASSWORD entry is looked up directly.
            if dev_key != const.USERNAME:
                continue
            try:
                cdb.add_credential(
                    dev_ip,
                    dev_dict[dev_id, dev_ip, const.USERNAME],
                    dev_dict[dev_id, dev_ip, const.PASSWORD],
                    dev_id)
            except cexc.CredentialAlreadyExists:
                # Quietly ignored: this only happens when the module is
                # loaded more than once, in which case the credentials are
                # already populated.
                pass
    @staticmethod
    def get_username(cred_name):
        """Get the username."""
        return cdb.get_credential_name(cred_name)[const.CREDENTIAL_USERNAME]
    @staticmethod
    def get_password(cred_name):
        """Get the password."""
        return cdb.get_credential_name(cred_name)[const.CREDENTIAL_PASSWORD]
|
apache-2.0
|
eliasdorneles/scrapyd
|
scrapyd/environ.py
|
8
|
2423
|
import os
from urlparse import urlparse, urlunparse
from w3lib.url import path_to_file_uri
from zope.interface import implements
from .interfaces import IEnvironment
class Environment(object):
    """Builds the process environment for scheduled crawl jobs."""
    implements(IEnvironment)
    def __init__(self, config, initenv=os.environ):
        self.dbs_dir = config.get('dbs_dir', 'dbs')
        self.logs_dir = config.get('logs_dir', 'logs')
        self.items_dir = config.get('items_dir', 'items')
        self.jobs_to_keep = config.getint('jobs_to_keep', 5)
        has_settings = config.cp.has_section('settings')
        self.settings = dict(config.cp.items('settings')) if has_settings else {}
        self.initenv = initenv
    def get_environment(self, message, slot):
        """Return a copy of the base env extended with the SCRAPY_* variables."""
        project = message['_project']
        env = self.initenv.copy()
        env['SCRAPY_SLOT'] = str(slot)
        env['SCRAPY_PROJECT'] = project
        env['SCRAPY_SPIDER'] = message['_spider']
        env['SCRAPY_JOB'] = message['_job']
        if project in self.settings:
            env['SCRAPY_SETTINGS_MODULE'] = self.settings[project]
        if self.logs_dir:
            env['SCRAPY_LOG_FILE'] = self._get_file(message, self.logs_dir, 'log')
        if self.items_dir:
            env['SCRAPY_FEED_URI'] = self._get_feed_uri(message, 'jl')
        return env
    def _get_feed_uri(self, message, ext):
        """Build the feed URI; empty/file schemes map to local file paths."""
        url = urlparse(self.items_dir)
        if url.scheme.lower() in ['', 'file']:
            return path_to_file_uri(self._get_file(message, url.path, ext))
        feed_path = '/'.join([url.path,
                              message['_project'],
                              message['_spider'],
                              '%s.%s' % (message['_job'], ext)])
        return urlunparse((url.scheme,
                           url.netloc,
                           feed_path,
                           url.params,
                           url.query,
                           url.fragment))
    def _get_file(self, message, dir, ext):
        """Per-job file path under dir/project/spider, pruning old entries."""
        logsdir = os.path.join(dir, message['_project'], message['_spider'])
        if not os.path.exists(logsdir):
            os.makedirs(logsdir)
        by_mtime = sorted((os.path.join(logsdir, name)
                           for name in os.listdir(logsdir)),
                          key=os.path.getmtime)
        # Keep only the newest jobs_to_keep files.
        for stale in by_mtime[:-self.jobs_to_keep]:
            os.remove(stale)
        return os.path.join(logsdir, "%s.%s" % (message['_job'], ext))
|
bsd-3-clause
|
vbannai/neutron
|
neutron/extensions/routerservicetype.py
|
11
|
1602
|
# Copyright 2013 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Kaiwei Fan, VMware, Inc
# Name of the attribute this extension adds to router resources.
SERVICE_TYPE_ID = 'service_type_id'
# Attribute map merged into the v2.0 'routers' resource: service_type_id
# may be set on create (allow_post) but not changed on update
# (allow_put=False), and must be a UUID or None.
EXTENDED_ATTRIBUTES_2_0 = {
    'routers': {
        SERVICE_TYPE_ID: {'allow_post': True, 'allow_put': False,
                          'validate': {'type:uuid_or_none': None},
                          'default': None, 'is_visible': True},
    }
}
class Routerservicetype(object):
    """Extension class supporting router service type."""
    @classmethod
    def get_name(cls):
        """Human-readable extension name."""
        return "Router Service Type"
    @classmethod
    def get_alias(cls):
        """Machine-readable extension alias."""
        return "router-service-type"
    @classmethod
    def get_description(cls):
        """Short description of what the extension provides."""
        return "Provides router service type"
    @classmethod
    def get_namespace(cls):
        """XML namespace (unused by this extension)."""
        return ""
    @classmethod
    def get_updated(cls):
        """Timestamp of the last extension API change."""
        return "2013-01-29T00:00:00-00:00"
    def get_extended_resources(self, version):
        """Attribute map to merge into existing resources for *version*."""
        if version != "2.0":
            return {}
        return EXTENDED_ATTRIBUTES_2_0
|
apache-2.0
|
sfcl/severcart
|
accounts/permissions.py
|
2
|
4520
|
# -*- coding:utf-8 -*-
from django.utils.translation import ugettext as _
# Permission bits with their form values and (untranslated) labels.  The
# msgids are translated lazily inside the loops so ugettext is only called
# for permissions that are actually rendered, as in the original code.
_PERM_CHECKBOXES = (
    (1, 'login', 'Login to the application'),
    (2, 'admin', 'Administration'),
    (4, 'dict', 'Directory'),
    (8, 'report', 'Reporting'),
    (16, 'cart', 'Work with cartridges'),
    (32, 'comp', 'Working with computers'),
    (64, 'cons', 'Work with other consumables'),
    (128, 'print', 'Work with printers'),
)


def construct_checkboxes_perm(sever_permissions):
    """Build checkbox markup for every permission the user does NOT yet have.

    A checkbox is emitted only for absent permission bits, so the form
    offers exactly the rights that can still be granted.
    """
    parts = []
    for bit, value, msgid in _PERM_CHECKBOXES:
        if sever_permissions & bit:
            continue
        parts.append(
            '<label class="marginTop"><input name="perm" type="checkbox" '
            'value="%(value)s"/> %(var)s</label>'
            % {'value': value, 'var': _(msgid)})
    return ''.join(parts)
def decider_user_permissions(sever_permissions, return_set=False, return_li=False):
    """
    Decode the access-rights bitmask.

    Depending on the flags, returns either a set of permission codes
    (return_set), a string of <li> items (return_li), or a string of
    <option> tags (default).
    """
    # (bit, form value, untranslated label, whether the <option> gets a
    # title attribute -- the 'login' option historically has none).
    perm_table = (
        (1, 'login', 'Entrance to the program', False),
        (2, 'admin', 'Administration', True),
        (4, 'dict', 'Working with directories', True),
        (8, 'report', 'Generate reports', True),
        (16, 'cart', 'Work with print cartridges', True),
        (32, 'comp', 'Working with computers', True),
        (64, 'cons', 'Work with other consumables', True),
        (128, 'print', 'Work with printers', True),
    )
    option_parts = []
    li_parts = []
    perm_set = set()
    for bit, value, msgid, with_title in perm_table:
        if not (sever_permissions & bit):
            continue
        label = _(msgid)
        if with_title:
            option_parts.append(
                '<option value="%s" title="%s">%s</option>' % (value, label, label))
        else:
            option_parts.append('<option value="%s">%s</option>' % (value, label))
        li_parts.append('<li>%s</li>' % label)
        perm_set.add(value)
    if return_set:
        return perm_set
    elif return_li:
        return ''.join(li_parts)
    else:
        return ''.join(option_parts)
|
gpl-2.0
|
ruffy91/micropython
|
tests/basics/syntaxerror.py
|
21
|
2762
|
# test syntax errors
def test_syntax(code):
try:
exec(code)
print("no SyntaxError")
except IndentationError:
print("IndentationError")
except SyntaxError:
print("SyntaxError")
# Each snippet below is intentionally malformed; test_syntax() prints the
# error class it raises (or "no SyntaxError").
# non-newline after line-continuation character (lexer error)
test_syntax("a \\a\n")
# dedent mismatch (lexer error)
test_syntax("def f():\n a\n a\n")
# unclosed string (lexer error)
test_syntax("'abc")
# invalid (lexer error)
test_syntax("!")
test_syntax("$")
test_syntax("`")
# bad indentation (lexer error)
test_syntax(" a\n")
# malformed integer literal (parser error)
test_syntax("123z")
# can't assign to literals
test_syntax("1 = 2")
test_syntax("'' = 1")
test_syntax("{} = 1")
# can't assign to comprehension
test_syntax("(i for i in a) = 1")
# can't assign to function
test_syntax("f() = 1")
# can't assign to power
test_syntax("f**2 = 1")
# can't assign to power of composite
test_syntax("f[0]**2 = 1")
# can't assign to empty tuple
test_syntax("() = 1")
# can't have multiple *x on LHS
test_syntax("*a, *b = c")
# can't do augmented assignment to tuple
test_syntax("a, b += c")
test_syntax("(a, b) += c")
# can't do augmented assignment to list
test_syntax("[a, b] += c")
# non-default argument can't follow default argument
test_syntax("def f(a=1, b): pass")
# can't delete these things
test_syntax("del ()")
test_syntax("del f()")
test_syntax("del f[0]**2")
test_syntax("del (a for a in a)")
# must be in a "loop"
test_syntax("break")
test_syntax("continue")
# must be in a function
test_syntax("return")
test_syntax("yield")
test_syntax("nonlocal a")
# error on uPy, warning on CPy
#test_syntax("def f():\n a = 1\n global a")
# default except must be last
test_syntax("try:\n a\nexcept:\n pass\nexcept:\n pass")
# LHS of keywords must be id's
test_syntax("f(1=2)")
# non-keyword after keyword
test_syntax("f(a=1, 2)")
# doesn't error on uPy but should
#test_syntax("f(1, i for i in i)")
# all elements of dict/set must be pairs or singles
test_syntax("{1:2, 3}")
test_syntax("{1, 2:3}")
# can't mix non-bytes with bytes when concatenating
test_syntax("'abc' b'def'")
# can't reuse same name for argument
test_syntax("def f(a, a): pass")
# nonlocal must exist in outer function/class scope
test_syntax("def f():\n def g():\n nonlocal a")
# param can't be redefined as global
test_syntax('def f(x):\n global x')
# param can't be redefined as nonlocal
test_syntax('def f(x):\n nonlocal x')
# can't define variable to be both nonlocal and global
test_syntax('def f():\n nonlocal x\n global x')
# can't have multiple *'s
test_syntax('def f(x, *a, *):\n pass')
test_syntax('lambda x, *a, *: 1')
# **kw must be last
test_syntax('def f(x, *a, **kw, r):\n pass')
test_syntax('lambda x, *a, **kw, r: 1')
|
mit
|
LLNL/spack
|
var/spack/repos/builtin/packages/mongo-c-driver/package.py
|
2
|
5290
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class MongoCDriver(Package):
    """libmongoc is a client library written in C for MongoDB."""
    homepage = "https://github.com/mongodb/mongo-c-driver"
    url = "https://github.com/mongodb/mongo-c-driver/releases/download/1.7.0/mongo-c-driver-1.7.0.tar.gz"
    maintainers = ['michaelkuhn']
    version('1.16.2', sha256='0a722180e5b5c86c415b9256d753b2d5552901dc5d95c9f022072c3cd336887e')
    version('1.9.5', sha256='4a4bd0b0375450250a3da50c050b84b9ba8950ce32e16555714e75ebae0b8019')
    version('1.9.4', sha256='910c2f1b2e3df4d0ea39c2f242160028f90fcb8201f05339a730ec4ba70811fb')
    version('1.9.3', sha256='c2c94ef63aaa09efabcbadc4ac3c8740faa102266bdd2559d550f1955b824398')
    version('1.9.1', sha256='91951444d34581deeaff46cc2985c68805754f618a20ac369b761ce9b621c4cd')
    version('1.8.1', sha256='87d87b7581018cde7edff85f522d43d9c0a226df26fa53b77ca1613a3aca8233')
    version('1.8.0', sha256='1b53883b4cbf08e7d77ad7ab7a02deca90b1719c67f9ad132b47e60d0206ea4e')
    version('1.7.0', sha256='48a0dbd44fef2124b51cf501f06be269b1a39452303b880b37473a6030c6e023')
    version('1.6.3', sha256='82df03de117a3ccf563b9eccfd2e5365df8f215a36dea7446d439969033ced7b')
    version('1.6.2', sha256='7ec27e9be4da2bf9e4b316374f8c29f816f0a0f019b984411777e9681e17f70e')
    version('1.6.1', sha256='1bdfb27944c6da8e56da209a5d56efac70df1f8c4ca4498b46f75bf3f9360898')
    variant('ssl', default=True, description='Enable SSL support.')
    variant('snappy', default=True, description='Enable Snappy support.')
    variant('zlib', default=True, description='Enable zlib support.')
    variant('zstd', default=True, description='Enable zstd support.')
    # Build-system fix for 1.8.1 only; requires re-running autoreconf (see
    # force_autoreconf below).
    patch('https://github.com/mongodb/mongo-c-driver/pull/466.patch', sha256='713a872217d11aba04a774785a2824d26b566543c270a1fa386114f5200fda20', when='@1.8.1')
    depends_on('[email protected]:', type='build', when='@1.10.0:')
    depends_on('autoconf', type='build', when='@1.8.1')
    depends_on('automake', type='build', when='@1.8.1')
    depends_on('libtool', type='build', when='@1.8.1')
    depends_on('m4', type='build', when='@1.8.1')
    depends_on('pkgconfig', type='build')
    # When updating mongo-c-driver, libbson has to be kept in sync.
    depends_on('[email protected]:1.16.99', when='@1.16')
    depends_on('[email protected]:1.9.99', when='@1.9')
    depends_on('[email protected]:1.8.99', when='@1.8')
    depends_on('[email protected]:1.7.99', when='@1.7')
    depends_on('[email protected]:1.6.99', when='@1.6')
    depends_on('openssl', when='+ssl')
    depends_on('snappy', when='+snappy')
    depends_on('zlib', when='+zlib')
    depends_on('zstd', when='+zstd')
    def cmake_args(self):
        """CMake flags for the >= 1.10 build path, derived from the variants."""
        spec = self.spec
        args = [
            '-DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF',
            '-DENABLE_BSON=SYSTEM'
        ]
        if '+ssl' in spec:
            args.append('-DENABLE_SSL=OPENSSL')
        else:
            args.append('-DENABLE_SSL=OFF')
        if '+snappy' in spec:
            args.append('-DENABLE_SNAPPY=ON')
        else:
            args.append('-DENABLE_SNAPPY=OFF')
        if '+zlib' in spec:
            args.append('-DENABLE_ZLIB=SYSTEM')
        else:
            args.append('-DENABLE_ZLIB=OFF')
        if '+zstd' in spec:
            args.append('-DENABLE_ZSTD=ON')
        else:
            args.append('-DENABLE_ZSTD=OFF')
        return args
    def install(self, spec, prefix):
        """CMake-based install (newer releases)."""
        with working_dir('spack-build', create=True):
            # We cannot simply do
            # cmake('..', *std_cmake_args, *self.cmake_args())
            # because that is not Python 2 compatible. Instead, collect
            # arguments into a temporary buffer first.
            args = []
            args.extend(std_cmake_args)
            args.extend(self.cmake_args())
            cmake('..', *args)
            make()
            make('install')
    @property
    def force_autoreconf(self):
        # Run autoreconf due to build system patch
        return self.spec.satisfies('@1.8.1')
    def configure_args(self):
        """Autotools flags for the <= 1.9 build path, derived from the variants."""
        spec = self.spec
        args = [
            '--disable-automatic-init-and-cleanup',
            '--with-libbson=system'
        ]
        if '+ssl' in spec:
            args.append('--enable-ssl=openssl')
        else:
            args.append('--enable-ssl=no')
        if spec.satisfies('@1.7.0:'):
            # --with-{snappy,zlib}=system are broken for versions < 1.8.1
            if '+snappy' not in spec:
                args.append('--with-snappy=no')
            elif spec.satisfies('@1.8.1:'):
                args.append('--with-snappy=system')
            if '+zlib' not in spec:
                args.append('--with-zlib=no')
            elif spec.satisfies('@1.8.1:'):
                args.append('--with-zlib=system')
        return args
    # NOTE: two install() definitions coexist on purpose -- Spack's @when
    # multimethod dispatch selects this autotools variant for @:1.9.99 and
    # the CMake variant above for newer versions.
    @when('@:1.9.99')
    def install(self, spec, prefix):
        """Autotools-based install (releases up to 1.9.x)."""
        configure('--prefix={0}'.format(prefix), *self.configure_args())
        make()
        if self.run_tests:
            make('check')
        make('install')
        if self.run_tests:
            make('installcheck')
|
lgpl-2.1
|
kingvuplus/ME-TEST1
|
lib/python/Plugins/SystemPlugins/DefaultServicesScanner/plugin.py
|
21
|
5485
|
#from Components.ActionMap import ActionMap, NumberActionMap
#from Components.Input import Input
#from Components.Ipkg import IpkgComponent
#from Components.Label import Label
#from Components.MenuList import MenuList
#from Components.Slider import Slider
from Components.NimManager import nimmanager
from Plugins.Plugin import PluginDescriptor
from Screens.ScanSetup import ScanSetup
from Screens.ServiceScan import ServiceScan
from Screens.MessageBox import MessageBox
from Tools.Directories import resolveFilename, SCOPE_CONFIG, copyfile
#from Screens.Screen import Screen
from os import unlink
from enigma import eTimer, eDVBDB
class DefaultServiceScan(ServiceScan):
    # A ServiceScan that starts from an empty service database and
    # auto-confirms its result dialog via a timer.
    skin = """
<screen position="150,115" size="420,390" title="Service Scan">
<widget source="FrontendInfo" render="Pixmap" pixmap="skin_default/icons/scan-s.png" position="5,5" size="64,64" transparent="1" alphatest="on">
<convert type="FrontendInfo">TYPE</convert>
<convert type="ValueRange">0,0</convert>
<convert type="ConditionalShowHide" />
</widget>
<widget source="FrontendInfo" render="Pixmap" pixmap="skin_default/icons/scan-c.png" position="5,5" size="64,64" transparent="1" alphatest="on">
<convert type="FrontendInfo">TYPE</convert>
<convert type="ValueRange">1,1</convert>
<convert type="ConditionalShowHide" />
</widget>
<widget source="FrontendInfo" render="Pixmap" pixmap="skin_default/icons/scan-t.png" position="5,5" size="64,64" transparent="1" alphatest="on">
<convert type="FrontendInfo">TYPE</convert>
<convert type="ValueRange">2,2</convert>
<convert type="ConditionalShowHide" />
</widget>
<widget name="network" position="80,15" size="330,20" font="Regular;20" />
<widget name="transponder" position="80,40" size="330,20" font="Regular;20" />
<widget name="scan_state" position="10,80" zPosition="2" size="400,20" font="Regular;18" />
<widget name="pass" position="10,80" size="400,20" font="Regular;18" />
<widget name="scan_progress" position="10,105" size="400,15" pixmap="skin_default/progress_big.png" borderWidth="2" borderColor="#cccccc" />
<widget name="servicelist" position="10,135" size="400,265" selectionDisabled="1" />
</screen>"""
    def __init__(self, session, scanList):
        # Delete the service database so the scan starts from scratch.
        try:
            unlink(resolveFilename(SCOPE_CONFIG) + "/lamedb");
        except OSError:
            # lamedb may simply not exist yet.
            pass
        db = eDVBDB.getInstance()
        db.reloadServicelist()
        ServiceScan.__init__(self, session, scanList)
        # Fire the inherited ok handler after 1000 ms so the screen is
        # confirmed without user interaction (eTimer repeat behaviour is
        # enigma2-defined -- confirm if a single shot is intended).
        self.timer = eTimer()
        self.timer.callback.append(self.ok)
        self.timer.start(1000)
class DefaultServicesScannerPlugin(ScanSetup):
    # Scans each configured satellite in turn and dumps a per-satellite
    # lamedb plus an .info descriptor into /tmp.
    skin = """
<screen position="100,115" size="520,390" title="Service scan">
<widget name="config" position="10,10" size="500,350" scrollbarMode="showOnDemand" />
<widget name="introduction" position="10,365" size="500,25" font="Regular;20" halign="center" />
</screen>"""
    def __init__(self, session, args = None):
        ScanSetup.__init__(self, session)
        # backup lamedb: restored once all satellites are done (scanFinished).
        confdir = resolveFilename(SCOPE_CONFIG)
        copyfile(confdir + "/lamedb", confdir + "/lamedb.backup")
        self.scan_type.value = "multisat"
        self.createSetup()
        self.scanIndex = 0
        self.selectSat(0)
        self.onFirstExecBegin.append(self.runScan)
    def selectSat(self, index):
        # Enable exactly one satellite in the multisat list; disable the rest.
        for satindex in range(len(self.multiscanlist)):
            if satindex != index:
                self.multiscanlist[satindex][1].value = False
            else:
                self.multiscanlist[satindex][1].value = True
    def runScan(self):
        print "runScan"
        self.keyGo()
    def startScan(self, tlist, flags, feid, networkid = 0):
        # Overrides ScanSetup.startScan so our auto-confirming scan screen
        # is used instead of the stock one.
        print "startScan"
        if len(tlist):
            # flags |= eComponentScan.scanSearchBAT
            self.session.openWithCallback(self.scanFinished, DefaultServiceScan, [{"transponders": tlist, "feid": feid, "flags": flags, "networkid": networkid}])
        else:
            self.session.openWithCallback(self.scanFinished, MessageBox, _("Nothing to scan!\nPlease setup your tuner settings before you start a service scan."), MessageBox.TYPE_ERROR)
    def scanFinished(self, value = None):
        # Called after each satellite scan: save the per-sat results, then
        # either move on to the next satellite or restore the backup and exit.
        print "finished"
        print "self.scanIndex:", self.scanIndex
        db = eDVBDB.getInstance()
        print "self.multiscanlist:", self.multiscanlist
        if len(self.multiscanlist) - 1 >= self.scanIndex and len(self.multiscanlist[self.scanIndex]) > 0:
            satint = self.multiscanlist[self.scanIndex][0]
            print "scanned sat:", satint
            db.saveServicelist("/tmp/lamedb." + str(satint))
            # 'file' shadows the builtin; kept for byte-compatibility.
            file = open("/tmp/sat" + str(satint) + ".info", "w")
            xml = """<default>
<prerequisites>
<tag type="services" />
<bcastsystem type="DVB-S" />
<satellite type="%d" />
</prerequisites>
<info>
<author>%s</author>
<name>%s</name>
</info>
<files type="directories">
<file type="services" name="lamedb.%d">
</file>
</files>
</default>""" % (satint, "OpenPli", nimmanager.getSatDescription(satint), satint)
            file.write(xml)
            file.close()
        self.scanIndex += 1
        # NOTE(review): the '+ 1' here makes the loop stop when scanIndex
        # reaches len-1, which appears to skip scanning the final list
        # entry -- confirm whether that is intended.
        if self.scanIndex + 1 >= len(self.multiscanlist):
            print "no more sats to scan"
            confdir = resolveFilename(SCOPE_CONFIG)
            copyfile(confdir + "/lamedb.backup", confdir + "/lamedb")
            db.reloadServicelist()
            self.close()
        else:
            self.selectSat(self.scanIndex)
            self.keyGo()
def DefaultServicesScannerMain(session, **kwargs):
	"""Plugin entry point: open the scanner screen in the running session."""
	session.open(DefaultServicesScannerPlugin)
def Plugins(**kwargs):
	"""Enigma2 plugin registry hook: list this plugin in the plugin menu."""
	return PluginDescriptor(name=_("Default Services Scanner"), description=_("Scans default lamedbs sorted by satellite with a connected dish positioner"), where = PluginDescriptor.WHERE_PLUGINMENU, needsRestart = False, fnc=DefaultServicesScannerMain)
|
gpl-2.0
|
Yethiel/re-volt-addon
|
io_revolt/texanim.py
|
1
|
15695
|
"""
Name: texanim
Purpose: Provides operators and functions for the texture animation panel
Description:
Moved from operators and panels here to reduce script line amount
"""
if "bpy" in locals():
import imp
imp.reload(common)
import bpy
from . import common
from . import rvstruct
from .common import *
import bpy
class ButtonCopyUvToFrame(bpy.types.Operator):
    bl_idname = "texanim.copy_uv_to_frame"
    bl_label = "UV to Frame"
    bl_description = "Copies the UV coordinates of the currently selected face to the texture animation frame"

    def execute(self, context):
        """Copy the active face's UVs into the current animation frame."""
        copy_uv_to_frame(context)
        redraw()
        return {"FINISHED"}
class ButtonCopyFrameToUv(bpy.types.Operator):
    bl_idname = "texanim.copy_frame_to_uv"
    bl_label = "Frame to UV"
    bl_description = "Copies the UV coordinates of the frame to the currently selected face"

    def execute(self, context):
        """Apply the current animation frame's UVs to the active face."""
        copy_frame_to_uv(context)
        redraw()
        return {"FINISHED"}
class PreviewNextFrame(bpy.types.Operator):
    bl_idname = "texanim.prev_next"
    bl_label = "Preview Next"
    bl_description = "Loads the next frame and previews it on the selected face"

    def execute(self, context):
        """Step the frame counter forward and preview it on the active face."""
        context.scene.revolt.ta_current_frame += 1
        copy_frame_to_uv(context)
        return {"FINISHED"}
class PreviewPrevFrame(bpy.types.Operator):
    bl_idname = "texanim.prev_prev"
    bl_label = "Preview Previous"
    bl_description = "Loads the previous frame and previews it on the selected face"

    def execute(self, context):
        """Step the frame counter back and preview it on the active face."""
        context.scene.revolt.ta_current_frame -= 1
        copy_frame_to_uv(context)
        return {"FINISHED"}
class TexAnimTransform(bpy.types.Operator):
    """Linearly interpolates frame UVs from a start frame to an end frame.

    Shown as a property dialog (see invoke/draw); every frame in the chosen
    range receives the given delay and texture plus UV coordinates blended
    between the two key frames.
    """
    bl_idname = "texanim.transform"
    bl_label = "Transform Animation"
    bl_description = "Creates a linear animation from one frame to another"
    # Dialog inputs, laid out in draw() below.
    frame_start = bpy.props.IntProperty(
        name = "Start Frame",
        description = "Start frame of the animation",
        min = 0
    )
    frame_end = bpy.props.IntProperty(
        name = "End Frame",
        description = "End frame of the animation",
        min = 0,
    )
    delay = bpy.props.FloatProperty(
        name = "Frame duration",
        description = "Duration of every frame",
        min = 0.0,
        default = 0.02,
    )
    texture = bpy.props.IntProperty(
        name = "Texture",
        default = 0,
        min = -1,
        max = TEX_PAGES_MAX-1,
        description = "Texture for every frame"
    )
    def execute(self, context):
        props = context.scene.revolt
        # Animations live on the scene as a stringified list of dicts.
        # NOTE(review): eval() of a stored string is unsafe for untrusted
        # .blend files -- consider ast.literal_eval.
        ta = eval(props.texture_animations)
        slot = props.ta_current_slot
        max_frames = props.ta_max_frames
        frame_start = self.frame_start
        frame_end = self.frame_end
        if self.frame_end > max_frames - 1:
            msg_box(
                "Frame out of range.\n"
                "Please set the amount of frames to {}.".format(
                    frame_end + 1),
                "ERROR"
            )
            return {'FINISHED'}
        elif self.frame_start == self.frame_end:
            # Also guards the division by (frame_end - frame_start) below.
            msg_box("Frame range too short.", "ERROR")
            return {'FINISHED'}
        # UV quads (four corners, u/v each) of the two key frames.
        uv_start = (
            (ta[slot]["frames"][frame_start]["uv"][0]["u"],
             ta[slot]["frames"][frame_start]["uv"][0]["v"]),
            (ta[slot]["frames"][frame_start]["uv"][1]["u"],
             ta[slot]["frames"][frame_start]["uv"][1]["v"]),
            (ta[slot]["frames"][frame_start]["uv"][2]["u"],
             ta[slot]["frames"][frame_start]["uv"][2]["v"]),
            (ta[slot]["frames"][frame_start]["uv"][3]["u"],
             ta[slot]["frames"][frame_start]["uv"][3]["v"])
        )
        uv_end = (
            (ta[slot]["frames"][frame_end]["uv"][0]["u"],
             ta[slot]["frames"][frame_end]["uv"][0]["v"]),
            (ta[slot]["frames"][frame_end]["uv"][1]["u"],
             ta[slot]["frames"][frame_end]["uv"][1]["v"]),
            (ta[slot]["frames"][frame_end]["uv"][2]["u"],
             ta[slot]["frames"][frame_end]["uv"][2]["v"]),
            (ta[slot]["frames"][frame_end]["uv"][3]["u"],
             ta[slot]["frames"][frame_end]["uv"][3]["v"])
        )
        nframes = abs(frame_end - frame_start) + 1
        for i in range(0, nframes):
            current_frame = frame_start + i
            # Interpolation factor: 0.0 at the start frame, 1.0 at the end.
            # NOTE(review): if frame_end < frame_start this goes negative
            # while frames are still written upwards from frame_start --
            # confirm the operator is only used with frame_start < frame_end.
            prog = i / (frame_end - frame_start)
            ta[slot]["frames"][frame_start + i]["delay"] = self.delay
            ta[slot]["frames"][frame_start + i]["texture"] = self.texture
            for j in range(0, 4):
                # Linear blend of each corner's U and V.
                new_u = uv_start[j][0] * (1 - prog) + uv_end[j][0] * prog
                new_v = uv_start[j][1] * (1 - prog) + uv_end[j][1] * prog
                ta[slot]["frames"][frame_start + i]["uv"][j]["u"] = new_u
                ta[slot]["frames"][frame_start + i]["uv"][j]["v"] = new_v
        props.texture_animations = str(ta)
        update_ta_current_frame(self, context)
        msg_box("Animation from frame {} to {} completed.".format(
            frame_start, frame_end),
            icon = "FILE_TICK"
        )
        return {'FINISHED'}
    def invoke(self, context, event):
        # Pop up the property dialog before execute() runs.
        wm = context.window_manager
        return wm.invoke_props_dialog(self)
    def draw(self, context):
        layout = self.layout
        row = layout.row(align=True)
        row.prop(self, "frame_start")
        row.prop(self, "frame_end")
        row = layout.row()
        row.prop(self, "delay", icon="PREVIEW_RANGE")
        row.prop(self, "texture", icon="TEXTURE")
class TexAnimGrid(bpy.types.Operator):
    """Fills frames with UVs stepping cell by cell through a texture grid.

    Each generated frame gets the UV quad of one grid cell, so a
    sprite-sheet texture of grid_x by grid_y cells plays back in order.
    """
    bl_idname = "texanim.grid"
    bl_label = "Grid Animation"
    bl_description = "Creates an animation based on a grid texture."
    frame_start = bpy.props.IntProperty(
        name = "Start Frame",
        min = 0,
        description = "Start frame of the animation"
    )
    grid_x = bpy.props.IntProperty(
        name = "X Resolution",
        min = 1,
        default = 2,
        description = "Amount of frames along the X axis"
    )
    grid_y = bpy.props.IntProperty(
        name = "Y Resolution",
        min = 1,
        default = 2,
        description = "Amount of frames along the Y axis"
    )
    delay = bpy.props.FloatProperty(
        name = "Frame duration",
        description = "Duration of every frame",
        min = 0.0,
        default = 0.02,
    )
    texture = bpy.props.IntProperty(
        name = "Texture",
        default = 0,
        min = -1,
        max = TEX_PAGES_MAX-1,
        description = "Texture for every frame"
    )
    def execute(self, context):
        props = context.scene.revolt
        # NOTE(review): eval() of a stored string is unsafe for untrusted
        # .blend files -- consider ast.literal_eval.
        ta = eval(props.texture_animations)
        slot = props.ta_current_slot
        max_frames = props.ta_max_frames
        frame_start = self.frame_start
        grid_x = self.grid_x
        grid_y = self.grid_y
        nframes = grid_x * grid_y
        # BUG FIX: the original error message referenced an undefined name
        # (frame_end), so any out-of-range input raised NameError instead of
        # showing the message. The bound now also includes frame_start so the
        # writes at frame_start + i can never index past the frame list.
        if frame_start + nframes > max_frames:
            msg_box(
                "Frame out of range.\n"
                "Please set the amount of frames to {}.".format(
                    frame_start + nframes),
                "ERROR"
            )
            return {'FINISHED'}
        # NOTE(review): the loop bounds (y over grid_x, x over grid_y) look
        # swapped for non-square grids; preserved as-is -- confirm intent.
        i = 0
        for y in range(grid_x):
            for x in range(grid_y):
                # Corners of grid cell (x, y) in normalized UV space.
                uv0 = (x/grid_x, y/grid_y)
                uv1 = ((x+1)/grid_x, y/grid_y)
                uv2 = ((x+1)/grid_x, (y+1)/grid_y)
                uv3 = (x/grid_x, (y+1)/grid_y)
                frame = ta[slot]["frames"][frame_start + i]
                frame["delay"] = self.delay
                frame["texture"] = self.texture
                frame["uv"][0]["u"] = uv0[0]
                frame["uv"][0]["v"] = uv0[1]
                frame["uv"][1]["u"] = uv1[0]
                frame["uv"][1]["v"] = uv1[1]
                frame["uv"][2]["u"] = uv2[0]
                frame["uv"][2]["v"] = uv2[1]
                frame["uv"][3]["u"] = uv3[0]
                frame["uv"][3]["v"] = uv3[1]
                i += 1
        props.texture_animations = str(ta)
        update_ta_current_frame(self, context)
        msg_box("Animation of {} frames completed.".format(
            nframes),
            icon = "FILE_TICK"
        )
        return {'FINISHED'}
    def invoke(self, context, event):
        # Pop up the property dialog before execute() runs.
        wm = context.window_manager
        return wm.invoke_props_dialog(self)
    def draw(self, context):
        layout = self.layout
        row = layout.row(align=True)
        row.prop(self, "frame_start")
        row = layout.row(align=True)
        row.prop(self, "grid_x")
        row.prop(self, "grid_y")
        row = layout.row()
        row.prop(self, "delay", icon="PREVIEW_RANGE")
        row.prop(self, "texture", icon="TEXTURE")
def update_ta_max_slots(self, context):
    """Grow the animation list so it has one entry per slot.

    Update callback for the ta_max_slots property: appends empty
    rvstruct.TexAnimation dicts until the list length matches the
    requested slot count, then writes the list back to the scene.
    """
    props = context.scene.revolt
    if props.ta_max_slots > 0:
        dprint("TexAnim: Updating max slots..")
        # Converts the texture animations from string to a list of dicts.
        # NOTE(review): eval() of a stored string is unsafe for untrusted
        # .blend files -- consider ast.literal_eval.
        ta = eval(props.texture_animations)
        # Creates a new texture animation if there is none in the slot.
        # (Unused locals `slot`/`frame` from the original were removed.)
        while len(ta) < props.ta_max_slots:
            dprint("TexAnim: Creating new animation slot... ({}/{})".format(
                len(ta) + 1, props.ta_max_slots)
            )
            ta.append(rvstruct.TexAnimation().as_dict())
        # Saves the texture animation
        props.texture_animations = str(ta)
        # Updates the rest of the UI
        # update_ta_current_slot(self, context)
def update_ta_max_frames(self, context):
    """Sync the active slot's frame count and pad it with empty frames."""
    props = context.scene.revolt
    anim_slot = props.ta_current_slot
    dprint("TexAnim: Updating max frames..")
    animations = eval(props.texture_animations)
    animations[anim_slot]["frame_count"] = props.ta_max_frames
    # Append empty frames until the slot holds at least ta_max_frames.
    while len(animations[anim_slot]["frames"]) < props.ta_max_frames:
        dprint("Creating new animation frame... ({}/{})".format(
            len(animations[anim_slot]["frames"]) + 1, props.ta_max_frames))
        animations[anim_slot]["frames"].append(rvstruct.Frame().as_dict())
    props.texture_animations = str(animations)
def update_ta_current_slot(self, context):
    """Clamp the slot selection, then refresh the frame count and frame UI."""
    props = context.scene.revolt
    chosen_slot = props.ta_current_slot
    current_frame = props.ta_current_frame
    dprint("TexAnim: Updating current slot..")
    animations = eval(props.texture_animations)
    # Out-of-range selection: snap back to the last valid slot. The
    # assignment re-triggers this handler, which then takes the normal path.
    if chosen_slot > props.ta_max_slots - 1:
        props.ta_current_slot = props.ta_max_slots - 1
        return
    props.texture_animations = str(animations)
    props.ta_max_frames = animations[chosen_slot]["frame_count"]
    update_ta_current_frame(self, context)
# Texture Animation
def update_ta_current_frame(self, context):
    """Load the current frame's texture, delay and UVs into the UI properties."""
    props = context.scene.revolt
    slot = props.ta_current_slot
    frame = props.ta_current_frame
    dprint("TexAnim: Updating current frame..")
    # Converts the texture animations from string to dict
    ta = eval(props.texture_animations)
    # Resets the number if it's out of bounds; the assignment re-triggers
    # this handler with the clamped value.
    if frame > props.ta_max_frames - 1:
        props.ta_current_frame = props.ta_max_frames - 1
        return
    props.ta_current_frame_tex = ta[slot]["frames"][frame]["texture"]
    props.ta_current_frame_delay = ta[slot]["frames"][frame]["delay"]
    uv = ta[slot]["frames"][frame]["uv"]
    # UVs are stored in reverse vertex order with V flipped (1 - v):
    # stored index 3 maps to UI vertex 0, and so on.
    props.ta_current_frame_uv0 = (uv[3]["u"], 1 - uv[3]["v"])
    props.ta_current_frame_uv1 = (uv[2]["u"], 1 - uv[2]["v"])
    props.ta_current_frame_uv2 = (uv[1]["u"], 1 - uv[1]["v"])
    props.ta_current_frame_uv3 = (uv[0]["u"], 1 - uv[0]["v"])
def update_ta_current_frame_tex(self, context):
    """Write the UI texture number into the current frame of the active slot."""
    props = context.scene.revolt
    dprint("TexAnim: Updating current frame texture..")
    animations = eval(props.texture_animations)
    current = animations[props.ta_current_slot]["frames"][props.ta_current_frame]
    current["texture"] = props.ta_current_frame_tex
    # Persist the modified structure back onto the scene.
    props.texture_animations = str(animations)
def update_ta_current_frame_delay(self, context):
    """Write the UI frame duration into the current frame of the active slot."""
    props = context.scene.revolt
    dprint("TexAnim: Updating current frame delay..")
    animations = eval(props.texture_animations)
    current = animations[props.ta_current_slot]["frames"][props.ta_current_frame]
    current["delay"] = props.ta_current_frame_delay
    # Persist the modified structure back onto the scene.
    props.texture_animations = str(animations)
def update_ta_current_frame_uv(context, num):
    """Store UI UV vertex *num* into the frame, undoing the reverse order and V flip."""
    props = bpy.context.scene.revolt
    prop_str = "ta_current_frame_uv{}".format(num)
    slot = props.ta_current_slot
    frame = props.ta_current_frame
    # UVs are saved in reverse vertex order, so map 0..3 onto 3..0.
    num = (3, 2, 1, 0)[num]
    dprint("TexAnim: Updating current frame UV for {}..".format(num))
    animations = eval(props.texture_animations)
    ui_uv = getattr(props, prop_str)
    animations[slot]["frames"][frame]["uv"][num]["u"] = ui_uv[0]
    animations[slot]["frames"][frame]["uv"][num]["v"] = 1 - ui_uv[1]
    props.texture_animations = str(animations)
def copy_uv_to_frame(context):
    """Write the active face's UV loop coordinates into the frame UI properties."""
    props = context.scene.revolt
    if not context.object.data:
        dprint("No object for UV anim")
        return
    bm = get_edit_bmesh(context.object)
    uv_layer = bm.loops.layers.uv.get("UVMap")
    sel_face = get_active_face(bm)
    if not sel_face:
        msg_box("Please select a face first")
        return
    if not uv_layer:
        msg_box("Please create a UV layer first")
        return
    # Only the first four loops map onto the frame's four UV slots.
    for lnum, loop in enumerate(sel_face.loops):
        if lnum > 3:
            break
        uv = loop[uv_layer].uv
        setattr(props, "ta_current_frame_uv{}".format(lnum), (uv[0], uv[1]))
def copy_frame_to_uv(context):
    """Apply the frame UI UV properties to the active face's first four loops."""
    props = context.scene.revolt
    if not context.object.data:
        dprint("No object for UV anim")
        return
    bm = get_edit_bmesh(context.object)
    uv_layer = bm.loops.layers.uv.get("UVMap")
    sel_face = get_active_face(bm)
    if not sel_face:
        msg_box("Please select a face first")
        return
    if not uv_layer:
        msg_box("Please create a UV layer first")
        return
    frame_uvs = (
        props.ta_current_frame_uv0,
        props.ta_current_frame_uv1,
        props.ta_current_frame_uv2,
        props.ta_current_frame_uv3,
    )
    # Loops beyond the fourth have no matching frame UV and are skipped.
    for lnum, loop in enumerate(sel_face.loops):
        if lnum > 3:
            break
        loop[uv_layer].uv = frame_uvs[lnum]
|
gpl-3.0
|
disigma/depot_tools
|
third_party/pylint/checkers/__init__.py
|
67
|
3927
|
# Copyright (c) 2003-2013 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:[email protected]
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""utilities methods and classes for checkers
Base id of standard checkers (used in msg and report ids):
01: base
02: classes
03: format
04: import
05: misc
06: variables
07: exceptions
08: similar
09: design_analysis
10: newstyle
11: typecheck
12: logging
13: string_format
14: string_constant
15: stdlib
16: python3
17-50: not yet used: reserved for future internal checkers.
51-99: perhaps used: reserved for external checkers
The raw_metrics checker has no number associated since it doesn't emit any
messages nor reports. XXX not true, emit a 07 report !
"""
import sys
import tokenize
import warnings
from logilab.common.configuration import OptionsProviderMixIn
from pylint.reporters import diff_string
from pylint.utils import register_plugins
from pylint.interfaces import UNDEFINED
def table_lines_from_stats(stats, old_stats, columns):
    """Collect <columns> values from <stats> and <old_stats>.

    Returns a flat list of formatted cells (metric, new, old, diff) per
    column, suitable for feeding a ureport.Table object. Missing old
    values yield 'NC' for both the old and the diff cell.
    """
    lines = []
    for m_type in columns:
        new = stats[m_type]
        if isinstance(new, float):
            fmt = lambda num: '%.3f' % num
        else:
            fmt = str
        old = old_stats.get(m_type)
        if old is None:
            old, diff_str = 'NC', 'NC'
        else:
            diff_str = diff_string(old, new)
            old = fmt(old)
        lines += (m_type.replace('_', ' '), fmt(new), old, diff_str)
    return lines
class BaseChecker(OptionsProviderMixIn):
    """base class for checkers

    Subclasses declare their identity and contract through the class
    attributes below; pylint reads them when the checker is registered.
    """
    # checker name (you may reuse an existing one)
    name = None
    # options level (0 will be displayed in --help, 1 in --long-help)
    level = 1
    # ordered list of options to control the checker behaviour
    options = ()
    # messages issued by this checker
    msgs = {}
    # reports issued by this checker
    reports = ()
    # mark this checker as enabled or not.
    enabled = True
    def __init__(self, linter=None):
        """checker instances should have the linter as argument

        linter is an object implementing ILinter
        """
        # Normalize the declared name so lookups are case-insensitive.
        self.name = self.name.lower()
        OptionsProviderMixIn.__init__(self)
        self.linter = linter
    def add_message(self, msg_id, line=None, node=None, args=None, confidence=UNDEFINED):
        """add a message of a given type"""
        # Delegates to the linter, which owns message filtering/reporting.
        self.linter.add_message(msg_id, line, node, args, confidence)
    # dummy methods implementing the IChecker interface
    def open(self):
        """called before visiting project (i.e set of modules)"""
    def close(self):
        """called after visiting project (i.e set of modules)"""
class BaseTokenChecker(BaseChecker):
    """Base class for checkers that want to have access to the token stream."""
    def process_tokens(self, tokens):
        """Should be overridden by subclasses."""
        # Abstract by convention; presumably receives tokenize-style
        # tokens -- confirm against the linter's token dispatch.
        raise NotImplementedError()
def initialize(linter):
    """initialize linter with checkers in this package """
    # __path__[0] is this package's directory; register_plugins loads the
    # checker modules found there into the given linter.
    register_plugins(linter, __path__[0])
# Names re-exported as the package's public API.
__all__ = ('BaseChecker', 'initialize')
|
bsd-3-clause
|
meabsence/python-for-android
|
python3-alpha/python3-src/Lib/encodings/mac_farsi.py
|
272
|
15170
|
""" Python Character Mapping Codec mac_farsi generated from 'MAPPINGS/VENDORS/APPLE/FARSI.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless mac-farsi codec backed by the generated charmap tables."""
    def encode(self,input,errors='strict'):
        # str -> bytes via the inverse (encoding) table built below.
        return codecs.charmap_encode(input,errors,encoding_table)
    def decode(self,input,errors='strict'):
        # bytes -> str via the 256-entry decoding table below.
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        # Charmap encoding is stateless, so `final` needs no special handling.
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        # Charmap decoding is stateless, so `final` needs no special handling.
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Inherits encode() from Codec; no extra state needed.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Inherits decode() from Codec; no extra state needed.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo entry consumed by the encodings package registry."""
    return codecs.CodecInfo(
        name='mac-farsi',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> CONTROL CHARACTER
'\x01' # 0x01 -> CONTROL CHARACTER
'\x02' # 0x02 -> CONTROL CHARACTER
'\x03' # 0x03 -> CONTROL CHARACTER
'\x04' # 0x04 -> CONTROL CHARACTER
'\x05' # 0x05 -> CONTROL CHARACTER
'\x06' # 0x06 -> CONTROL CHARACTER
'\x07' # 0x07 -> CONTROL CHARACTER
'\x08' # 0x08 -> CONTROL CHARACTER
'\t' # 0x09 -> CONTROL CHARACTER
'\n' # 0x0A -> CONTROL CHARACTER
'\x0b' # 0x0B -> CONTROL CHARACTER
'\x0c' # 0x0C -> CONTROL CHARACTER
'\r' # 0x0D -> CONTROL CHARACTER
'\x0e' # 0x0E -> CONTROL CHARACTER
'\x0f' # 0x0F -> CONTROL CHARACTER
'\x10' # 0x10 -> CONTROL CHARACTER
'\x11' # 0x11 -> CONTROL CHARACTER
'\x12' # 0x12 -> CONTROL CHARACTER
'\x13' # 0x13 -> CONTROL CHARACTER
'\x14' # 0x14 -> CONTROL CHARACTER
'\x15' # 0x15 -> CONTROL CHARACTER
'\x16' # 0x16 -> CONTROL CHARACTER
'\x17' # 0x17 -> CONTROL CHARACTER
'\x18' # 0x18 -> CONTROL CHARACTER
'\x19' # 0x19 -> CONTROL CHARACTER
'\x1a' # 0x1A -> CONTROL CHARACTER
'\x1b' # 0x1B -> CONTROL CHARACTER
'\x1c' # 0x1C -> CONTROL CHARACTER
'\x1d' # 0x1D -> CONTROL CHARACTER
'\x1e' # 0x1E -> CONTROL CHARACTER
'\x1f' # 0x1F -> CONTROL CHARACTER
' ' # 0x20 -> SPACE, left-right
'!' # 0x21 -> EXCLAMATION MARK, left-right
'"' # 0x22 -> QUOTATION MARK, left-right
'#' # 0x23 -> NUMBER SIGN, left-right
'$' # 0x24 -> DOLLAR SIGN, left-right
'%' # 0x25 -> PERCENT SIGN, left-right
'&' # 0x26 -> AMPERSAND, left-right
"'" # 0x27 -> APOSTROPHE, left-right
'(' # 0x28 -> LEFT PARENTHESIS, left-right
')' # 0x29 -> RIGHT PARENTHESIS, left-right
'*' # 0x2A -> ASTERISK, left-right
'+' # 0x2B -> PLUS SIGN, left-right
',' # 0x2C -> COMMA, left-right; in Arabic-script context, displayed as 0x066C ARABIC THOUSANDS SEPARATOR
'-' # 0x2D -> HYPHEN-MINUS, left-right
'.' # 0x2E -> FULL STOP, left-right; in Arabic-script context, displayed as 0x066B ARABIC DECIMAL SEPARATOR
'/' # 0x2F -> SOLIDUS, left-right
'0' # 0x30 -> DIGIT ZERO; in Arabic-script context, displayed as 0x06F0 EXTENDED ARABIC-INDIC DIGIT ZERO
'1' # 0x31 -> DIGIT ONE; in Arabic-script context, displayed as 0x06F1 EXTENDED ARABIC-INDIC DIGIT ONE
'2' # 0x32 -> DIGIT TWO; in Arabic-script context, displayed as 0x06F2 EXTENDED ARABIC-INDIC DIGIT TWO
'3' # 0x33 -> DIGIT THREE; in Arabic-script context, displayed as 0x06F3 EXTENDED ARABIC-INDIC DIGIT THREE
'4' # 0x34 -> DIGIT FOUR; in Arabic-script context, displayed as 0x06F4 EXTENDED ARABIC-INDIC DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE; in Arabic-script context, displayed as 0x06F5 EXTENDED ARABIC-INDIC DIGIT FIVE
'6' # 0x36 -> DIGIT SIX; in Arabic-script context, displayed as 0x06F6 EXTENDED ARABIC-INDIC DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN; in Arabic-script context, displayed as 0x06F7 EXTENDED ARABIC-INDIC DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT; in Arabic-script context, displayed as 0x06F8 EXTENDED ARABIC-INDIC DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE; in Arabic-script context, displayed as 0x06F9 EXTENDED ARABIC-INDIC DIGIT NINE
':' # 0x3A -> COLON, left-right
';' # 0x3B -> SEMICOLON, left-right
'<' # 0x3C -> LESS-THAN SIGN, left-right
'=' # 0x3D -> EQUALS SIGN, left-right
'>' # 0x3E -> GREATER-THAN SIGN, left-right
'?' # 0x3F -> QUESTION MARK, left-right
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET, left-right
'\\' # 0x5C -> REVERSE SOLIDUS, left-right
']' # 0x5D -> RIGHT SQUARE BRACKET, left-right
'^' # 0x5E -> CIRCUMFLEX ACCENT, left-right
'_' # 0x5F -> LOW LINE, left-right
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET, left-right
'|' # 0x7C -> VERTICAL LINE, left-right
'}' # 0x7D -> RIGHT CURLY BRACKET, left-right
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> CONTROL CHARACTER
'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xa0' # 0x81 -> NO-BREAK SPACE, right-left
'\xc7' # 0x82 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
'\xd1' # 0x84 -> LATIN CAPITAL LETTER N WITH TILDE
'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE
'\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE
'\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
'\u06ba' # 0x8B -> ARABIC LETTER NOON GHUNNA
'\xab' # 0x8C -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK, right-left
'\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA
'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
'\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE
'\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
'\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS
'\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE
'\u2026' # 0x93 -> HORIZONTAL ELLIPSIS, right-left
'\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\xef' # 0x95 -> LATIN SMALL LETTER I WITH DIAERESIS
'\xf1' # 0x96 -> LATIN SMALL LETTER N WITH TILDE
'\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE
'\xbb' # 0x98 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK, right-left
'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
'\xf7' # 0x9B -> DIVISION SIGN, right-left
'\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE
'\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE
'\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
' ' # 0xA0 -> SPACE, right-left
'!' # 0xA1 -> EXCLAMATION MARK, right-left
'"' # 0xA2 -> QUOTATION MARK, right-left
'#' # 0xA3 -> NUMBER SIGN, right-left
'$' # 0xA4 -> DOLLAR SIGN, right-left
'\u066a' # 0xA5 -> ARABIC PERCENT SIGN
'&' # 0xA6 -> AMPERSAND, right-left
"'" # 0xA7 -> APOSTROPHE, right-left
'(' # 0xA8 -> LEFT PARENTHESIS, right-left
')' # 0xA9 -> RIGHT PARENTHESIS, right-left
'*' # 0xAA -> ASTERISK, right-left
'+' # 0xAB -> PLUS SIGN, right-left
'\u060c' # 0xAC -> ARABIC COMMA
'-' # 0xAD -> HYPHEN-MINUS, right-left
'.' # 0xAE -> FULL STOP, right-left
'/' # 0xAF -> SOLIDUS, right-left
'\u06f0' # 0xB0 -> EXTENDED ARABIC-INDIC DIGIT ZERO, right-left (need override)
'\u06f1' # 0xB1 -> EXTENDED ARABIC-INDIC DIGIT ONE, right-left (need override)
'\u06f2' # 0xB2 -> EXTENDED ARABIC-INDIC DIGIT TWO, right-left (need override)
'\u06f3' # 0xB3 -> EXTENDED ARABIC-INDIC DIGIT THREE, right-left (need override)
'\u06f4' # 0xB4 -> EXTENDED ARABIC-INDIC DIGIT FOUR, right-left (need override)
'\u06f5' # 0xB5 -> EXTENDED ARABIC-INDIC DIGIT FIVE, right-left (need override)
'\u06f6' # 0xB6 -> EXTENDED ARABIC-INDIC DIGIT SIX, right-left (need override)
'\u06f7' # 0xB7 -> EXTENDED ARABIC-INDIC DIGIT SEVEN, right-left (need override)
'\u06f8' # 0xB8 -> EXTENDED ARABIC-INDIC DIGIT EIGHT, right-left (need override)
'\u06f9' # 0xB9 -> EXTENDED ARABIC-INDIC DIGIT NINE, right-left (need override)
':' # 0xBA -> COLON, right-left
'\u061b' # 0xBB -> ARABIC SEMICOLON
'<' # 0xBC -> LESS-THAN SIGN, right-left
'=' # 0xBD -> EQUALS SIGN, right-left
'>' # 0xBE -> GREATER-THAN SIGN, right-left
'\u061f' # 0xBF -> ARABIC QUESTION MARK
'\u274a' # 0xC0 -> EIGHT TEARDROP-SPOKED PROPELLER ASTERISK, right-left
'\u0621' # 0xC1 -> ARABIC LETTER HAMZA
'\u0622' # 0xC2 -> ARABIC LETTER ALEF WITH MADDA ABOVE
'\u0623' # 0xC3 -> ARABIC LETTER ALEF WITH HAMZA ABOVE
'\u0624' # 0xC4 -> ARABIC LETTER WAW WITH HAMZA ABOVE
'\u0625' # 0xC5 -> ARABIC LETTER ALEF WITH HAMZA BELOW
'\u0626' # 0xC6 -> ARABIC LETTER YEH WITH HAMZA ABOVE
'\u0627' # 0xC7 -> ARABIC LETTER ALEF
'\u0628' # 0xC8 -> ARABIC LETTER BEH
'\u0629' # 0xC9 -> ARABIC LETTER TEH MARBUTA
'\u062a' # 0xCA -> ARABIC LETTER TEH
'\u062b' # 0xCB -> ARABIC LETTER THEH
'\u062c' # 0xCC -> ARABIC LETTER JEEM
'\u062d' # 0xCD -> ARABIC LETTER HAH
'\u062e' # 0xCE -> ARABIC LETTER KHAH
'\u062f' # 0xCF -> ARABIC LETTER DAL
'\u0630' # 0xD0 -> ARABIC LETTER THAL
'\u0631' # 0xD1 -> ARABIC LETTER REH
'\u0632' # 0xD2 -> ARABIC LETTER ZAIN
'\u0633' # 0xD3 -> ARABIC LETTER SEEN
'\u0634' # 0xD4 -> ARABIC LETTER SHEEN
'\u0635' # 0xD5 -> ARABIC LETTER SAD
'\u0636' # 0xD6 -> ARABIC LETTER DAD
'\u0637' # 0xD7 -> ARABIC LETTER TAH
'\u0638' # 0xD8 -> ARABIC LETTER ZAH
'\u0639' # 0xD9 -> ARABIC LETTER AIN
'\u063a' # 0xDA -> ARABIC LETTER GHAIN
'[' # 0xDB -> LEFT SQUARE BRACKET, right-left
'\\' # 0xDC -> REVERSE SOLIDUS, right-left
']' # 0xDD -> RIGHT SQUARE BRACKET, right-left
'^' # 0xDE -> CIRCUMFLEX ACCENT, right-left
'_' # 0xDF -> LOW LINE, right-left
'\u0640' # 0xE0 -> ARABIC TATWEEL
'\u0641' # 0xE1 -> ARABIC LETTER FEH
'\u0642' # 0xE2 -> ARABIC LETTER QAF
'\u0643' # 0xE3 -> ARABIC LETTER KAF
'\u0644' # 0xE4 -> ARABIC LETTER LAM
'\u0645' # 0xE5 -> ARABIC LETTER MEEM
'\u0646' # 0xE6 -> ARABIC LETTER NOON
'\u0647' # 0xE7 -> ARABIC LETTER HEH
'\u0648' # 0xE8 -> ARABIC LETTER WAW
'\u0649' # 0xE9 -> ARABIC LETTER ALEF MAKSURA
'\u064a' # 0xEA -> ARABIC LETTER YEH
'\u064b' # 0xEB -> ARABIC FATHATAN
'\u064c' # 0xEC -> ARABIC DAMMATAN
'\u064d' # 0xED -> ARABIC KASRATAN
'\u064e' # 0xEE -> ARABIC FATHA
'\u064f' # 0xEF -> ARABIC DAMMA
'\u0650' # 0xF0 -> ARABIC KASRA
'\u0651' # 0xF1 -> ARABIC SHADDA
'\u0652' # 0xF2 -> ARABIC SUKUN
'\u067e' # 0xF3 -> ARABIC LETTER PEH
'\u0679' # 0xF4 -> ARABIC LETTER TTEH
'\u0686' # 0xF5 -> ARABIC LETTER TCHEH
'\u06d5' # 0xF6 -> ARABIC LETTER AE
'\u06a4' # 0xF7 -> ARABIC LETTER VEH
'\u06af' # 0xF8 -> ARABIC LETTER GAF
'\u0688' # 0xF9 -> ARABIC LETTER DDAL
'\u0691' # 0xFA -> ARABIC LETTER RREH
'{' # 0xFB -> LEFT CURLY BRACKET, right-left
'|' # 0xFC -> VERTICAL LINE, right-left
'}' # 0xFD -> RIGHT CURLY BRACKET, right-left
'\u0698' # 0xFE -> ARABIC LETTER JEH
'\u06d2' # 0xFF -> ARABIC LETTER YEH BARREE
)
### Encoding table
# Inverse mapping (character -> byte) derived from decoding_table above.
encoding_table=codecs.charmap_build(decoding_table)
|
apache-2.0
|
John-Hart/autorest
|
src/generator/AutoRest.Python.Azure.Tests/Expected/AcceptanceTests/AzureParameterGrouping/autorestparametergroupingtestservice/auto_rest_parameter_grouping_test_service.py
|
9
|
4692
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.service_client import ServiceClient
from msrest import Serializer, Deserializer
from msrestazure import AzureConfiguration
from .version import VERSION
from .operations.parameter_grouping_operations import ParameterGroupingOperations
from . import models
class AutoRestParameterGroupingTestServiceConfiguration(AzureConfiguration):
    """Configuration for AutoRestParameterGroupingTestService
    Note that all parameters used to create this instance are saved as instance
    attributes.
    :param credentials: Credentials needed for the client to connect to Azure.
    :type credentials: :mod:`A msrestazure Credentials
     object<msrestazure.azure_active_directory>`
    :param accept_language: Gets or sets the preferred language for the
     response.
    :type accept_language: str
    :param long_running_operation_retry_timeout: Gets or sets the retry
     timeout in seconds for Long Running Operations. Default value is 30.
    :type long_running_operation_retry_timeout: int
    :param generate_client_request_id: When set to true a unique
     x-ms-client-request-id value is generated and included in each request.
     Default is true.
    :type generate_client_request_id: bool
    :param str base_url: Service URL
    :param str filepath: Existing config
    """
    # NOTE: this file is generated by AutoRest (see file header); manual
    # edits will be lost on regeneration.
    def __init__(
            self, credentials, accept_language='en-US', long_running_operation_retry_timeout=30, generate_client_request_id=True, base_url=None, filepath=None):
        # Fail fast on the only mandatory argument.
        if credentials is None:
            raise ValueError("Parameter 'credentials' must not be None.")
        if accept_language is not None and not isinstance(accept_language, str):
            raise TypeError("Optional parameter 'accept_language' must be str.")
        # Default endpoint when the caller supplies none.
        if not base_url:
            base_url = 'https://localhost'
        super(AutoRestParameterGroupingTestServiceConfiguration, self).__init__(base_url, filepath)
        # Identify this SDK in the User-Agent header.
        self.add_user_agent('autorestparametergroupingtestservice/{}'.format(VERSION))
        self.add_user_agent('Azure-SDK-For-Python')
        self.credentials = credentials
        self.accept_language = accept_language
        self.long_running_operation_retry_timeout = long_running_operation_retry_timeout
        self.generate_client_request_id = generate_client_request_id
class AutoRestParameterGroupingTestService(object):
    """Test Infrastructure for AutoRest.

    :ivar config: Configuration for client.
    :vartype config: AutoRestParameterGroupingTestServiceConfiguration
    :ivar parameter_grouping: ParameterGrouping operations
    :vartype parameter_grouping: .operations.ParameterGroupingOperations
    :param credentials: Credentials needed for the client to connect to Azure.
    :type credentials: :mod:`A msrestazure Credentials
     object<msrestazure.azure_active_directory>`
    :param accept_language: Preferred language for the response.
    :type accept_language: str
    :param long_running_operation_retry_timeout: Retry timeout in seconds for
     Long Running Operations. Default value is 30.
    :type long_running_operation_retry_timeout: int
    :param generate_client_request_id: When set to true a unique
     x-ms-client-request-id value is generated and included in each request.
     Default is true.
    :type generate_client_request_id: bool
    :param str base_url: Service URL
    :param str filepath: Existing config
    """

    def __init__(
            self, credentials, accept_language='en-US',
            long_running_operation_retry_timeout=30,
            generate_client_request_id=True, base_url=None, filepath=None):
        # All client-level settings live on the configuration object.
        self.config = AutoRestParameterGroupingTestServiceConfiguration(
            credentials, accept_language, long_running_operation_retry_timeout,
            generate_client_request_id, base_url, filepath)
        self._client = ServiceClient(self.config.credentials, self.config)

        # Map model-class names to classes so the (de)serializers can resolve
        # type references by name.
        client_models = {
            name: cls for name, cls in models.__dict__.items()
            if isinstance(cls, type)}
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)

        # Operation group exposing the parameter-grouping test operations.
        self.parameter_grouping = ParameterGroupingOperations(
            self._client, self.config, self._serialize, self._deserialize)
|
mit
|
joelagnel/ns-3
|
src/applications/bindings/modulegen__gcc_ILP32.py
|
2
|
301167
|
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
class ErrorHandler(pybindgen.settings.ErrorHandler):
    """Downgrade wrapper-generation failures to warnings so generation continues."""

    def handle_error(self, wrapper, exception, traceback_):
        # Report the failure, then tell pybindgen to skip this wrapper.
        message = "exception %r in wrapper %s" % (exception, wrapper)
        warnings.warn(message)
        return True
# Install the tolerant handler so a single failing wrapper does not abort
# code generation for the whole module.
pybindgen.settings.error_handler = ErrorHandler()
import sys
def module_init():
    """Create and return the root pybindgen module for the ns.applications bindings."""
    return Module('ns.applications', cpp_namespace='::ns3')
def register_types(module):
    """Register with pybindgen every C++ type exposed by the ns.applications module.

    Types owned by other ns-3 modules (core, network, contrib) carry
    ``import_from_module`` so their existing wrappers are reused rather than
    regenerated here.  NOTE: registration order matters — later entries
    resolve their parent/outer classes through ``root_module['...']`` lookups,
    which only succeed for types registered earlier.
    """
    root_module = module.get_root()
    ## address.h (module 'network'): ns3::Address [class]
    module.add_class('Address', import_from_module='ns.network')
    ## address.h (module 'network'): ns3::Address::MaxSize_e [enumeration]
    module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network')
    ## application-container.h (module 'network'): ns3::ApplicationContainer [class]
    module.add_class('ApplicationContainer', import_from_module='ns.network')
    ## attribute-list.h (module 'core'): ns3::AttributeList [class]
    module.add_class('AttributeList', import_from_module='ns.core')
    ## average.h (module 'contrib'): ns3::Average<double> [class]
    module.add_class('Average', import_from_module='ns.contrib', template_parameters=['double'])
    ## buffer.h (module 'network'): ns3::Buffer [class]
    module.add_class('Buffer', import_from_module='ns.network')
    ## buffer.h (module 'network'): ns3::Buffer::Iterator [class]
    module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::Buffer'])
    ## bulk-send-helper.h (module 'applications'): ns3::BulkSendHelper [class]
    module.add_class('BulkSendHelper')
    ## callback.h (module 'core'): ns3::CallbackBase [class]
    module.add_class('CallbackBase', import_from_module='ns.core')
    ## data-rate.h (module 'network'): ns3::DataRate [class]
    module.add_class('DataRate', import_from_module='ns.network')
    ## event-id.h (module 'core'): ns3::EventId [class]
    module.add_class('EventId', import_from_module='ns.core')
    ## high-precision-128.h (module 'core'): ns3::HighPrecision [class]
    module.add_class('HighPrecision', import_from_module='ns.core')
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
    module.add_class('Ipv4Address', import_from_module='ns.network')
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
    root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address'])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Mask [class]
    module.add_class('Ipv4Mask', import_from_module='ns.network')
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
    module.add_class('Ipv6Address', import_from_module='ns.network')
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
    root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address'])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix [class]
    module.add_class('Ipv6Prefix', import_from_module='ns.network')
    ## node-container.h (module 'network'): ns3::NodeContainer [class]
    module.add_class('NodeContainer', import_from_module='ns.network')
    ## object-base.h (module 'core'): ns3::ObjectBase [class]
    module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
    ## object.h (module 'core'): ns3::ObjectDeleter [struct]
    module.add_class('ObjectDeleter', import_from_module='ns.core')
    ## object-factory.h (module 'core'): ns3::ObjectFactory [class]
    module.add_class('ObjectFactory', import_from_module='ns.core')
    ## on-off-helper.h (module 'applications'): ns3::OnOffHelper [class]
    module.add_class('OnOffHelper')
    ## packet-loss-counter.h (module 'applications'): ns3::PacketLossCounter [class]
    module.add_class('PacketLossCounter')
    ## packet-sink-helper.h (module 'applications'): ns3::PacketSinkHelper [class]
    module.add_class('PacketSinkHelper')
    ## ping6-helper.h (module 'applications'): ns3::Ping6Helper [class]
    module.add_class('Ping6Helper')
    ## random-variable.h (module 'core'): ns3::RandomVariable [class]
    module.add_class('RandomVariable', import_from_module='ns.core')
    ## random-variable.h (module 'core'): ns3::SeedManager [class]
    module.add_class('SeedManager', import_from_module='ns.core')
    ## random-variable.h (module 'core'): ns3::SequentialVariable [class]
    module.add_class('SequentialVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariable'])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simulator.h (module 'core'): ns3::Simulator [class]
    module.add_class('Simulator', is_singleton=True, import_from_module='ns.core')
    ## tag.h (module 'network'): ns3::Tag [class]
    module.add_class('Tag', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
    ## tag-buffer.h (module 'network'): ns3::TagBuffer [class]
    module.add_class('TagBuffer', import_from_module='ns.network')
    ## nstime.h (module 'core'): ns3::Time [class]
    module.add_class('Time', import_from_module='ns.core')
    ## nstime.h (module 'core'): ns3::Time::Unit [enumeration]
    module.add_enum('Unit', ['S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core')
    ## random-variable.h (module 'core'): ns3::TriangularVariable [class]
    module.add_class('TriangularVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariable'])
    ## type-id.h (module 'core'): ns3::TypeId [class]
    module.add_class('TypeId', import_from_module='ns.core')
    ## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration]
    module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInfo [struct]
    module.add_class('AttributeInfo', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
    ## udp-client-server-helper.h (module 'applications'): ns3::UdpClientHelper [class]
    module.add_class('UdpClientHelper')
    ## udp-echo-helper.h (module 'applications'): ns3::UdpEchoClientHelper [class]
    module.add_class('UdpEchoClientHelper')
    ## udp-echo-helper.h (module 'applications'): ns3::UdpEchoServerHelper [class]
    module.add_class('UdpEchoServerHelper')
    ## udp-client-server-helper.h (module 'applications'): ns3::UdpServerHelper [class]
    module.add_class('UdpServerHelper')
    ## udp-client-server-helper.h (module 'applications'): ns3::UdpTraceClientHelper [class]
    module.add_class('UdpTraceClientHelper')
    ## random-variable.h (module 'core'): ns3::UniformVariable [class]
    module.add_class('UniformVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariable'])
    ## attribute-list.h (module 'core'): ns3::UnsafeAttributeList [class]
    module.add_class('UnsafeAttributeList', import_from_module='ns.core')
    ## v4ping-helper.h (module 'applications'): ns3::V4PingHelper [class]
    module.add_class('V4PingHelper')
    ## random-variable.h (module 'core'): ns3::WeibullVariable [class]
    module.add_class('WeibullVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariable'])
    ## random-variable.h (module 'core'): ns3::ZetaVariable [class]
    module.add_class('ZetaVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariable'])
    ## random-variable.h (module 'core'): ns3::ZipfVariable [class]
    module.add_class('ZipfVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariable'])
    ## empty.h (module 'core'): ns3::empty [class]
    module.add_class('empty', import_from_module='ns.core')
    ## chunk.h (module 'network'): ns3::Chunk [class]
    module.add_class('Chunk', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
    ## random-variable.h (module 'core'): ns3::ConstantVariable [class]
    module.add_class('ConstantVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariable'])
    ## random-variable.h (module 'core'): ns3::DeterministicVariable [class]
    module.add_class('DeterministicVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariable'])
    ## random-variable.h (module 'core'): ns3::EmpiricalVariable [class]
    module.add_class('EmpiricalVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariable'])
    ## random-variable.h (module 'core'): ns3::ErlangVariable [class]
    module.add_class('ErlangVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariable'])
    ## random-variable.h (module 'core'): ns3::ExponentialVariable [class]
    module.add_class('ExponentialVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariable'])
    ## random-variable.h (module 'core'): ns3::GammaVariable [class]
    module.add_class('GammaVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariable'])
    ## header.h (module 'network'): ns3::Header [class]
    module.add_class('Header', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
    ## random-variable.h (module 'core'): ns3::IntEmpiricalVariable [class]
    module.add_class('IntEmpiricalVariable', import_from_module='ns.core', parent=root_module['ns3::EmpiricalVariable'])
    ## random-variable.h (module 'core'): ns3::LogNormalVariable [class]
    module.add_class('LogNormalVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariable'])
    ## random-variable.h (module 'core'): ns3::NormalVariable [class]
    module.add_class('NormalVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariable'])
    ## object.h (module 'core'): ns3::Object [class]
    module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
    ## object.h (module 'core'): ns3::Object::AggregateIterator [class]
    module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object'])
    ## random-variable.h (module 'core'): ns3::ParetoVariable [class]
    module.add_class('ParetoVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariable'])
    ## nstime.h (module 'core'): ns3::Scalar [class]
    module.add_class('Scalar', import_from_module='ns.core')
    ## nstime.h (module 'core'): ns3::Scalar [class]
    root_module['ns3::Scalar'].implicitly_converts_to(root_module['ns3::Time'])
    ## seq-ts-header.h (module 'applications'): ns3::SeqTsHeader [class]
    module.add_class('SeqTsHeader', parent=root_module['ns3::Header'])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::EventImpl', 'ns3::empty', 'ns3::DefaultDeleter<ns3::EventImpl>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::RadvdInterface, ns3::empty, ns3::DefaultDeleter<ns3::RadvdInterface> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::RadvdInterface', 'ns3::empty', 'ns3::DefaultDeleter<ns3::RadvdInterface>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::RadvdPrefix, ns3::empty, ns3::DefaultDeleter<ns3::RadvdPrefix> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::RadvdPrefix', 'ns3::empty', 'ns3::DefaultDeleter<ns3::RadvdPrefix>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## socket.h (module 'network'): ns3::Socket [class]
    module.add_class('Socket', import_from_module='ns.network', parent=root_module['ns3::Object'])
    ## socket.h (module 'network'): ns3::Socket::SocketErrno [enumeration]
    module.add_enum('SocketErrno', ['ERROR_NOTERROR', 'ERROR_ISCONN', 'ERROR_NOTCONN', 'ERROR_MSGSIZE', 'ERROR_AGAIN', 'ERROR_SHUTDOWN', 'ERROR_OPNOTSUPP', 'ERROR_AFNOSUPPORT', 'ERROR_INVAL', 'ERROR_BADF', 'ERROR_NOROUTETOHOST', 'ERROR_NODEV', 'ERROR_ADDRNOTAVAIL', 'SOCKET_ERRNO_LAST'], outer_class=root_module['ns3::Socket'], import_from_module='ns.network')
    ## socket.h (module 'network'): ns3::Socket::SocketType [enumeration]
    module.add_enum('SocketType', ['NS3_SOCK_STREAM', 'NS3_SOCK_SEQPACKET', 'NS3_SOCK_DGRAM', 'NS3_SOCK_RAW'], outer_class=root_module['ns3::Socket'], import_from_module='ns.network')
    ## socket.h (module 'network'): ns3::SocketAddressTag [class]
    module.add_class('SocketAddressTag', import_from_module='ns.network', parent=root_module['ns3::Tag'])
    ## socket.h (module 'network'): ns3::SocketIpTtlTag [class]
    module.add_class('SocketIpTtlTag', import_from_module='ns.network', parent=root_module['ns3::Tag'])
    ## socket.h (module 'network'): ns3::SocketSetDontFragmentTag [class]
    module.add_class('SocketSetDontFragmentTag', import_from_module='ns.network', parent=root_module['ns3::Tag'])
    ## application.h (module 'network'): ns3::Application [class]
    module.add_class('Application', import_from_module='ns.network', parent=root_module['ns3::Object'])
    ## attribute.h (module 'core'): ns3::AttributeAccessor [class]
    module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
    ## attribute.h (module 'core'): ns3::AttributeChecker [class]
    module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
    ## attribute.h (module 'core'): ns3::AttributeValue [class]
    module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
    ## bulk-send-application.h (module 'applications'): ns3::BulkSendApplication [class]
    module.add_class('BulkSendApplication', parent=root_module['ns3::Application'])
    ## callback.h (module 'core'): ns3::CallbackChecker [class]
    module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## callback.h (module 'core'): ns3::CallbackImplBase [class]
    module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
    ## callback.h (module 'core'): ns3::CallbackValue [class]
    module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## data-rate.h (module 'network'): ns3::DataRateChecker [class]
    module.add_class('DataRateChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## data-rate.h (module 'network'): ns3::DataRateValue [class]
    module.add_class('DataRateValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    ## attribute.h (module 'core'): ns3::EmptyAttributeValue [class]
    module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## event-impl.h (module 'core'): ns3::EventImpl [class]
    module.add_class('EventImpl', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker [class]
    module.add_class('Ipv4AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue [class]
    module.add_class('Ipv4AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker [class]
    module.add_class('Ipv4MaskChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue [class]
    module.add_class('Ipv4MaskValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker [class]
    module.add_class('Ipv6AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue [class]
    module.add_class('Ipv6AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker [class]
    module.add_class('Ipv6PrefixChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue [class]
    module.add_class('Ipv6PrefixValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    ## net-device.h (module 'network'): ns3::NetDevice [class]
    module.add_class('NetDevice', import_from_module='ns.network', parent=root_module['ns3::Object'])
    ## net-device.h (module 'network'): ns3::NetDevice::PacketType [enumeration]
    module.add_enum('PacketType', ['PACKET_HOST', 'NS3_PACKET_HOST', 'PACKET_BROADCAST', 'NS3_PACKET_BROADCAST', 'PACKET_MULTICAST', 'NS3_PACKET_MULTICAST', 'PACKET_OTHERHOST', 'NS3_PACKET_OTHERHOST'], outer_class=root_module['ns3::NetDevice'], import_from_module='ns.network')
    ## node.h (module 'network'): ns3::Node [class]
    module.add_class('Node', import_from_module='ns.network', parent=root_module['ns3::Object'])
    ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker [class]
    module.add_class('ObjectFactoryChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## object-factory.h (module 'core'): ns3::ObjectFactoryValue [class]
    module.add_class('ObjectFactoryValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## onoff-application.h (module 'applications'): ns3::OnOffApplication [class]
    module.add_class('OnOffApplication', parent=root_module['ns3::Application'])
    ## packet-sink.h (module 'applications'): ns3::PacketSink [class]
    module.add_class('PacketSink', parent=root_module['ns3::Application'])
    ## ping6.h (module 'applications'): ns3::Ping6 [class]
    module.add_class('Ping6', parent=root_module['ns3::Application'])
    ## radvd.h (module 'applications'): ns3::Radvd [class]
    module.add_class('Radvd', parent=root_module['ns3::Application'])
    ## radvd-interface.h (module 'applications'): ns3::RadvdInterface [class]
    module.add_class('RadvdInterface', parent=root_module['ns3::SimpleRefCount< ns3::RadvdInterface, ns3::empty, ns3::DefaultDeleter<ns3::RadvdInterface> >'])
    ## radvd-prefix.h (module 'applications'): ns3::RadvdPrefix [class]
    module.add_class('RadvdPrefix', parent=root_module['ns3::SimpleRefCount< ns3::RadvdPrefix, ns3::empty, ns3::DefaultDeleter<ns3::RadvdPrefix> >'])
    ## random-variable.h (module 'core'): ns3::RandomVariableChecker [class]
    module.add_class('RandomVariableChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## random-variable.h (module 'core'): ns3::RandomVariableValue [class]
    module.add_class('RandomVariableValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## nstime.h (module 'core'): ns3::TimeChecker [class]
    module.add_class('TimeChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## nstime.h (module 'core'): ns3::TimeValue [class]
    module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## type-id.h (module 'core'): ns3::TypeIdChecker [class]
    module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## type-id.h (module 'core'): ns3::TypeIdValue [class]
    module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## udp-client.h (module 'applications'): ns3::UdpClient [class]
    module.add_class('UdpClient', parent=root_module['ns3::Application'])
    ## udp-echo-client.h (module 'applications'): ns3::UdpEchoClient [class]
    module.add_class('UdpEchoClient', parent=root_module['ns3::Application'])
    ## udp-echo-server.h (module 'applications'): ns3::UdpEchoServer [class]
    module.add_class('UdpEchoServer', parent=root_module['ns3::Application'])
    ## udp-server.h (module 'applications'): ns3::UdpServer [class]
    module.add_class('UdpServer', parent=root_module['ns3::Application'])
    ## udp-trace-client.h (module 'applications'): ns3::UdpTraceClient [class]
    module.add_class('UdpTraceClient', parent=root_module['ns3::Application'])
    ## v4ping.h (module 'applications'): ns3::V4Ping [class]
    module.add_class('V4Ping', parent=root_module['ns3::Application'])
    ## address.h (module 'network'): ns3::AddressChecker [class]
    module.add_class('AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## address.h (module 'network'): ns3::AddressValue [class]
    module.add_class('AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    # STL container instantiations used by the wrapped APIs.
    module.add_container('std::vector< ns3::Ipv6Address >', 'ns3::Ipv6Address', container_type='vector')
    module.add_container('std::list< ns3::Ptr< ns3::Socket > >', 'ns3::Ptr< ns3::Socket >', container_type='list')
    module.add_container('std::list< ns3::Ptr< ns3::RadvdPrefix > >', 'ns3::Ptr< ns3::RadvdPrefix >', container_type='list')
    # Typedef aliases mapping the Time arithmetic result types onto ns3::Time.
    typehandlers.add_type_alias('ns3::Time', 'ns3::TimeSquare')
    typehandlers.add_type_alias('ns3::Time*', 'ns3::TimeSquare*')
    typehandlers.add_type_alias('ns3::Time&', 'ns3::TimeSquare&')
    module.add_typedef(root_module['ns3::Time'], 'TimeSquare')
    typehandlers.add_type_alias('ns3::Time', 'ns3::TimeInvert')
    typehandlers.add_type_alias('ns3::Time*', 'ns3::TimeInvert*')
    typehandlers.add_type_alias('ns3::Time&', 'ns3::TimeInvert&')
    module.add_typedef(root_module['ns3::Time'], 'TimeInvert')
    ## Register a nested module for the namespace FatalImpl
    nested_module = module.add_cpp_namespace('FatalImpl')
    register_types_ns3_FatalImpl(nested_module)
def register_types_ns3_FatalImpl(module):
    """Register types for the nested ns3::FatalImpl namespace.

    No types are wrapped in this namespace; the root lookup is the generated
    boilerplate shared by all nested-namespace registration hooks.
    """
    root_module = module.get_root()
def register_methods(root_module):
register_Ns3Address_methods(root_module, root_module['ns3::Address'])
register_Ns3ApplicationContainer_methods(root_module, root_module['ns3::ApplicationContainer'])
register_Ns3AttributeList_methods(root_module, root_module['ns3::AttributeList'])
register_Ns3Average__Double_methods(root_module, root_module['ns3::Average< double >'])
register_Ns3Buffer_methods(root_module, root_module['ns3::Buffer'])
register_Ns3BufferIterator_methods(root_module, root_module['ns3::Buffer::Iterator'])
register_Ns3BulkSendHelper_methods(root_module, root_module['ns3::BulkSendHelper'])
register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
register_Ns3DataRate_methods(root_module, root_module['ns3::DataRate'])
register_Ns3EventId_methods(root_module, root_module['ns3::EventId'])
register_Ns3HighPrecision_methods(root_module, root_module['ns3::HighPrecision'])
register_Ns3Ipv4Address_methods(root_module, root_module['ns3::Ipv4Address'])
register_Ns3Ipv4Mask_methods(root_module, root_module['ns3::Ipv4Mask'])
register_Ns3Ipv6Address_methods(root_module, root_module['ns3::Ipv6Address'])
register_Ns3Ipv6Prefix_methods(root_module, root_module['ns3::Ipv6Prefix'])
register_Ns3NodeContainer_methods(root_module, root_module['ns3::NodeContainer'])
register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
register_Ns3ObjectFactory_methods(root_module, root_module['ns3::ObjectFactory'])
register_Ns3OnOffHelper_methods(root_module, root_module['ns3::OnOffHelper'])
register_Ns3PacketLossCounter_methods(root_module, root_module['ns3::PacketLossCounter'])
register_Ns3PacketSinkHelper_methods(root_module, root_module['ns3::PacketSinkHelper'])
register_Ns3Ping6Helper_methods(root_module, root_module['ns3::Ping6Helper'])
register_Ns3RandomVariable_methods(root_module, root_module['ns3::RandomVariable'])
register_Ns3SeedManager_methods(root_module, root_module['ns3::SeedManager'])
register_Ns3SequentialVariable_methods(root_module, root_module['ns3::SequentialVariable'])
register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
register_Ns3Simulator_methods(root_module, root_module['ns3::Simulator'])
register_Ns3Tag_methods(root_module, root_module['ns3::Tag'])
register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer'])
register_Ns3Time_methods(root_module, root_module['ns3::Time'])
register_Ns3TriangularVariable_methods(root_module, root_module['ns3::TriangularVariable'])
register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
register_Ns3TypeIdAttributeInfo_methods(root_module, root_module['ns3::TypeId::AttributeInfo'])
register_Ns3UdpClientHelper_methods(root_module, root_module['ns3::UdpClientHelper'])
register_Ns3UdpEchoClientHelper_methods(root_module, root_module['ns3::UdpEchoClientHelper'])
register_Ns3UdpEchoServerHelper_methods(root_module, root_module['ns3::UdpEchoServerHelper'])
register_Ns3UdpServerHelper_methods(root_module, root_module['ns3::UdpServerHelper'])
register_Ns3UdpTraceClientHelper_methods(root_module, root_module['ns3::UdpTraceClientHelper'])
register_Ns3UniformVariable_methods(root_module, root_module['ns3::UniformVariable'])
register_Ns3UnsafeAttributeList_methods(root_module, root_module['ns3::UnsafeAttributeList'])
register_Ns3V4PingHelper_methods(root_module, root_module['ns3::V4PingHelper'])
register_Ns3WeibullVariable_methods(root_module, root_module['ns3::WeibullVariable'])
register_Ns3ZetaVariable_methods(root_module, root_module['ns3::ZetaVariable'])
register_Ns3ZipfVariable_methods(root_module, root_module['ns3::ZipfVariable'])
register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
register_Ns3Chunk_methods(root_module, root_module['ns3::Chunk'])
register_Ns3ConstantVariable_methods(root_module, root_module['ns3::ConstantVariable'])
register_Ns3DeterministicVariable_methods(root_module, root_module['ns3::DeterministicVariable'])
register_Ns3EmpiricalVariable_methods(root_module, root_module['ns3::EmpiricalVariable'])
register_Ns3ErlangVariable_methods(root_module, root_module['ns3::ErlangVariable'])
register_Ns3ExponentialVariable_methods(root_module, root_module['ns3::ExponentialVariable'])
register_Ns3GammaVariable_methods(root_module, root_module['ns3::GammaVariable'])
register_Ns3Header_methods(root_module, root_module['ns3::Header'])
register_Ns3IntEmpiricalVariable_methods(root_module, root_module['ns3::IntEmpiricalVariable'])
register_Ns3LogNormalVariable_methods(root_module, root_module['ns3::LogNormalVariable'])
register_Ns3NormalVariable_methods(root_module, root_module['ns3::NormalVariable'])
register_Ns3Object_methods(root_module, root_module['ns3::Object'])
register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
register_Ns3ParetoVariable_methods(root_module, root_module['ns3::ParetoVariable'])
register_Ns3Scalar_methods(root_module, root_module['ns3::Scalar'])
register_Ns3SeqTsHeader_methods(root_module, root_module['ns3::SeqTsHeader'])
register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
register_Ns3SimpleRefCount__Ns3RadvdInterface_Ns3Empty_Ns3DefaultDeleter__lt__ns3RadvdInterface__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::RadvdInterface, ns3::empty, ns3::DefaultDeleter<ns3::RadvdInterface> >'])
register_Ns3SimpleRefCount__Ns3RadvdPrefix_Ns3Empty_Ns3DefaultDeleter__lt__ns3RadvdPrefix__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::RadvdPrefix, ns3::empty, ns3::DefaultDeleter<ns3::RadvdPrefix> >'])
register_Ns3Socket_methods(root_module, root_module['ns3::Socket'])
register_Ns3SocketAddressTag_methods(root_module, root_module['ns3::SocketAddressTag'])
register_Ns3SocketIpTtlTag_methods(root_module, root_module['ns3::SocketIpTtlTag'])
register_Ns3SocketSetDontFragmentTag_methods(root_module, root_module['ns3::SocketSetDontFragmentTag'])
register_Ns3Application_methods(root_module, root_module['ns3::Application'])
register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
register_Ns3BulkSendApplication_methods(root_module, root_module['ns3::BulkSendApplication'])
register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
register_Ns3DataRateChecker_methods(root_module, root_module['ns3::DataRateChecker'])
register_Ns3DataRateValue_methods(root_module, root_module['ns3::DataRateValue'])
register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
register_Ns3EventImpl_methods(root_module, root_module['ns3::EventImpl'])
register_Ns3Ipv4AddressChecker_methods(root_module, root_module['ns3::Ipv4AddressChecker'])
register_Ns3Ipv4AddressValue_methods(root_module, root_module['ns3::Ipv4AddressValue'])
register_Ns3Ipv4MaskChecker_methods(root_module, root_module['ns3::Ipv4MaskChecker'])
register_Ns3Ipv4MaskValue_methods(root_module, root_module['ns3::Ipv4MaskValue'])
register_Ns3Ipv6AddressChecker_methods(root_module, root_module['ns3::Ipv6AddressChecker'])
register_Ns3Ipv6AddressValue_methods(root_module, root_module['ns3::Ipv6AddressValue'])
register_Ns3Ipv6PrefixChecker_methods(root_module, root_module['ns3::Ipv6PrefixChecker'])
register_Ns3Ipv6PrefixValue_methods(root_module, root_module['ns3::Ipv6PrefixValue'])
register_Ns3NetDevice_methods(root_module, root_module['ns3::NetDevice'])
register_Ns3Node_methods(root_module, root_module['ns3::Node'])
register_Ns3ObjectFactoryChecker_methods(root_module, root_module['ns3::ObjectFactoryChecker'])
register_Ns3ObjectFactoryValue_methods(root_module, root_module['ns3::ObjectFactoryValue'])
register_Ns3OnOffApplication_methods(root_module, root_module['ns3::OnOffApplication'])
register_Ns3PacketSink_methods(root_module, root_module['ns3::PacketSink'])
register_Ns3Ping6_methods(root_module, root_module['ns3::Ping6'])
register_Ns3Radvd_methods(root_module, root_module['ns3::Radvd'])
register_Ns3RadvdInterface_methods(root_module, root_module['ns3::RadvdInterface'])
register_Ns3RadvdPrefix_methods(root_module, root_module['ns3::RadvdPrefix'])
register_Ns3RandomVariableChecker_methods(root_module, root_module['ns3::RandomVariableChecker'])
register_Ns3RandomVariableValue_methods(root_module, root_module['ns3::RandomVariableValue'])
register_Ns3TimeChecker_methods(root_module, root_module['ns3::TimeChecker'])
register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue'])
register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
register_Ns3UdpClient_methods(root_module, root_module['ns3::UdpClient'])
register_Ns3UdpEchoClient_methods(root_module, root_module['ns3::UdpEchoClient'])
register_Ns3UdpEchoServer_methods(root_module, root_module['ns3::UdpEchoServer'])
register_Ns3UdpServer_methods(root_module, root_module['ns3::UdpServer'])
register_Ns3UdpTraceClient_methods(root_module, root_module['ns3::UdpTraceClient'])
register_Ns3V4Ping_methods(root_module, root_module['ns3::V4Ping'])
register_Ns3AddressChecker_methods(root_module, root_module['ns3::AddressChecker'])
register_Ns3AddressValue_methods(root_module, root_module['ns3::AddressValue'])
return
def register_Ns3Address_methods(root_module, cls):
    """Register the Python bindings for ns3::Address (address.h, module 'network').

    Declares the comparison/stream operators, constructors and member
    functions on *cls* so PyBindGen can emit the wrapper code.
    """
    # Operator registrations: '<' and '!=' precede operator<<, then '==' —
    # this exact order matches the generator's original output.
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    # Constructors: default, (type, buffer, len), and copy.
    cls.add_constructor([])
    cls.add_constructor([param('uint8_t', 'type'),
                         param('uint8_t const *', 'buffer'),
                         param('uint8_t', 'len')])
    cls.add_constructor([param('ns3::Address const &', 'address')])
    # (method name, return type, parameters, extra keyword flags),
    # in the generator's original registration order.
    method_table = (
        ('CheckCompatible', 'bool',
         [param('uint8_t', 'type'), param('uint8_t', 'len')], {'is_const': True}),
        ('CopyAllFrom', 'uint32_t',
         [param('uint8_t const *', 'buffer'), param('uint8_t', 'len')], {}),
        ('CopyAllTo', 'uint32_t',
         [param('uint8_t *', 'buffer'), param('uint8_t', 'len')], {'is_const': True}),
        ('CopyFrom', 'uint32_t',
         [param('uint8_t const *', 'buffer'), param('uint8_t', 'len')], {}),
        ('CopyTo', 'uint32_t', [param('uint8_t *', 'buffer')], {'is_const': True}),
        ('Deserialize', 'void', [param('ns3::TagBuffer', 'buffer')], {}),
        ('GetLength', 'uint8_t', [], {'is_const': True}),
        ('GetSerializedSize', 'uint32_t', [], {'is_const': True}),
        ('IsInvalid', 'bool', [], {'is_const': True}),
        ('IsMatchingType', 'bool', [param('uint8_t', 'type')], {'is_const': True}),
        ('Register', 'uint8_t', [], {'is_static': True}),
        ('Serialize', 'void', [param('ns3::TagBuffer', 'buffer')], {'is_const': True}),
    )
    for name, retval, params, flags in method_table:
        cls.add_method(name, retval, params, **flags)
    return
def register_Ns3ApplicationContainer_methods(root_module, cls):
    """Register the Python bindings for ns3::ApplicationContainer
    (application-container.h, module 'network')."""
    # Constructors: copy, default, from a single application, by name lookup.
    for ctor_params in (
        [param('ns3::ApplicationContainer const &', 'arg0')],
        [],
        [param('ns3::Ptr< ns3::Application >', 'application')],
        [param('std::string', 'name')],
    ):
        cls.add_constructor(ctor_params)
    # Begin()/End() both return this const iterator type.
    app_iter = '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Application > const, std::vector< ns3::Ptr< ns3::Application > > >'
    # (name, return type, parameters, const?) for every member function,
    # preserving the generator's original registration order.
    for name, retval, params, const in (
        ('Add', 'void', [param('ns3::ApplicationContainer', 'other')], False),
        ('Add', 'void', [param('ns3::Ptr< ns3::Application >', 'application')], False),
        ('Add', 'void', [param('std::string', 'name')], False),
        ('Begin', app_iter, [], True),
        ('End', app_iter, [], True),
        ('Get', 'ns3::Ptr< ns3::Application >', [param('uint32_t', 'i')], True),
        ('GetN', 'uint32_t', [], True),
        ('Start', 'void', [param('ns3::Time', 'start')], False),
        ('Stop', 'void', [param('ns3::Time', 'stop')], False),
    ):
        if const:
            cls.add_method(name, retval, params, is_const=True)
        else:
            cls.add_method(name, retval, params)
    return
def register_Ns3AttributeList_methods(root_module, cls):
    """Register the Python bindings for ns3::AttributeList
    (attribute-list.h, module 'core')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::AttributeList const &', 'o')])

    def _name_value():
        # Fresh (name, value) parameter pair for each overload that takes
        # an attribute name plus its new value.
        return [param('std::string', 'name'),
                param('ns3::AttributeValue const &', 'value')]

    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value')])
    # Accessor for the process-wide attribute list singleton.
    cls.add_method('GetGlobal', 'ns3::AttributeList *', [], is_static=True)
    cls.add_method('Reset', 'void', [])
    cls.add_method('SerializeToString', 'std::string', [], is_const=True)
    cls.add_method('Set', 'void', _name_value())
    cls.add_method('SetFailSafe', 'bool', _name_value())
    cls.add_method('SetWithTid', 'void',
                   [param('ns3::TypeId', 'tid')] + _name_value())
    return
def register_Ns3Average__Double_methods(root_module, cls):
    """Register the Python bindings for ns3::Average<double>
    (average.h, module 'contrib')."""
    # Copy constructor first, then the default constructor.
    cls.add_constructor([param('ns3::Average< double > const &', 'arg0')])
    cls.add_constructor([])
    # Running average and sample count.
    cls.add_method('Avg', 'double', [], is_const=True)
    cls.add_method('Count', 'uint32_t', [], is_const=True)
    # Confidence-interval error bounds plus min/max/mean: all are
    # parameterless const members returning double.
    for accessor in ('Error90', 'Error95', 'Error99', 'Max', 'Mean', 'Min'):
        cls.add_method(accessor, 'double', [], is_const=True)
    # Reset clears the accumulator; Update folds one new sample in.
    cls.add_method('Reset', 'void', [])
    cls.add_method('Stddev', 'double', [], is_const=True)
    cls.add_method('Update', 'void', [param('double const &', 'x')])
    cls.add_method('Var', 'double', [], is_const=True)
    return
def register_Ns3Buffer_methods(root_module, cls):
    """Register the Python bindings for ns3::Buffer (buffer.h, module 'network')."""
    # Constructors: default, sized, sized-with-initialize-flag, copy.
    for ctor_params in (
        [],
        [param('uint32_t', 'dataSize')],
        [param('uint32_t', 'dataSize'), param('bool', 'initialize')],
        [param('ns3::Buffer const &', 'o')],
    ):
        cls.add_constructor(ctor_params)
    # Growing the buffer at either end (AddAtEnd is overloaded).
    cls.add_method('AddAtEnd', 'bool', [param('uint32_t', 'end')])
    cls.add_method('AddAtEnd', 'void', [param('ns3::Buffer const &', 'o')])
    cls.add_method('AddAtStart', 'bool', [param('uint32_t', 'start')])
    cls.add_method('Begin', 'ns3::Buffer::Iterator', [], is_const=True)
    # CopyData is overloaded: stream sink vs. raw byte destination.
    cls.add_method('CopyData', 'void',
                   [param('std::ostream *', 'os'), param('uint32_t', 'size')],
                   is_const=True)
    cls.add_method('CopyData', 'uint32_t',
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'size')],
                   is_const=True)
    cls.add_method('CreateFragment', 'ns3::Buffer',
                   [param('uint32_t', 'start'), param('uint32_t', 'length')],
                   is_const=True)
    cls.add_method('CreateFullCopy', 'ns3::Buffer', [], is_const=True)
    cls.add_method('Deserialize', 'uint32_t',
                   [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    cls.add_method('End', 'ns3::Buffer::Iterator', [], is_const=True)
    # Parameterless const accessors sharing the shape (name, return type).
    for name, retval in (
        ('GetCurrentEndOffset', 'int32_t'),
        ('GetCurrentStartOffset', 'int32_t'),
        ('GetSerializedSize', 'uint32_t'),
        ('GetSize', 'uint32_t'),
        ('PeekData', 'uint8_t const *'),
    ):
        cls.add_method(name, retval, [], is_const=True)
    # Shrinking the buffer, then serialization into a caller buffer.
    cls.add_method('RemoveAtEnd', 'void', [param('uint32_t', 'end')])
    cls.add_method('RemoveAtStart', 'void', [param('uint32_t', 'start')])
    cls.add_method('Serialize', 'uint32_t',
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
                   is_const=True)
    return
def register_Ns3BufferIterator_methods(root_module, cls):
    """Register the Python bindings for ns3::Buffer::Iterator
    (buffer.h, module 'network')."""
    # Copy and default constructors.
    cls.add_constructor([param('ns3::Buffer::Iterator const &', 'arg0')])
    cls.add_constructor([])
    # IP checksum helpers: without and with an initial checksum value.
    cls.add_method('CalculateIpChecksum', 'uint16_t', [param('uint16_t', 'size')])
    cls.add_method('CalculateIpChecksum', 'uint16_t',
                   [param('uint16_t', 'size'), param('uint32_t', 'initialChecksum')])
    # Position queries.
    cls.add_method('GetDistanceFrom', 'uint32_t',
                   [param('ns3::Buffer::Iterator const &', 'o')], is_const=True)
    cls.add_method('GetSize', 'uint32_t', [], is_const=True)
    cls.add_method('IsEnd', 'bool', [], is_const=True)
    cls.add_method('IsStart', 'bool', [], is_const=True)
    # Cursor movement: each of Next/Prev has a one-step and a delta overload.
    for mover in ('Next', 'Prev'):
        cls.add_method(mover, 'void', [])
        cls.add_method(mover, 'void', [param('uint32_t', 'delta')])
    # Bulk read into a caller-supplied byte buffer.
    cls.add_method('Read', 'void',
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
    # Integer readers grouped by byte order — little-endian (Lsbtoh*),
    # network/big-endian (Ntoh*), then host order — each in 16/32/64-bit
    # widths, followed by the single-byte reader.
    for prefix in ('ReadLsbtohU', 'ReadNtohU', 'ReadU'):
        for width in (16, 32, 64):
            cls.add_method('%s%d' % (prefix, width), 'uint%d_t' % width, [])
    cls.add_method('ReadU8', 'uint8_t', [])
    # Bulk writes: from raw memory, or by copying another iterator range.
    cls.add_method('Write', 'void',
                   [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    cls.add_method('Write', 'void',
                   [param('ns3::Buffer::Iterator', 'start'),
                    param('ns3::Buffer::Iterator', 'end')])
    # Integer writers mirroring the readers (Htolsb*, Hton*, host order),
    # then the two WriteU8 overloads (single byte; repeated byte).
    for prefix in ('WriteHtolsbU', 'WriteHtonU', 'WriteU'):
        for width in (16, 32, 64):
            cls.add_method('%s%d' % (prefix, width), 'void',
                           [param('uint%d_t' % width, 'data')])
    cls.add_method('WriteU8', 'void', [param('uint8_t', 'data')])
    cls.add_method('WriteU8', 'void',
                   [param('uint8_t', 'data'), param('uint32_t', 'len')])
    return
def register_Ns3BulkSendHelper_methods(root_module, cls):
    """Register the Python bindings for ns3::BulkSendHelper
    (bulk-send-helper.h, module 'applications')."""
    # Copy constructor, then the (protocol, address) constructor.
    cls.add_constructor([param('ns3::BulkSendHelper const &', 'arg0')])
    cls.add_constructor([param('std::string', 'protocol'),
                         param('ns3::Address', 'address')])
    # Install() is overloaded on how the target node(s) are identified;
    # every overload is const and yields an ApplicationContainer.
    for target_type, target_name in (('ns3::NodeContainer', 'c'),
                                     ('ns3::Ptr< ns3::Node >', 'node'),
                                     ('std::string', 'nodeName')):
        cls.add_method('Install',
                       'ns3::ApplicationContainer',
                       [param(target_type, target_name)],
                       is_const=True)
    cls.add_method('SetAttribute',
                   'void',
                   [param('std::string', 'name'),
                    param('ns3::AttributeValue const &', 'value')])
    return
def register_Ns3CallbackBase_methods(root_module, cls):
    """Register the Python bindings for ns3::CallbackBase (callback.h, module 'core')."""
    # Public copy and default constructors.
    cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')])
    cls.add_constructor([])
    # Accessor for the underlying callback implementation pointer.
    cls.add_method('GetImpl', 'ns3::Ptr< ns3::CallbackImplBase >', [], is_const=True)
    # The implementation-wrapping constructor is protected in C++.
    cls.add_constructor(
        [param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')],
        visibility='protected')
    # Static symbol-name demangling helper; also protected.
    cls.add_method(
        'Demangle',
        'std::string',
        [param('std::string const &', 'mangled')],
        is_static=True, visibility='protected')
    return
def register_Ns3DataRate_methods(root_module, cls):
    """Register the Python bindings for ns3::DataRate (data-rate.h, module 'network')."""
    # operator<< plus the full set of comparison operators.
    cls.add_output_stream_operator()
    for op in ('!=', '<', '<=', '==', '>', '>='):
        cls.add_binary_comparison_operator(op)
    # Constructors: copy, default, from a raw bit rate, from a rate string.
    for ctor_params in (
        [param('ns3::DataRate const &', 'arg0')],
        [],
        [param('uint64_t', 'bps')],
        [param('std::string', 'rate')],
    ):
        cls.add_constructor(ctor_params)
    # Const queries.
    cls.add_method('CalculateTxTime', 'double',
                   [param('uint32_t', 'bytes')], is_const=True)
    cls.add_method('GetBitRate', 'uint64_t', [], is_const=True)
    return
def register_Ns3EventId_methods(root_module, cls):
    """Register the Python bindings for ns3::EventId (event-id.h, module 'core')."""
    for op in ('!=', '=='):
        cls.add_binary_comparison_operator(op)
    # Copy and default constructors, then the full (impl, ts, context, uid) form.
    cls.add_constructor([param('ns3::EventId const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([
        param('ns3::Ptr< ns3::EventImpl > const &', 'impl'),
        param('uint64_t', 'ts'),
        param('uint32_t', 'context'),
        param('uint32_t', 'uid'),
    ])
    # Cancel() is the only mutator; the remaining members are const queries.
    cls.add_method('Cancel', 'void', [])
    for name, retval in (
        ('GetContext', 'uint32_t'),
        ('GetTs', 'uint64_t'),
        ('GetUid', 'uint32_t'),
        ('IsExpired', 'bool'),
        ('IsRunning', 'bool'),
        ('PeekEventImpl', 'ns3::EventImpl *'),
    ):
        cls.add_method(name, retval, [], is_const=True)
    return
def register_Ns3HighPrecision_methods(root_module, cls):
    """Register Python bindings for ns3::HighPrecision (high-precision-128.h, module 'core').

    Adds the output-stream operator, five constructors, the in-place
    arithmetic methods (Add/Sub/Mul/Div/MulByInvert), the accessors
    (Compare/GetDouble/GetHigh/GetInteger/GetLow) and the static
    Invert/Zero factory functions to *cls*.  ``root_module`` is unused here.
    Generated by PyBindGen; call order is significant for overload registration.
    """
    cls.add_output_stream_operator()
    ## high-precision-128.h (module 'core'): ns3::HighPrecision::HighPrecision(ns3::HighPrecision const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::HighPrecision const &', 'arg0')])
    ## high-precision-128.h (module 'core'): ns3::HighPrecision::HighPrecision(int64_t high, uint64_t low) [constructor]
    cls.add_constructor([param('int64_t', 'high'), param('uint64_t', 'low')])
    ## high-precision-128.h (module 'core'): ns3::HighPrecision::HighPrecision() [constructor]
    cls.add_constructor([])
    ## high-precision-128.h (module 'core'): ns3::HighPrecision::HighPrecision(int64_t value, bool dummy) [constructor]
    cls.add_constructor([param('int64_t', 'value'), param('bool', 'dummy')])
    ## high-precision-128.h (module 'core'): ns3::HighPrecision::HighPrecision(double value) [constructor]
    cls.add_constructor([param('double', 'value')])
    ## high-precision-128.h (module 'core'): void ns3::HighPrecision::Add(ns3::HighPrecision const & o) [member function]
    cls.add_method('Add',
                   'void',
                   [param('ns3::HighPrecision const &', 'o')])
    ## high-precision-128.h (module 'core'): int ns3::HighPrecision::Compare(ns3::HighPrecision const & o) const [member function]
    cls.add_method('Compare',
                   'int',
                   [param('ns3::HighPrecision const &', 'o')],
                   is_const=True)
    ## high-precision-128.h (module 'core'): void ns3::HighPrecision::Div(ns3::HighPrecision const & o) [member function]
    cls.add_method('Div',
                   'void',
                   [param('ns3::HighPrecision const &', 'o')])
    ## high-precision-128.h (module 'core'): double ns3::HighPrecision::GetDouble() const [member function]
    cls.add_method('GetDouble',
                   'double',
                   [],
                   is_const=True)
    ## high-precision-128.h (module 'core'): int64_t ns3::HighPrecision::GetHigh() const [member function]
    cls.add_method('GetHigh',
                   'int64_t',
                   [],
                   is_const=True)
    ## high-precision-128.h (module 'core'): int64_t ns3::HighPrecision::GetInteger() const [member function]
    cls.add_method('GetInteger',
                   'int64_t',
                   [],
                   is_const=True)
    ## high-precision-128.h (module 'core'): uint64_t ns3::HighPrecision::GetLow() const [member function]
    cls.add_method('GetLow',
                   'uint64_t',
                   [],
                   is_const=True)
    ## high-precision-128.h (module 'core'): static ns3::HighPrecision ns3::HighPrecision::Invert(uint64_t v) [member function]
    cls.add_method('Invert',
                   'ns3::HighPrecision',
                   [param('uint64_t', 'v')],
                   is_static=True)
    ## high-precision-128.h (module 'core'): void ns3::HighPrecision::Mul(ns3::HighPrecision const & o) [member function]
    cls.add_method('Mul',
                   'void',
                   [param('ns3::HighPrecision const &', 'o')])
    ## high-precision-128.h (module 'core'): void ns3::HighPrecision::MulByInvert(ns3::HighPrecision const & o) [member function]
    cls.add_method('MulByInvert',
                   'void',
                   [param('ns3::HighPrecision const &', 'o')])
    ## high-precision-128.h (module 'core'): void ns3::HighPrecision::Sub(ns3::HighPrecision const & o) [member function]
    cls.add_method('Sub',
                   'void',
                   [param('ns3::HighPrecision const &', 'o')])
    ## high-precision-128.h (module 'core'): static ns3::HighPrecision ns3::HighPrecision::Zero() [member function]
    cls.add_method('Zero',
                   'ns3::HighPrecision',
                   [],
                   is_static=True)
    return
def register_Ns3Ipv4Address_methods(root_module, cls):
    """Register Python bindings for ns3::Ipv4Address (ipv4-address.h, module 'network').

    Adds the </!=/== comparison operators and the output-stream operator,
    the copy/default/uint32/string constructors, the static factory and
    query functions (ConvertFrom, Deserialize, GetAny, GetBroadcast,
    GetLoopback, GetZero, IsMatchingType), and the per-instance accessors,
    predicates, Print/Serialize and both Set overloads to *cls*.
    ``root_module`` is unused here.  Generated by PyBindGen; call order is
    significant for overload registration.
    """
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(ns3::Ipv4Address const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4Address const &', 'arg0')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address() [constructor]
    cls.add_constructor([])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(uint32_t address) [constructor]
    cls.add_constructor([param('uint32_t', 'address')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(char const * address) [constructor]
    cls.add_constructor([param('char const *', 'address')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::CombineMask(ns3::Ipv4Mask const & mask) const [member function]
    cls.add_method('CombineMask',
                   'ns3::Ipv4Address',
                   [param('ns3::Ipv4Mask const &', 'mask')],
                   is_const=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::ConvertFrom(ns3::Address const & address) [member function]
    cls.add_method('ConvertFrom',
                   'ns3::Ipv4Address',
                   [param('ns3::Address const &', 'address')],
                   is_static=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::Deserialize(uint8_t const * buf) [member function]
    cls.add_method('Deserialize',
                   'ns3::Ipv4Address',
                   [param('uint8_t const *', 'buf')],
                   is_static=True)
    ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Address::Get() const [member function]
    cls.add_method('Get',
                   'uint32_t',
                   [],
                   is_const=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetAny() [member function]
    cls.add_method('GetAny',
                   'ns3::Ipv4Address',
                   [],
                   is_static=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetBroadcast() [member function]
    cls.add_method('GetBroadcast',
                   'ns3::Ipv4Address',
                   [],
                   is_static=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetLoopback() [member function]
    cls.add_method('GetLoopback',
                   'ns3::Ipv4Address',
                   [],
                   is_static=True)
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::GetSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function]
    cls.add_method('GetSubnetDirectedBroadcast',
                   'ns3::Ipv4Address',
                   [param('ns3::Ipv4Mask const &', 'mask')],
                   is_const=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetZero() [member function]
    cls.add_method('GetZero',
                   'ns3::Ipv4Address',
                   [],
                   is_static=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsBroadcast() const [member function]
    cls.add_method('IsBroadcast',
                   'bool',
                   [],
                   is_const=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsEqual(ns3::Ipv4Address const & other) const [member function]
    cls.add_method('IsEqual',
                   'bool',
                   [param('ns3::Ipv4Address const &', 'other')],
                   is_const=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsLocalMulticast() const [member function]
    cls.add_method('IsLocalMulticast',
                   'bool',
                   [],
                   is_const=True)
    ## ipv4-address.h (module 'network'): static bool ns3::Ipv4Address::IsMatchingType(ns3::Address const & address) [member function]
    cls.add_method('IsMatchingType',
                   'bool',
                   [param('ns3::Address const &', 'address')],
                   is_static=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsMulticast() const [member function]
    cls.add_method('IsMulticast',
                   'bool',
                   [],
                   is_const=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function]
    cls.add_method('IsSubnetDirectedBroadcast',
                   'bool',
                   [param('ns3::Ipv4Mask const &', 'mask')],
                   is_const=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Print(std::ostream & os) const [member function]
    cls.add_method('Print',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_const=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Serialize(uint8_t * buf) const [member function]
    cls.add_method('Serialize',
                   'void',
                   [param('uint8_t *', 'buf')],
                   is_const=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(uint32_t address) [member function]
    cls.add_method('Set',
                   'void',
                   [param('uint32_t', 'address')])
    ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(char const * address) [member function]
    cls.add_method('Set',
                   'void',
                   [param('char const *', 'address')])
    return
def register_Ns3Ipv4Mask_methods(root_module, cls):
    """Register Python bindings for ns3::Ipv4Mask (ipv4-address.h, module 'network').

    Adds the !=/== comparison operators and the output-stream operator, the
    copy/default/uint32/string constructors, static factories (GetLoopback,
    GetOnes, GetZero), and the Get/GetInverse/GetPrefixLength/IsEqual/
    IsMatch/Print/Set members to *cls*.  ``root_module`` is unused here.
    Generated by PyBindGen; call order is significant for overload registration.
    """
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(ns3::Ipv4Mask const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4Mask const &', 'arg0')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask() [constructor]
    cls.add_constructor([])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(uint32_t mask) [constructor]
    cls.add_constructor([param('uint32_t', 'mask')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(char const * mask) [constructor]
    cls.add_constructor([param('char const *', 'mask')])
    ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::Get() const [member function]
    cls.add_method('Get',
                   'uint32_t',
                   [],
                   is_const=True)
    ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::GetInverse() const [member function]
    cls.add_method('GetInverse',
                   'uint32_t',
                   [],
                   is_const=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetLoopback() [member function]
    cls.add_method('GetLoopback',
                   'ns3::Ipv4Mask',
                   [],
                   is_static=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetOnes() [member function]
    cls.add_method('GetOnes',
                   'ns3::Ipv4Mask',
                   [],
                   is_static=True)
    ## ipv4-address.h (module 'network'): uint16_t ns3::Ipv4Mask::GetPrefixLength() const [member function]
    cls.add_method('GetPrefixLength',
                   'uint16_t',
                   [],
                   is_const=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetZero() [member function]
    cls.add_method('GetZero',
                   'ns3::Ipv4Mask',
                   [],
                   is_static=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsEqual(ns3::Ipv4Mask other) const [member function]
    cls.add_method('IsEqual',
                   'bool',
                   [param('ns3::Ipv4Mask', 'other')],
                   is_const=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsMatch(ns3::Ipv4Address a, ns3::Ipv4Address b) const [member function]
    cls.add_method('IsMatch',
                   'bool',
                   [param('ns3::Ipv4Address', 'a'), param('ns3::Ipv4Address', 'b')],
                   is_const=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Print(std::ostream & os) const [member function]
    cls.add_method('Print',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_const=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Set(uint32_t mask) [member function]
    cls.add_method('Set',
                   'void',
                   [param('uint32_t', 'mask')])
    return
def register_Ns3Ipv6Address_methods(root_module, cls):
    """Register Python bindings for ns3::Ipv6Address (ipv6-address.h, module 'network').

    Adds the </!=/== comparison operators and the output-stream operator,
    five constructors (default, C-string, byte-buffer, copy, pointer-copy),
    the static factory/query functions (ConvertFrom, Deserialize, the
    GetAll*Multicast family, GetAny/GetLoopback/GetOnes/GetZero,
    IsMatchingType, MakeAutoconfiguredAddress,
    MakeAutoconfiguredLinkLocalAddress, MakeSolicitedAddress), and the
    per-instance predicates, CombinePrefix, GetBytes, Print, Serialize and
    both Set overloads to *cls*.  ``root_module`` is unused here.
    Generated by PyBindGen; call order is significant for overload registration.
    """
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(char const * address) [constructor]
    cls.add_constructor([param('char const *', 'address')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(uint8_t * address) [constructor]
    cls.add_constructor([param('uint8_t *', 'address')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const & addr) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6Address const &', 'addr')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const * addr) [constructor]
    cls.add_constructor([param('ns3::Ipv6Address const *', 'addr')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6Address::CombinePrefix(ns3::Ipv6Prefix const & prefix) [member function]
    cls.add_method('CombinePrefix',
                   'ns3::Ipv6Address',
                   [param('ns3::Ipv6Prefix const &', 'prefix')])
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::ConvertFrom(ns3::Address const & address) [member function]
    cls.add_method('ConvertFrom',
                   'ns3::Ipv6Address',
                   [param('ns3::Address const &', 'address')],
                   is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::Deserialize(uint8_t const * buf) [member function]
    cls.add_method('Deserialize',
                   'ns3::Ipv6Address',
                   [param('uint8_t const *', 'buf')],
                   is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllHostsMulticast() [member function]
    cls.add_method('GetAllHostsMulticast',
                   'ns3::Ipv6Address',
                   [],
                   is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllNodesMulticast() [member function]
    cls.add_method('GetAllNodesMulticast',
                   'ns3::Ipv6Address',
                   [],
                   is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllRoutersMulticast() [member function]
    cls.add_method('GetAllRoutersMulticast',
                   'ns3::Ipv6Address',
                   [],
                   is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAny() [member function]
    cls.add_method('GetAny',
                   'ns3::Ipv6Address',
                   [],
                   is_static=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::GetBytes(uint8_t * buf) const [member function]
    cls.add_method('GetBytes',
                   'void',
                   [param('uint8_t *', 'buf')],
                   is_const=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetLoopback() [member function]
    cls.add_method('GetLoopback',
                   'ns3::Ipv6Address',
                   [],
                   is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetOnes() [member function]
    cls.add_method('GetOnes',
                   'ns3::Ipv6Address',
                   [],
                   is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetZero() [member function]
    cls.add_method('GetZero',
                   'ns3::Ipv6Address',
                   [],
                   is_static=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllHostsMulticast() const [member function]
    cls.add_method('IsAllHostsMulticast',
                   'bool',
                   [],
                   is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllNodesMulticast() const [member function]
    cls.add_method('IsAllNodesMulticast',
                   'bool',
                   [],
                   is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllRoutersMulticast() const [member function]
    cls.add_method('IsAllRoutersMulticast',
                   'bool',
                   [],
                   is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAny() const [member function]
    cls.add_method('IsAny',
                   'bool',
                   [],
                   is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsEqual(ns3::Ipv6Address const & other) const [member function]
    cls.add_method('IsEqual',
                   'bool',
                   [param('ns3::Ipv6Address const &', 'other')],
                   is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocal() const [member function]
    cls.add_method('IsLinkLocal',
                   'bool',
                   [],
                   is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLocalhost() const [member function]
    cls.add_method('IsLocalhost',
                   'bool',
                   [],
                   is_const=True)
    ## ipv6-address.h (module 'network'): static bool ns3::Ipv6Address::IsMatchingType(ns3::Address const & address) [member function]
    cls.add_method('IsMatchingType',
                   'bool',
                   [param('ns3::Address const &', 'address')],
                   is_static=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsMulticast() const [member function]
    cls.add_method('IsMulticast',
                   'bool',
                   [],
                   is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsSolicitedMulticast() const [member function]
    cls.add_method('IsSolicitedMulticast',
                   'bool',
                   [],
                   is_const=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac48Address addr, ns3::Ipv6Address prefix) [member function]
    cls.add_method('MakeAutoconfiguredAddress',
                   'ns3::Ipv6Address',
                   [param('ns3::Mac48Address', 'addr'), param('ns3::Ipv6Address', 'prefix')],
                   is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac48Address mac) [member function]
    cls.add_method('MakeAutoconfiguredLinkLocalAddress',
                   'ns3::Ipv6Address',
                   [param('ns3::Mac48Address', 'mac')],
                   is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeSolicitedAddress(ns3::Ipv6Address addr) [member function]
    cls.add_method('MakeSolicitedAddress',
                   'ns3::Ipv6Address',
                   [param('ns3::Ipv6Address', 'addr')],
                   is_static=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Print(std::ostream & os) const [member function]
    cls.add_method('Print',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_const=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Serialize(uint8_t * buf) const [member function]
    cls.add_method('Serialize',
                   'void',
                   [param('uint8_t *', 'buf')],
                   is_const=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(char const * address) [member function]
    cls.add_method('Set',
                   'void',
                   [param('char const *', 'address')])
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(uint8_t * address) [member function]
    cls.add_method('Set',
                   'void',
                   [param('uint8_t *', 'address')])
    return
def register_Ns3Ipv6Prefix_methods(root_module, cls):
    """Register Python bindings for ns3::Ipv6Prefix (ipv6-address.h, module 'network').

    Adds the !=/== comparison operators and the output-stream operator, six
    constructors (default, byte-buffer, C-string, single-byte length, copy,
    pointer-copy), static factories (GetLoopback/GetOnes/GetZero) and the
    GetBytes/GetPrefixLength/IsEqual/IsMatch/Print members to *cls*.
    ``root_module`` is unused here.  Generated by PyBindGen; call order is
    significant for overload registration.
    """
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t * prefix) [constructor]
    cls.add_constructor([param('uint8_t *', 'prefix')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(char const * prefix) [constructor]
    cls.add_constructor([param('char const *', 'prefix')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t prefix) [constructor]
    cls.add_constructor([param('uint8_t', 'prefix')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const & prefix) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6Prefix const &', 'prefix')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const * prefix) [constructor]
    cls.add_constructor([param('ns3::Ipv6Prefix const *', 'prefix')])
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::GetBytes(uint8_t * buf) const [member function]
    cls.add_method('GetBytes',
                   'void',
                   [param('uint8_t *', 'buf')],
                   is_const=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetLoopback() [member function]
    cls.add_method('GetLoopback',
                   'ns3::Ipv6Prefix',
                   [],
                   is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetOnes() [member function]
    cls.add_method('GetOnes',
                   'ns3::Ipv6Prefix',
                   [],
                   is_static=True)
    ## ipv6-address.h (module 'network'): uint8_t ns3::Ipv6Prefix::GetPrefixLength() const [member function]
    cls.add_method('GetPrefixLength',
                   'uint8_t',
                   [],
                   is_const=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetZero() [member function]
    cls.add_method('GetZero',
                   'ns3::Ipv6Prefix',
                   [],
                   is_static=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsEqual(ns3::Ipv6Prefix const & other) const [member function]
    cls.add_method('IsEqual',
                   'bool',
                   [param('ns3::Ipv6Prefix const &', 'other')],
                   is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsMatch(ns3::Ipv6Address a, ns3::Ipv6Address b) const [member function]
    cls.add_method('IsMatch',
                   'bool',
                   [param('ns3::Ipv6Address', 'a'), param('ns3::Ipv6Address', 'b')],
                   is_const=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::Print(std::ostream & os) const [member function]
    cls.add_method('Print',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_const=True)
    return
def register_Ns3NodeContainer_methods(root_module, cls):
    """Register Python bindings for ns3::NodeContainer (node-container.h, module 'network').

    Adds the constructors (default, copy, single node, node-by-name, and the
    2..5-container concatenating forms), the three Add overloads, both
    Create overloads, Begin/End iterators, Get/GetN accessors and the
    static GetGlobal factory to *cls*.  ``root_module`` is unused here.
    Generated by PyBindGen; call order is significant for overload registration.
    """
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::NodeContainer const &', 'arg0')])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer() [constructor]
    cls.add_constructor([])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::Ptr<ns3::Node> node) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::Node >', 'node')])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(std::string nodeName) [constructor]
    cls.add_constructor([param('std::string', 'nodeName')])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b) [constructor]
    cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b')])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c) [constructor]
    cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c')])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c, ns3::NodeContainer const & d) [constructor]
    cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c'), param('ns3::NodeContainer const &', 'd')])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c, ns3::NodeContainer const & d, ns3::NodeContainer const & e) [constructor]
    cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c'), param('ns3::NodeContainer const &', 'd'), param('ns3::NodeContainer const &', 'e')])
    ## node-container.h (module 'network'): void ns3::NodeContainer::Add(ns3::NodeContainer other) [member function]
    cls.add_method('Add',
                   'void',
                   [param('ns3::NodeContainer', 'other')])
    ## node-container.h (module 'network'): void ns3::NodeContainer::Add(ns3::Ptr<ns3::Node> node) [member function]
    cls.add_method('Add',
                   'void',
                   [param('ns3::Ptr< ns3::Node >', 'node')])
    ## node-container.h (module 'network'): void ns3::NodeContainer::Add(std::string nodeName) [member function]
    cls.add_method('Add',
                   'void',
                   [param('std::string', 'nodeName')])
    ## node-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Node>*,std::vector<ns3::Ptr<ns3::Node>, std::allocator<ns3::Ptr<ns3::Node> > > > ns3::NodeContainer::Begin() const [member function]
    cls.add_method('Begin',
                   '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node > const, std::vector< ns3::Ptr< ns3::Node > > >',
                   [],
                   is_const=True)
    ## node-container.h (module 'network'): void ns3::NodeContainer::Create(uint32_t n) [member function]
    cls.add_method('Create',
                   'void',
                   [param('uint32_t', 'n')])
    ## node-container.h (module 'network'): void ns3::NodeContainer::Create(uint32_t n, uint32_t systemId) [member function]
    cls.add_method('Create',
                   'void',
                   [param('uint32_t', 'n'), param('uint32_t', 'systemId')])
    ## node-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Node>*,std::vector<ns3::Ptr<ns3::Node>, std::allocator<ns3::Ptr<ns3::Node> > > > ns3::NodeContainer::End() const [member function]
    cls.add_method('End',
                   '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node > const, std::vector< ns3::Ptr< ns3::Node > > >',
                   [],
                   is_const=True)
    ## node-container.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NodeContainer::Get(uint32_t i) const [member function]
    cls.add_method('Get',
                   'ns3::Ptr< ns3::Node >',
                   [param('uint32_t', 'i')],
                   is_const=True)
    ## node-container.h (module 'network'): static ns3::NodeContainer ns3::NodeContainer::GetGlobal() [member function]
    cls.add_method('GetGlobal',
                   'ns3::NodeContainer',
                   [],
                   is_static=True)
    ## node-container.h (module 'network'): uint32_t ns3::NodeContainer::GetN() const [member function]
    cls.add_method('GetN',
                   'uint32_t',
                   [],
                   is_const=True)
    return
def register_Ns3ObjectBase_methods(root_module, cls):
    """Register Python bindings for ns3::ObjectBase (object-base.h, module 'core').

    Adds the default/copy constructors, the attribute accessors
    (GetAttribute, GetAttributeFailSafe, SetAttribute, SetAttributeFailSafe),
    the pure-virtual GetInstanceTypeId, the static GetTypeId, the four
    trace connect/disconnect methods, and the protected ConstructSelf and
    protected-virtual NotifyConstructionCompleted to *cls*.  ``root_module``
    is unused here.  Generated by PyBindGen; call order is significant for
    overload registration.
    """
    ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor]
    cls.add_constructor([])
    ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
    ## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function]
    cls.add_method('GetAttribute',
                   'void',
                   [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
                   is_const=True)
    ## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & attribute) const [member function]
    cls.add_method('GetAttributeFailSafe',
                   'bool',
                   [param('std::string', 'name'), param('ns3::AttributeValue &', 'attribute')],
                   is_const=True)
    ## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId',
                   'ns3::TypeId',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('SetAttribute',
                   'void',
                   [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('SetAttributeFailSafe',
                   'bool',
                   [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceConnect',
                   'bool',
                   [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceConnectWithoutContext',
                   'bool',
                   [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceDisconnect',
                   'bool',
                   [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceDisconnectWithoutContext',
                   'bool',
                   [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeList const & attributes) [member function]
    cls.add_method('ConstructSelf',
                   'void',
                   [param('ns3::AttributeList const &', 'attributes')],
                   visibility='protected')
    ## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function]
    cls.add_method('NotifyConstructionCompleted',
                   'void',
                   [],
                   visibility='protected', is_virtual=True)
    return
def register_Ns3ObjectDeleter_methods(root_module, cls):
    """Register Python bindings for ns3::ObjectDeleter (object.h, module 'core').

    Adds the default and copy constructors plus the static Delete(ns3::Object*)
    member function to *cls*.  ``root_module`` is accepted for interface
    uniformity with the other register_* helpers but is not used here.
    Registration order is preserved from the generated original.
    """
    # ns3::ObjectDeleter::ObjectDeleter() [constructor]
    cls.add_constructor([])
    # ns3::ObjectDeleter::ObjectDeleter(ns3::ObjectDeleter const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')])
    # static void ns3::ObjectDeleter::Delete(ns3::Object * object) [member function]
    cls.add_method(
        'Delete',
        'void',
        [param('ns3::Object *', 'object')],
        is_static=True,
    )
    return
def register_Ns3ObjectFactory_methods(root_module, cls):
    """Register Python bindings for ns3::ObjectFactory (object-factory.h, module 'core').

    Adds the output-stream operator, the copy/default constructors, the
    Create/GetTypeId accessors, both Set overloads and the three SetTypeId
    overloads (TypeId, C-string, std::string) to *cls*.  ``root_module``
    is unused here.  Generated by PyBindGen; call order is significant for
    overload registration.
    """
    cls.add_output_stream_operator()
    ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(ns3::ObjectFactory const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectFactory const &', 'arg0')])
    ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory() [constructor]
    cls.add_constructor([])
    ## object-factory.h (module 'core'): ns3::Ptr<ns3::Object> ns3::ObjectFactory::Create() const [member function]
    cls.add_method('Create',
                   'ns3::Ptr< ns3::Object >',
                   [],
                   is_const=True)
    ## object-factory.h (module 'core'): ns3::TypeId ns3::ObjectFactory::GetTypeId() const [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_const=True)
    ## object-factory.h (module 'core'): void ns3::ObjectFactory::Set(std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    ## object-factory.h (module 'core'): void ns3::ObjectFactory::Set(ns3::AttributeList const & list) [member function]
    cls.add_method('Set',
                   'void',
                   [param('ns3::AttributeList const &', 'list')])
    ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(ns3::TypeId tid) [member function]
    cls.add_method('SetTypeId',
                   'void',
                   [param('ns3::TypeId', 'tid')])
    ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(char const * tid) [member function]
    cls.add_method('SetTypeId',
                   'void',
                   [param('char const *', 'tid')])
    ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(std::string tid) [member function]
    cls.add_method('SetTypeId',
                   'void',
                   [param('std::string', 'tid')])
    return
def register_Ns3OnOffHelper_methods(root_module, cls):
    """Register Python bindings for ns3::OnOffHelper (on-off-helper.h, module 'applications').

    Adds the copy and (protocol, address) constructors, the three Install
    overloads (NodeContainer, Ptr<Node>, node name) and SetAttribute to
    *cls*.  ``root_module`` is unused here.  Generated by PyBindGen; call
    order is significant for overload registration.
    """
    ## on-off-helper.h (module 'applications'): ns3::OnOffHelper::OnOffHelper(ns3::OnOffHelper const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::OnOffHelper const &', 'arg0')])
    ## on-off-helper.h (module 'applications'): ns3::OnOffHelper::OnOffHelper(std::string protocol, ns3::Address address) [constructor]
    cls.add_constructor([param('std::string', 'protocol'), param('ns3::Address', 'address')])
    ## on-off-helper.h (module 'applications'): ns3::ApplicationContainer ns3::OnOffHelper::Install(ns3::NodeContainer c) const [member function]
    cls.add_method('Install',
                   'ns3::ApplicationContainer',
                   [param('ns3::NodeContainer', 'c')],
                   is_const=True)
    ## on-off-helper.h (module 'applications'): ns3::ApplicationContainer ns3::OnOffHelper::Install(ns3::Ptr<ns3::Node> node) const [member function]
    cls.add_method('Install',
                   'ns3::ApplicationContainer',
                   [param('ns3::Ptr< ns3::Node >', 'node')],
                   is_const=True)
    ## on-off-helper.h (module 'applications'): ns3::ApplicationContainer ns3::OnOffHelper::Install(std::string nodeName) const [member function]
    cls.add_method('Install',
                   'ns3::ApplicationContainer',
                   [param('std::string', 'nodeName')],
                   is_const=True)
    ## on-off-helper.h (module 'applications'): void ns3::OnOffHelper::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('SetAttribute',
                   'void',
                   [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    return
def register_Ns3PacketLossCounter_methods(root_module, cls):
    """Register Python bindings for ns3::PacketLossCounter (packet-loss-counter.h, module 'applications').

    Adds the copy and (bitmapSize) constructors plus the GetBitMapSize,
    GetLost, NotifyReceived and SetBitMapSize members to *cls*.
    ``root_module`` is unused here.  Generated by PyBindGen; call order is
    significant for overload registration.
    """
    ## packet-loss-counter.h (module 'applications'): ns3::PacketLossCounter::PacketLossCounter(ns3::PacketLossCounter const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketLossCounter const &', 'arg0')])
    ## packet-loss-counter.h (module 'applications'): ns3::PacketLossCounter::PacketLossCounter(uint8_t bitmapSize) [constructor]
    cls.add_constructor([param('uint8_t', 'bitmapSize')])
    ## packet-loss-counter.h (module 'applications'): uint16_t ns3::PacketLossCounter::GetBitMapSize() const [member function]
    cls.add_method('GetBitMapSize',
                   'uint16_t',
                   [],
                   is_const=True)
    ## packet-loss-counter.h (module 'applications'): uint32_t ns3::PacketLossCounter::GetLost() const [member function]
    cls.add_method('GetLost',
                   'uint32_t',
                   [],
                   is_const=True)
    ## packet-loss-counter.h (module 'applications'): void ns3::PacketLossCounter::NotifyReceived(uint32_t seq) [member function]
    cls.add_method('NotifyReceived',
                   'void',
                   [param('uint32_t', 'seq')])
    ## packet-loss-counter.h (module 'applications'): void ns3::PacketLossCounter::SetBitMapSize(uint16_t size) [member function]
    cls.add_method('SetBitMapSize',
                   'void',
                   [param('uint16_t', 'size')])
    return
def register_Ns3PacketSinkHelper_methods(root_module, cls):
    """Register Python bindings for ns3::PacketSinkHelper (packet-sink-helper.h)."""
    # PacketSinkHelper(PacketSinkHelper const &) / PacketSinkHelper(std::string protocol, ns3::Address address)
    cls.add_constructor([param('ns3::PacketSinkHelper const &', 'arg0')])
    cls.add_constructor([param('std::string', 'protocol'), param('ns3::Address', 'address')])
    # ApplicationContainer Install(...) const — container, single node, or node-by-name overloads.
    cls.add_method('Install', 'ns3::ApplicationContainer', [param('ns3::NodeContainer', 'c')], is_const=True)
    cls.add_method('Install', 'ns3::ApplicationContainer', [param('ns3::Ptr< ns3::Node >', 'node')], is_const=True)
    cls.add_method('Install', 'ns3::ApplicationContainer', [param('std::string', 'nodeName')], is_const=True)
    # void SetAttribute(std::string name, ns3::AttributeValue const & value)
    cls.add_method('SetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    return
def register_Ns3Ping6Helper_methods(root_module, cls):
    """Register Python bindings for ns3::Ping6Helper (ping6-helper.h)."""
    # Ping6Helper(Ping6Helper const &) / Ping6Helper()
    cls.add_constructor([param('ns3::Ping6Helper const &', 'arg0')])
    cls.add_constructor([])
    # ApplicationContainer Install(NodeContainer c)
    cls.add_method('Install', 'ns3::ApplicationContainer', [param('ns3::NodeContainer', 'c')])
    # void SetAttribute(std::string name, ns3::AttributeValue const & value)
    cls.add_method('SetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    # Configuration setters: interface index, local/remote IPv6 addresses, router list.
    cls.add_method('SetIfIndex', 'void', [param('uint32_t', 'ifIndex')])
    cls.add_method('SetLocal', 'void', [param('ns3::Ipv6Address', 'ip')])
    cls.add_method('SetRemote', 'void', [param('ns3::Ipv6Address', 'ip')])
    cls.add_method('SetRoutersAddress', 'void', [param('std::vector< ns3::Ipv6Address >', 'routers')])
    return
def register_Ns3RandomVariable_methods(root_module, cls):
    """Register Python bindings for ns3::RandomVariable (random-variable.h)."""
    cls.add_output_stream_operator()
    # RandomVariable() / RandomVariable(RandomVariable const & o)
    cls.add_constructor([])
    cls.add_constructor([param('ns3::RandomVariable const &', 'o')])
    # uint32_t GetInteger() const / double GetValue() const
    cls.add_method('GetInteger', 'uint32_t', [], is_const=True)
    cls.add_method('GetValue', 'double', [], is_const=True)
    return
def register_Ns3SeedManager_methods(root_module, cls):
    """Register Python bindings for ns3::SeedManager (random-variable.h)."""
    # SeedManager() / SeedManager(SeedManager const &)
    cls.add_constructor([])
    cls.add_constructor([param('ns3::SeedManager const &', 'arg0')])
    # All SeedManager operations are static.
    cls.add_method('CheckSeed', 'bool', [param('uint32_t', 'seed')], is_static=True)
    cls.add_method('GetRun', 'uint32_t', [], is_static=True)
    cls.add_method('GetSeed', 'uint32_t', [], is_static=True)
    cls.add_method('SetRun', 'void', [param('uint32_t', 'run')], is_static=True)
    cls.add_method('SetSeed', 'void', [param('uint32_t', 'seed')], is_static=True)
    return
def register_Ns3SequentialVariable_methods(root_module, cls):
    """Register Python bindings for ns3::SequentialVariable (random-variable.h)."""
    # Copy constructor, then the two value constructors (fixed or random increment).
    cls.add_constructor([param('ns3::SequentialVariable const &', 'arg0')])
    cls.add_constructor([param('double', 'f'), param('double', 'l'), param('double', 'i', default_value='1'), param('uint32_t', 'c', default_value='1')])
    cls.add_constructor([param('double', 'f'), param('double', 'l'), param('ns3::RandomVariable const &', 'i'), param('uint32_t', 'c', default_value='1')])
    return
def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls):
    """Register Python bindings for ns3::SimpleRefCount<Object, ObjectBase, ObjectDeleter> (simple-ref-count.h)."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')])
    # static void Cleanup()
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3Simulator_methods(root_module, cls):
    """Register Python bindings for ns3::Simulator (simulator.h); all its methods are static."""
    # Simulator(Simulator const &)
    cls.add_constructor([param('ns3::Simulator const &', 'arg0')])
    # Event management.
    cls.add_method('Cancel', 'void', [param('ns3::EventId const &', 'id')], is_static=True)
    cls.add_method('Destroy', 'void', [], is_static=True)
    # Introspection / queries.
    cls.add_method('GetContext', 'uint32_t', [], is_static=True)
    cls.add_method('GetDelayLeft', 'ns3::Time', [param('ns3::EventId const &', 'id')], is_static=True)
    cls.add_method('GetImplementation', 'ns3::Ptr< ns3::SimulatorImpl >', [], is_static=True)
    cls.add_method('GetMaximumSimulationTime', 'ns3::Time', [], is_static=True)
    cls.add_method('GetSystemId', 'uint32_t', [], is_static=True)
    cls.add_method('IsExpired', 'bool', [param('ns3::EventId const &', 'id')], is_static=True)
    cls.add_method('IsFinished', 'bool', [], is_static=True)
    # Next() is flagged deprecated in the upstream header.
    cls.add_method('Next', 'ns3::Time', [], is_static=True, deprecated=True)
    cls.add_method('Now', 'ns3::Time', [], is_static=True)
    cls.add_method('Remove', 'void', [param('ns3::EventId const &', 'id')], is_static=True)
    # RunOneEvent() is flagged deprecated in the upstream header.
    cls.add_method('RunOneEvent', 'void', [], is_static=True, deprecated=True)
    # Configuration.
    cls.add_method('SetImplementation', 'void', [param('ns3::Ptr< ns3::SimulatorImpl >', 'impl')], is_static=True)
    cls.add_method('SetScheduler', 'void', [param('ns3::ObjectFactory', 'schedulerFactory')], is_static=True)
    # Stop() overloads: immediate and at a given time.
    cls.add_method('Stop', 'void', [], is_static=True)
    cls.add_method('Stop', 'void', [param('ns3::Time const &', 'time')], is_static=True)
    return
def register_Ns3Tag_methods(root_module, cls):
    """Register Python bindings for ns3::Tag (tag.h), an abstract base with pure-virtual serialization hooks."""
    # Tag() / Tag(Tag const &)
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Tag const &', 'arg0')])
    # Pure-virtual serialization interface.
    cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    # static TypeId GetTypeId()
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3TagBuffer_methods(root_module, cls):
    """Register Python bindings for ns3::TagBuffer (tag-buffer.h)."""
    # TagBuffer(TagBuffer const &) / TagBuffer(uint8_t * start, uint8_t * end)
    cls.add_constructor([param('ns3::TagBuffer const &', 'arg0')])
    cls.add_constructor([param('uint8_t *', 'start'), param('uint8_t *', 'end')])
    # void CopyFrom(TagBuffer o)
    cls.add_method('CopyFrom', 'void', [param('ns3::TagBuffer', 'o')])
    # Read side: raw buffer plus fixed-width scalar readers.
    cls.add_method('Read', 'void', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
    cls.add_method('ReadDouble', 'double', [])
    cls.add_method('ReadU16', 'uint16_t', [])
    cls.add_method('ReadU32', 'uint32_t', [])
    cls.add_method('ReadU64', 'uint64_t', [])
    cls.add_method('ReadU8', 'uint8_t', [])
    # void TrimAtEnd(uint32_t trim)
    cls.add_method('TrimAtEnd', 'void', [param('uint32_t', 'trim')])
    # Write side: raw buffer plus fixed-width scalar writers.
    cls.add_method('Write', 'void', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    cls.add_method('WriteDouble', 'void', [param('double', 'v')])
    cls.add_method('WriteU16', 'void', [param('uint16_t', 'data')])
    cls.add_method('WriteU32', 'void', [param('uint32_t', 'data')])
    cls.add_method('WriteU64', 'void', [param('uint64_t', 'v')])
    cls.add_method('WriteU8', 'void', [param('uint8_t', 'v')])
    return
def register_Ns3Time_methods(root_module, cls):
    """Register Python bindings for ns3::Time (nstime.h): operators, constructors and accessors."""
    # Binary arithmetic operators (Time op Time -> Time), registered in the original order.
    for _op in ('*', '+', '-', '/'):
        cls.add_binary_numeric_operator(_op, root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', 'right'))
    for _op in ('<', '>', '!='):
        cls.add_binary_comparison_operator(_op)
    # In-place arithmetic operators (Time op= Time).
    for _op in ('*=', '+=', '-=', '/='):
        cls.add_inplace_numeric_operator(_op, param('ns3::Time const &', 'right'))
    cls.add_output_stream_operator()
    for _op in ('<=', '==', '>='):
        cls.add_binary_comparison_operator(_op)
    # Constructors: default, copy, from HighPrecision, from a unit-suffixed string.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Time const &', 'o')])
    cls.add_constructor([param('ns3::HighPrecision const &', 'data')])
    cls.add_constructor([param('std::string const &', 's')])
    # int Compare(Time const & o) const
    cls.add_method('Compare', 'int', [param('ns3::Time const &', 'o')], is_const=True)
    # Static conversions from scalar + unit.
    cls.add_method('FromDouble', 'ns3::Time', [param('double', 'value'), param('ns3::Time::Unit', 'timeUnit')], is_static=True)
    cls.add_method('FromInteger', 'ns3::Time', [param('uint64_t', 'value'), param('ns3::Time::Unit', 'timeUnit')], is_static=True)
    # Unit accessors.
    cls.add_method('GetFemtoSeconds', 'int64_t', [], is_const=True)
    cls.add_method('GetHighPrecision', 'ns3::HighPrecision const &', [], is_const=True)
    cls.add_method('GetMicroSeconds', 'int64_t', [], is_const=True)
    cls.add_method('GetMilliSeconds', 'int64_t', [], is_const=True)
    cls.add_method('GetNanoSeconds', 'int64_t', [], is_const=True)
    cls.add_method('GetPicoSeconds', 'int64_t', [], is_const=True)
    cls.add_method('GetResolution', 'ns3::Time::Unit', [], is_static=True)
    cls.add_method('GetSeconds', 'double', [], is_const=True)
    cls.add_method('GetTimeStep', 'int64_t', [], is_const=True)
    # Sign predicates.
    cls.add_method('IsNegative', 'bool', [], is_const=True)
    cls.add_method('IsPositive', 'bool', [], is_const=True)
    cls.add_method('IsStrictlyNegative', 'bool', [], is_const=True)
    cls.add_method('IsStrictlyPositive', 'bool', [], is_const=True)
    cls.add_method('IsZero', 'bool', [], is_const=True)
    # HighPrecision * PeekHighPrecision()
    cls.add_method('PeekHighPrecision', 'ns3::HighPrecision *', [])
    # static void SetResolution(Time::Unit resolution)
    cls.add_method('SetResolution', 'void', [param('ns3::Time::Unit', 'resolution')], is_static=True)
    # Static conversions to scalar + unit.
    cls.add_method('ToDouble', 'double', [param('ns3::Time const &', 'time'), param('ns3::Time::Unit', 'timeUnit')], is_static=True)
    cls.add_method('ToInteger', 'uint64_t', [param('ns3::Time const &', 'time'), param('ns3::Time::Unit', 'timeUnit')], is_static=True)
    return
def register_Ns3TriangularVariable_methods(root_module, cls):
    """Register Python bindings for ns3::TriangularVariable (random-variable.h)."""
    # Copy, default, and (s, l, mean) constructors.
    cls.add_constructor([param('ns3::TriangularVariable const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('double', 's'), param('double', 'l'), param('double', 'mean')])
    return
def register_Ns3TypeId_methods(root_module, cls):
    """Register Python bindings for ns3::TypeId (type-id.h)."""
    # Comparison and stream operators, registered in the original order.
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    # Constructors: by name, default, copy.
    cls.add_constructor([param('char const *', 'name')])
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TypeId const &', 'o')])
    # Attribute / trace-source registration (fluent: each returns ns3::TypeId).
    cls.add_method('AddAttribute', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
    cls.add_method('AddAttribute', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
    cls.add_method('AddTraceSource', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')])
    # Per-attribute accessors, indexed by attribute position.
    cls.add_method('GetAttributeAccessor', 'ns3::Ptr< ns3::AttributeAccessor const >', [param('uint32_t', 'i')], is_const=True)
    cls.add_method('GetAttributeChecker', 'ns3::Ptr< ns3::AttributeChecker const >', [param('uint32_t', 'i')], is_const=True)
    cls.add_method('GetAttributeFlags', 'uint32_t', [param('uint32_t', 'i')], is_const=True)
    cls.add_method('GetAttributeFullName', 'std::string', [param('uint32_t', 'i')], is_const=True)
    cls.add_method('GetAttributeHelp', 'std::string', [param('uint32_t', 'i')], is_const=True)
    cls.add_method('GetAttributeInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', [param('uint32_t', 'i')], is_const=True)
    cls.add_method('GetAttributeN', 'uint32_t', [], is_const=True)
    cls.add_method('GetAttributeName', 'std::string', [param('uint32_t', 'i')], is_const=True)
    # Callback<ObjectBase*, empty x9> GetConstructor() const
    cls.add_method('GetConstructor', 'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', [], is_const=True)
    # Identity and hierarchy queries.
    cls.add_method('GetGroupName', 'std::string', [], is_const=True)
    cls.add_method('GetName', 'std::string', [], is_const=True)
    cls.add_method('GetParent', 'ns3::TypeId', [], is_const=True)
    cls.add_method('GetRegistered', 'ns3::TypeId', [param('uint32_t', 'i')], is_static=True)
    cls.add_method('GetRegisteredN', 'uint32_t', [], is_static=True)
    # Per-trace-source accessors, indexed by position.
    cls.add_method('GetTraceSourceAccessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', [param('uint32_t', 'i')], is_const=True)
    cls.add_method('GetTraceSourceHelp', 'std::string', [param('uint32_t', 'i')], is_const=True)
    cls.add_method('GetTraceSourceN', 'uint32_t', [], is_const=True)
    cls.add_method('GetTraceSourceName', 'std::string', [param('uint32_t', 'i')], is_const=True)
    cls.add_method('GetUid', 'uint16_t', [], is_const=True)
    cls.add_method('HasConstructor', 'bool', [], is_const=True)
    cls.add_method('HasParent', 'bool', [], is_const=True)
    cls.add_method('HideFromDocumentation', 'ns3::TypeId', [])
    cls.add_method('IsChildOf', 'bool', [param('ns3::TypeId', 'other')], is_const=True)
    # Lookups by name; LookupAttributeByName's out-param keeps ownership with the caller.
    cls.add_method('LookupAttributeByFullName', 'bool', [param('std::string', 'fullName'), param('ns3::TypeId::AttributeInfo *', 'info')], is_static=True)
    cls.add_method('LookupAttributeByName', 'bool', [param('std::string', 'name'), param('ns3::TypeId::AttributeInfo *', 'info', transfer_ownership=False)], is_const=True)
    cls.add_method('LookupByName', 'ns3::TypeId', [param('std::string', 'name')], is_static=True)
    cls.add_method('LookupTraceSourceByName', 'ns3::Ptr< ns3::TraceSourceAccessor const >', [param('std::string', 'name')], is_const=True)
    cls.add_method('MustHideFromDocumentation', 'bool', [], is_const=True)
    # Mutators.
    cls.add_method('SetGroupName', 'ns3::TypeId', [param('std::string', 'groupName')])
    cls.add_method('SetParent', 'ns3::TypeId', [param('ns3::TypeId', 'tid')])
    cls.add_method('SetUid', 'void', [param('uint16_t', 'tid')])
    return
def register_Ns3TypeIdAttributeInfo_methods(root_module, cls):
    """Register Python bindings for ns3::TypeId::AttributeInfo (type-id.h)."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TypeId::AttributeInfo const &', 'arg0')])
    # Public data members, exposed as mutable instance attributes.
    cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False)
    cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
    cls.add_instance_attribute('flags', 'uint32_t', is_const=False)
    cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
    return
def register_Ns3UdpClientHelper_methods(root_module, cls):
    """Register Python bindings for ns3::UdpClientHelper (udp-client-server-helper.h)."""
    # Copy, default, and (Ipv4Address ip, uint16_t port) constructors.
    cls.add_constructor([param('ns3::UdpClientHelper const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv4Address', 'ip'), param('uint16_t', 'port')])
    # ApplicationContainer Install(NodeContainer c)
    cls.add_method('Install', 'ns3::ApplicationContainer', [param('ns3::NodeContainer', 'c')])
    # void SetAttribute(std::string name, ns3::AttributeValue const & value)
    cls.add_method('SetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    return
def register_Ns3UdpEchoClientHelper_methods(root_module, cls):
    """Register ns3::UdpEchoClientHelper constructors and methods on *cls*.

    PyBindGen-generated; *root_module* is unused here. Includes the three
    Install overloads and the three SetFill overloads — registration order
    matters for overload resolution, so do not reorder by hand.
    """
    ## udp-echo-helper.h (module 'applications'): ns3::UdpEchoClientHelper::UdpEchoClientHelper(ns3::UdpEchoClientHelper const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::UdpEchoClientHelper const &', 'arg0')])
    ## udp-echo-helper.h (module 'applications'): ns3::UdpEchoClientHelper::UdpEchoClientHelper(ns3::Ipv4Address ip, uint16_t port) [constructor]
    cls.add_constructor([param('ns3::Ipv4Address', 'ip'), param('uint16_t', 'port')])
    ## udp-echo-helper.h (module 'applications'): ns3::ApplicationContainer ns3::UdpEchoClientHelper::Install(ns3::Ptr<ns3::Node> node) const [member function]
    cls.add_method('Install', 
                   'ns3::ApplicationContainer', 
                   [param('ns3::Ptr< ns3::Node >', 'node')], 
                   is_const=True)
    ## udp-echo-helper.h (module 'applications'): ns3::ApplicationContainer ns3::UdpEchoClientHelper::Install(std::string nodeName) const [member function]
    cls.add_method('Install', 
                   'ns3::ApplicationContainer', 
                   [param('std::string', 'nodeName')], 
                   is_const=True)
    ## udp-echo-helper.h (module 'applications'): ns3::ApplicationContainer ns3::UdpEchoClientHelper::Install(ns3::NodeContainer c) const [member function]
    cls.add_method('Install', 
                   'ns3::ApplicationContainer', 
                   [param('ns3::NodeContainer', 'c')], 
                   is_const=True)
    ## udp-echo-helper.h (module 'applications'): void ns3::UdpEchoClientHelper::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('SetAttribute', 
                   'void', 
                   [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    ## udp-echo-helper.h (module 'applications'): void ns3::UdpEchoClientHelper::SetFill(ns3::Ptr<ns3::Application> app, std::string fill) [member function]
    cls.add_method('SetFill', 
                   'void', 
                   [param('ns3::Ptr< ns3::Application >', 'app'), param('std::string', 'fill')])
    ## udp-echo-helper.h (module 'applications'): void ns3::UdpEchoClientHelper::SetFill(ns3::Ptr<ns3::Application> app, uint8_t fill, uint32_t dataLength) [member function]
    cls.add_method('SetFill', 
                   'void', 
                   [param('ns3::Ptr< ns3::Application >', 'app'), param('uint8_t', 'fill'), param('uint32_t', 'dataLength')])
    ## udp-echo-helper.h (module 'applications'): void ns3::UdpEchoClientHelper::SetFill(ns3::Ptr<ns3::Application> app, uint8_t * fill, uint32_t fillLength, uint32_t dataLength) [member function]
    cls.add_method('SetFill', 
                   'void', 
                   [param('ns3::Ptr< ns3::Application >', 'app'), param('uint8_t *', 'fill'), param('uint32_t', 'fillLength'), param('uint32_t', 'dataLength')])
    return
def register_Ns3UdpEchoServerHelper_methods(root_module, cls):
    """Register ns3::UdpEchoServerHelper constructors and methods on *cls*.

    PyBindGen-generated; *root_module* is unused here. Do not reorder the
    registration statements by hand.
    """
    ## udp-echo-helper.h (module 'applications'): ns3::UdpEchoServerHelper::UdpEchoServerHelper(ns3::UdpEchoServerHelper const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::UdpEchoServerHelper const &', 'arg0')])
    ## udp-echo-helper.h (module 'applications'): ns3::UdpEchoServerHelper::UdpEchoServerHelper(uint16_t port) [constructor]
    cls.add_constructor([param('uint16_t', 'port')])
    ## udp-echo-helper.h (module 'applications'): ns3::ApplicationContainer ns3::UdpEchoServerHelper::Install(ns3::Ptr<ns3::Node> node) const [member function]
    cls.add_method('Install', 
                   'ns3::ApplicationContainer', 
                   [param('ns3::Ptr< ns3::Node >', 'node')], 
                   is_const=True)
    ## udp-echo-helper.h (module 'applications'): ns3::ApplicationContainer ns3::UdpEchoServerHelper::Install(std::string nodeName) const [member function]
    cls.add_method('Install', 
                   'ns3::ApplicationContainer', 
                   [param('std::string', 'nodeName')], 
                   is_const=True)
    ## udp-echo-helper.h (module 'applications'): ns3::ApplicationContainer ns3::UdpEchoServerHelper::Install(ns3::NodeContainer c) const [member function]
    cls.add_method('Install', 
                   'ns3::ApplicationContainer', 
                   [param('ns3::NodeContainer', 'c')], 
                   is_const=True)
    ## udp-echo-helper.h (module 'applications'): void ns3::UdpEchoServerHelper::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('SetAttribute', 
                   'void', 
                   [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    return
def register_Ns3UdpServerHelper_methods(root_module, cls):
    """Register ns3::UdpServerHelper constructors and methods on *cls*.

    PyBindGen-generated; *root_module* is unused here. Do not reorder the
    registration statements by hand.
    """
    ## udp-client-server-helper.h (module 'applications'): ns3::UdpServerHelper::UdpServerHelper(ns3::UdpServerHelper const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::UdpServerHelper const &', 'arg0')])
    ## udp-client-server-helper.h (module 'applications'): ns3::UdpServerHelper::UdpServerHelper() [constructor]
    cls.add_constructor([])
    ## udp-client-server-helper.h (module 'applications'): ns3::UdpServerHelper::UdpServerHelper(uint16_t port) [constructor]
    cls.add_constructor([param('uint16_t', 'port')])
    ## udp-client-server-helper.h (module 'applications'): ns3::Ptr<ns3::UdpServer> ns3::UdpServerHelper::GetServer() [member function]
    cls.add_method('GetServer', 
                   'ns3::Ptr< ns3::UdpServer >', 
                   [])
    ## udp-client-server-helper.h (module 'applications'): ns3::ApplicationContainer ns3::UdpServerHelper::Install(ns3::NodeContainer c) [member function]
    cls.add_method('Install', 
                   'ns3::ApplicationContainer', 
                   [param('ns3::NodeContainer', 'c')])
    ## udp-client-server-helper.h (module 'applications'): void ns3::UdpServerHelper::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('SetAttribute', 
                   'void', 
                   [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    return
def register_Ns3UdpTraceClientHelper_methods(root_module, cls):
    """Register ns3::UdpTraceClientHelper constructors and methods on *cls*.

    PyBindGen-generated; *root_module* is unused here. Do not reorder the
    registration statements by hand.
    """
    ## udp-client-server-helper.h (module 'applications'): ns3::UdpTraceClientHelper::UdpTraceClientHelper(ns3::UdpTraceClientHelper const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::UdpTraceClientHelper const &', 'arg0')])
    ## udp-client-server-helper.h (module 'applications'): ns3::UdpTraceClientHelper::UdpTraceClientHelper() [constructor]
    cls.add_constructor([])
    ## udp-client-server-helper.h (module 'applications'): ns3::UdpTraceClientHelper::UdpTraceClientHelper(ns3::Ipv4Address ip, uint16_t port, std::string filename) [constructor]
    cls.add_constructor([param('ns3::Ipv4Address', 'ip'), param('uint16_t', 'port'), param('std::string', 'filename')])
    ## udp-client-server-helper.h (module 'applications'): ns3::ApplicationContainer ns3::UdpTraceClientHelper::Install(ns3::NodeContainer c) [member function]
    cls.add_method('Install', 
                   'ns3::ApplicationContainer', 
                   [param('ns3::NodeContainer', 'c')])
    ## udp-client-server-helper.h (module 'applications'): void ns3::UdpTraceClientHelper::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('SetAttribute', 
                   'void', 
                   [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    return
def register_Ns3UniformVariable_methods(root_module, cls):
    """Register ns3::UniformVariable constructors and methods on *cls*.

    PyBindGen-generated; *root_module* is unused here. The two GetValue
    overloads must stay in this order — do not reorder by hand.
    """
    ## random-variable.h (module 'core'): ns3::UniformVariable::UniformVariable(ns3::UniformVariable const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::UniformVariable const &', 'arg0')])
    ## random-variable.h (module 'core'): ns3::UniformVariable::UniformVariable() [constructor]
    cls.add_constructor([])
    ## random-variable.h (module 'core'): ns3::UniformVariable::UniformVariable(double s, double l) [constructor]
    cls.add_constructor([param('double', 's'), param('double', 'l')])
    ## random-variable.h (module 'core'): uint32_t ns3::UniformVariable::GetInteger(uint32_t s, uint32_t l) [member function]
    cls.add_method('GetInteger', 
                   'uint32_t', 
                   [param('uint32_t', 's'), param('uint32_t', 'l')])
    ## random-variable.h (module 'core'): double ns3::UniformVariable::GetValue() const [member function]
    cls.add_method('GetValue', 
                   'double', 
                   [], 
                   is_const=True)
    ## random-variable.h (module 'core'): double ns3::UniformVariable::GetValue(double s, double l) [member function]
    cls.add_method('GetValue', 
                   'double', 
                   [param('double', 's'), param('double', 'l')])
    return
def register_Ns3UnsafeAttributeList_methods(root_module, cls):
    """Register ns3::UnsafeAttributeList constructors and methods on *cls*.

    PyBindGen-generated; *root_module* is unused here. Do not reorder the
    registration statements by hand.
    """
    ## attribute-list.h (module 'core'): ns3::UnsafeAttributeList::UnsafeAttributeList() [constructor]
    cls.add_constructor([])
    ## attribute-list.h (module 'core'): ns3::UnsafeAttributeList::UnsafeAttributeList(ns3::UnsafeAttributeList const & o) [copy constructor]
    cls.add_constructor([param('ns3::UnsafeAttributeList const &', 'o')])
    ## attribute-list.h (module 'core'): ns3::AttributeList ns3::UnsafeAttributeList::GetSafe(std::string name) const [member function]
    cls.add_method('GetSafe', 
                   'ns3::AttributeList', 
                   [param('std::string', 'name')], 
                   is_const=True)
    ## attribute-list.h (module 'core'): void ns3::UnsafeAttributeList::Set(std::string name, ns3::AttributeValue const & param) [member function]
    cls.add_method('Set', 
                   'void', 
                   [param('std::string', 'name'), param('ns3::AttributeValue const &', 'param')])
    return
def register_Ns3V4PingHelper_methods(root_module, cls):
    """Register ns3::V4PingHelper constructors and methods on *cls*.

    PyBindGen-generated; *root_module* is unused here. The three Install
    overloads must stay in this order — do not reorder by hand.
    """
    ## v4ping-helper.h (module 'applications'): ns3::V4PingHelper::V4PingHelper(ns3::V4PingHelper const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::V4PingHelper const &', 'arg0')])
    ## v4ping-helper.h (module 'applications'): ns3::V4PingHelper::V4PingHelper(ns3::Ipv4Address remote) [constructor]
    cls.add_constructor([param('ns3::Ipv4Address', 'remote')])
    ## v4ping-helper.h (module 'applications'): ns3::ApplicationContainer ns3::V4PingHelper::Install(ns3::NodeContainer nodes) const [member function]
    cls.add_method('Install', 
                   'ns3::ApplicationContainer', 
                   [param('ns3::NodeContainer', 'nodes')], 
                   is_const=True)
    ## v4ping-helper.h (module 'applications'): ns3::ApplicationContainer ns3::V4PingHelper::Install(ns3::Ptr<ns3::Node> node) const [member function]
    cls.add_method('Install', 
                   'ns3::ApplicationContainer', 
                   [param('ns3::Ptr< ns3::Node >', 'node')], 
                   is_const=True)
    ## v4ping-helper.h (module 'applications'): ns3::ApplicationContainer ns3::V4PingHelper::Install(std::string nodeName) const [member function]
    cls.add_method('Install', 
                   'ns3::ApplicationContainer', 
                   [param('std::string', 'nodeName')], 
                   is_const=True)
    ## v4ping-helper.h (module 'applications'): void ns3::V4PingHelper::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('SetAttribute', 
                   'void', 
                   [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    return
def register_Ns3WeibullVariable_methods(root_module, cls):
    """Register ns3::WeibullVariable constructors on *cls*.

    PyBindGen-generated; *root_module* is unused here. Do not reorder the
    registration statements by hand.
    """
    ## random-variable.h (module 'core'): ns3::WeibullVariable::WeibullVariable(ns3::WeibullVariable const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::WeibullVariable const &', 'arg0')])
    ## random-variable.h (module 'core'): ns3::WeibullVariable::WeibullVariable() [constructor]
    cls.add_constructor([])
    ## random-variable.h (module 'core'): ns3::WeibullVariable::WeibullVariable(double m) [constructor]
    cls.add_constructor([param('double', 'm')])
    ## random-variable.h (module 'core'): ns3::WeibullVariable::WeibullVariable(double m, double s) [constructor]
    cls.add_constructor([param('double', 'm'), param('double', 's')])
    ## random-variable.h (module 'core'): ns3::WeibullVariable::WeibullVariable(double m, double s, double b) [constructor]
    cls.add_constructor([param('double', 'm'), param('double', 's'), param('double', 'b')])
    return
def register_Ns3ZetaVariable_methods(root_module, cls):
    """Register ns3::ZetaVariable constructors on *cls*.

    PyBindGen-generated; *root_module* is unused here.
    """
    ## random-variable.h (module 'core'): ns3::ZetaVariable::ZetaVariable(ns3::ZetaVariable const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ZetaVariable const &', 'arg0')])
    ## random-variable.h (module 'core'): ns3::ZetaVariable::ZetaVariable(double alpha) [constructor]
    cls.add_constructor([param('double', 'alpha')])
    ## random-variable.h (module 'core'): ns3::ZetaVariable::ZetaVariable() [constructor]
    cls.add_constructor([])
    return
def register_Ns3ZipfVariable_methods(root_module, cls):
    """Register ns3::ZipfVariable constructors on *cls*.

    PyBindGen-generated; *root_module* is unused here.
    """
    ## random-variable.h (module 'core'): ns3::ZipfVariable::ZipfVariable(ns3::ZipfVariable const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ZipfVariable const &', 'arg0')])
    ## random-variable.h (module 'core'): ns3::ZipfVariable::ZipfVariable(long int N, double alpha) [constructor]
    cls.add_constructor([param('long int', 'N'), param('double', 'alpha')])
    ## random-variable.h (module 'core'): ns3::ZipfVariable::ZipfVariable() [constructor]
    cls.add_constructor([])
    return
def register_Ns3Empty_methods(root_module, cls):
    """Register ns3::empty constructors on *cls*.

    PyBindGen-generated; *root_module* is unused here.
    """
    ## empty.h (module 'core'): ns3::empty::empty() [constructor]
    cls.add_constructor([])
    ## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::empty const &', 'arg0')])
    return
def register_Ns3Chunk_methods(root_module, cls):
    """Register ns3::Chunk constructors and (pure-virtual) methods on *cls*.

    PyBindGen-generated; *root_module* is unused here. Do not reorder the
    registration statements by hand.
    """
    ## chunk.h (module 'network'): ns3::Chunk::Chunk() [constructor]
    cls.add_constructor([])
    ## chunk.h (module 'network'): ns3::Chunk::Chunk(ns3::Chunk const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Chunk const &', 'arg0')])
    ## chunk.h (module 'network'): uint32_t ns3::Chunk::Deserialize(ns3::Buffer::Iterator start) [member function]
    cls.add_method('Deserialize', 
                   'uint32_t', 
                   [param('ns3::Buffer::Iterator', 'start')], 
                   is_pure_virtual=True, is_virtual=True)
    ## chunk.h (module 'network'): static ns3::TypeId ns3::Chunk::GetTypeId() [member function]
    cls.add_method('GetTypeId', 
                   'ns3::TypeId', 
                   [], 
                   is_static=True)
    ## chunk.h (module 'network'): void ns3::Chunk::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 
                   'void', 
                   [param('std::ostream &', 'os')], 
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3ConstantVariable_methods(root_module, cls):
    """Register ns3::ConstantVariable constructors and methods on *cls*.

    PyBindGen-generated; *root_module* is unused here.
    """
    ## random-variable.h (module 'core'): ns3::ConstantVariable::ConstantVariable(ns3::ConstantVariable const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ConstantVariable const &', 'arg0')])
    ## random-variable.h (module 'core'): ns3::ConstantVariable::ConstantVariable() [constructor]
    cls.add_constructor([])
    ## random-variable.h (module 'core'): ns3::ConstantVariable::ConstantVariable(double c) [constructor]
    cls.add_constructor([param('double', 'c')])
    ## random-variable.h (module 'core'): void ns3::ConstantVariable::SetConstant(double c) [member function]
    cls.add_method('SetConstant', 
                   'void', 
                   [param('double', 'c')])
    return
def register_Ns3DeterministicVariable_methods(root_module, cls):
    """Register ns3::DeterministicVariable constructors on *cls*.

    PyBindGen-generated; *root_module* is unused here.
    """
    ## random-variable.h (module 'core'): ns3::DeterministicVariable::DeterministicVariable(ns3::DeterministicVariable const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::DeterministicVariable const &', 'arg0')])
    ## random-variable.h (module 'core'): ns3::DeterministicVariable::DeterministicVariable(double * d, uint32_t c) [constructor]
    cls.add_constructor([param('double *', 'd'), param('uint32_t', 'c')])
    return
def register_Ns3EmpiricalVariable_methods(root_module, cls):
    """Register ns3::EmpiricalVariable constructors and methods on *cls*.

    PyBindGen-generated; *root_module* is unused here.
    """
    ## random-variable.h (module 'core'): ns3::EmpiricalVariable::EmpiricalVariable(ns3::EmpiricalVariable const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::EmpiricalVariable const &', 'arg0')])
    ## random-variable.h (module 'core'): ns3::EmpiricalVariable::EmpiricalVariable() [constructor]
    cls.add_constructor([])
    ## random-variable.h (module 'core'): void ns3::EmpiricalVariable::CDF(double v, double c) [member function]
    cls.add_method('CDF', 
                   'void', 
                   [param('double', 'v'), param('double', 'c')])
    return
def register_Ns3ErlangVariable_methods(root_module, cls):
    """Register ns3::ErlangVariable constructors and methods on *cls*.

    PyBindGen-generated; *root_module* is unused here. The two GetValue
    overloads must stay in this order — do not reorder by hand.
    """
    ## random-variable.h (module 'core'): ns3::ErlangVariable::ErlangVariable(ns3::ErlangVariable const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ErlangVariable const &', 'arg0')])
    ## random-variable.h (module 'core'): ns3::ErlangVariable::ErlangVariable() [constructor]
    cls.add_constructor([])
    ## random-variable.h (module 'core'): ns3::ErlangVariable::ErlangVariable(unsigned int k, double lambda) [constructor]
    cls.add_constructor([param('unsigned int', 'k'), param('double', 'lambda')])
    ## random-variable.h (module 'core'): double ns3::ErlangVariable::GetValue() const [member function]
    cls.add_method('GetValue', 
                   'double', 
                   [], 
                   is_const=True)
    ## random-variable.h (module 'core'): double ns3::ErlangVariable::GetValue(unsigned int k, double lambda) const [member function]
    cls.add_method('GetValue', 
                   'double', 
                   [param('unsigned int', 'k'), param('double', 'lambda')], 
                   is_const=True)
    return
def register_Ns3ExponentialVariable_methods(root_module, cls):
    """Register ns3::ExponentialVariable constructors on *cls*.

    PyBindGen-generated; *root_module* is unused here.
    """
    ## random-variable.h (module 'core'): ns3::ExponentialVariable::ExponentialVariable(ns3::ExponentialVariable const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ExponentialVariable const &', 'arg0')])
    ## random-variable.h (module 'core'): ns3::ExponentialVariable::ExponentialVariable() [constructor]
    cls.add_constructor([])
    ## random-variable.h (module 'core'): ns3::ExponentialVariable::ExponentialVariable(double m) [constructor]
    cls.add_constructor([param('double', 'm')])
    ## random-variable.h (module 'core'): ns3::ExponentialVariable::ExponentialVariable(double m, double b) [constructor]
    cls.add_constructor([param('double', 'm'), param('double', 'b')])
    return
def register_Ns3GammaVariable_methods(root_module, cls):
    """Register ns3::GammaVariable constructors and methods on *cls*.

    PyBindGen-generated; *root_module* is unused here. The two GetValue
    overloads must stay in this order — do not reorder by hand.
    """
    ## random-variable.h (module 'core'): ns3::GammaVariable::GammaVariable(ns3::GammaVariable const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::GammaVariable const &', 'arg0')])
    ## random-variable.h (module 'core'): ns3::GammaVariable::GammaVariable() [constructor]
    cls.add_constructor([])
    ## random-variable.h (module 'core'): ns3::GammaVariable::GammaVariable(double alpha, double beta) [constructor]
    cls.add_constructor([param('double', 'alpha'), param('double', 'beta')])
    ## random-variable.h (module 'core'): double ns3::GammaVariable::GetValue() const [member function]
    cls.add_method('GetValue', 
                   'double', 
                   [], 
                   is_const=True)
    ## random-variable.h (module 'core'): double ns3::GammaVariable::GetValue(double alpha, double beta) const [member function]
    cls.add_method('GetValue', 
                   'double', 
                   [param('double', 'alpha'), param('double', 'beta')], 
                   is_const=True)
    return
def register_Ns3Header_methods(root_module, cls):
    """Register ns3::Header bindings on *cls*: output-stream operator,
    constructors, and its (mostly pure-virtual) serialization interface.

    PyBindGen-generated; *root_module* is unused here. Do not reorder the
    registration statements by hand.
    """
    cls.add_output_stream_operator()
    ## header.h (module 'network'): ns3::Header::Header() [constructor]
    cls.add_constructor([])
    ## header.h (module 'network'): ns3::Header::Header(ns3::Header const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Header const &', 'arg0')])
    ## header.h (module 'network'): uint32_t ns3::Header::Deserialize(ns3::Buffer::Iterator start) [member function]
    cls.add_method('Deserialize', 
                   'uint32_t', 
                   [param('ns3::Buffer::Iterator', 'start')], 
                   is_pure_virtual=True, is_virtual=True)
    ## header.h (module 'network'): uint32_t ns3::Header::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 
                   'uint32_t', 
                   [], 
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## header.h (module 'network'): static ns3::TypeId ns3::Header::GetTypeId() [member function]
    cls.add_method('GetTypeId', 
                   'ns3::TypeId', 
                   [], 
                   is_static=True)
    ## header.h (module 'network'): void ns3::Header::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 
                   'void', 
                   [param('std::ostream &', 'os')], 
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## header.h (module 'network'): void ns3::Header::Serialize(ns3::Buffer::Iterator start) const [member function]
    cls.add_method('Serialize', 
                   'void', 
                   [param('ns3::Buffer::Iterator', 'start')], 
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3IntEmpiricalVariable_methods(root_module, cls):
    """Register ns3::IntEmpiricalVariable constructors on *cls*.

    PyBindGen-generated; *root_module* is unused here.
    """
    ## random-variable.h (module 'core'): ns3::IntEmpiricalVariable::IntEmpiricalVariable(ns3::IntEmpiricalVariable const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::IntEmpiricalVariable const &', 'arg0')])
    ## random-variable.h (module 'core'): ns3::IntEmpiricalVariable::IntEmpiricalVariable() [constructor]
    cls.add_constructor([])
    return
def register_Ns3LogNormalVariable_methods(root_module, cls):
    """Register ns3::LogNormalVariable constructors on *cls*.

    PyBindGen-generated; *root_module* is unused here.
    """
    ## random-variable.h (module 'core'): ns3::LogNormalVariable::LogNormalVariable(ns3::LogNormalVariable const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::LogNormalVariable const &', 'arg0')])
    ## random-variable.h (module 'core'): ns3::LogNormalVariable::LogNormalVariable(double mu, double sigma) [constructor]
    cls.add_constructor([param('double', 'mu'), param('double', 'sigma')])
    return
def register_Ns3NormalVariable_methods(root_module, cls):
    """Register ns3::NormalVariable constructors on *cls*.

    PyBindGen-generated; *root_module* is unused here.
    """
    ## random-variable.h (module 'core'): ns3::NormalVariable::NormalVariable(ns3::NormalVariable const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::NormalVariable const &', 'arg0')])
    ## random-variable.h (module 'core'): ns3::NormalVariable::NormalVariable() [constructor]
    cls.add_constructor([])
    ## random-variable.h (module 'core'): ns3::NormalVariable::NormalVariable(double m, double v) [constructor]
    cls.add_constructor([param('double', 'm'), param('double', 'v')])
    ## random-variable.h (module 'core'): ns3::NormalVariable::NormalVariable(double m, double v, double b) [constructor]
    cls.add_constructor([param('double', 'm'), param('double', 'v'), param('double', 'b')])
    return
def register_Ns3Object_methods(root_module, cls):
    """Register ns3::Object bindings on *cls*: public lifecycle/aggregation
    API plus its protected copy constructor and virtual hooks.

    PyBindGen-generated; *root_module* is unused here. Protected members are
    registered with visibility='protected' so subclasses generated in Python
    can reach them. Do not reorder the registration statements by hand.
    """
    ## object.h (module 'core'): ns3::Object::Object() [constructor]
    cls.add_constructor([])
    ## object.h (module 'core'): void ns3::Object::AggregateObject(ns3::Ptr<ns3::Object> other) [member function]
    cls.add_method('AggregateObject', 
                   'void', 
                   [param('ns3::Ptr< ns3::Object >', 'other')])
    ## object.h (module 'core'): void ns3::Object::Dispose() [member function]
    cls.add_method('Dispose', 
                   'void', 
                   [])
    ## object.h (module 'core'): ns3::Object::AggregateIterator ns3::Object::GetAggregateIterator() const [member function]
    cls.add_method('GetAggregateIterator', 
                   'ns3::Object::AggregateIterator', 
                   [], 
                   is_const=True)
    ## object.h (module 'core'): ns3::TypeId ns3::Object::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId', 
                   'ns3::TypeId', 
                   [], 
                   is_const=True, is_virtual=True)
    ## object.h (module 'core'): static ns3::TypeId ns3::Object::GetTypeId() [member function]
    cls.add_method('GetTypeId', 
                   'ns3::TypeId', 
                   [], 
                   is_static=True)
    ## object.h (module 'core'): void ns3::Object::Start() [member function]
    cls.add_method('Start', 
                   'void', 
                   [])
    ## object.h (module 'core'): ns3::Object::Object(ns3::Object const & o) [copy constructor]
    cls.add_constructor([param('ns3::Object const &', 'o')], 
                        visibility='protected')
    ## object.h (module 'core'): void ns3::Object::DoDispose() [member function]
    cls.add_method('DoDispose', 
                   'void', 
                   [], 
                   visibility='protected', is_virtual=True)
    ## object.h (module 'core'): void ns3::Object::DoStart() [member function]
    cls.add_method('DoStart', 
                   'void', 
                   [], 
                   visibility='protected', is_virtual=True)
    ## object.h (module 'core'): void ns3::Object::NotifyNewAggregate() [member function]
    cls.add_method('NotifyNewAggregate', 
                   'void', 
                   [], 
                   visibility='protected', is_virtual=True)
    return
def register_Ns3ObjectAggregateIterator_methods(root_module, cls):
    """Register ns3::Object::AggregateIterator constructors and methods on *cls*.

    PyBindGen-generated; *root_module* is unused here.
    """
    ## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator(ns3::Object::AggregateIterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')])
    ## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator() [constructor]
    cls.add_constructor([])
    ## object.h (module 'core'): bool ns3::Object::AggregateIterator::HasNext() const [member function]
    cls.add_method('HasNext', 
                   'bool', 
                   [], 
                   is_const=True)
    ## object.h (module 'core'): ns3::Ptr<ns3::Object const> ns3::Object::AggregateIterator::Next() [member function]
    cls.add_method('Next', 
                   'ns3::Ptr< ns3::Object const >', 
                   [])
    return
def register_Ns3ParetoVariable_methods(root_module, cls):
    """Register ns3::ParetoVariable constructors on *cls*.

    PyBindGen-generated; *root_module* is unused here. Constructor overload
    order matters — do not reorder by hand.
    """
    ## random-variable.h (module 'core'): ns3::ParetoVariable::ParetoVariable(ns3::ParetoVariable const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ParetoVariable const &', 'arg0')])
    ## random-variable.h (module 'core'): ns3::ParetoVariable::ParetoVariable() [constructor]
    cls.add_constructor([])
    ## random-variable.h (module 'core'): ns3::ParetoVariable::ParetoVariable(double m) [constructor]
    cls.add_constructor([param('double', 'm')])
    ## random-variable.h (module 'core'): ns3::ParetoVariable::ParetoVariable(double m, double s) [constructor]
    cls.add_constructor([param('double', 'm'), param('double', 's')])
    ## random-variable.h (module 'core'): ns3::ParetoVariable::ParetoVariable(double m, double s, double b) [constructor]
    cls.add_constructor([param('double', 'm'), param('double', 's'), param('double', 'b')])
    ## random-variable.h (module 'core'): ns3::ParetoVariable::ParetoVariable(std::pair<double,double> params) [constructor]
    cls.add_constructor([param('std::pair< double, double >', 'params')])
    ## random-variable.h (module 'core'): ns3::ParetoVariable::ParetoVariable(std::pair<double,double> params, double b) [constructor]
    cls.add_constructor([param('std::pair< double, double >', 'params'), param('double', 'b')])
    return
def register_Ns3Scalar_methods(root_module, cls):
    """Register ns3::Scalar constructors (one per numeric type, plus Time)
    and GetDouble on *cls*.

    PyBindGen-generated; *root_module* is unused here. Constructor overload
    order matters — do not reorder by hand.
    """
    ## nstime.h (module 'core'): ns3::Scalar::Scalar(ns3::Scalar const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Scalar const &', 'arg0')])
    ## nstime.h (module 'core'): ns3::Scalar::Scalar() [constructor]
    cls.add_constructor([])
    ## nstime.h (module 'core'): ns3::Scalar::Scalar(double v) [constructor]
    cls.add_constructor([param('double', 'v')])
    ## nstime.h (module 'core'): ns3::Scalar::Scalar(uint32_t v) [constructor]
    cls.add_constructor([param('uint32_t', 'v')])
    ## nstime.h (module 'core'): ns3::Scalar::Scalar(int32_t v) [constructor]
    cls.add_constructor([param('int32_t', 'v')])
    ## nstime.h (module 'core'): ns3::Scalar::Scalar(uint64_t v) [constructor]
    cls.add_constructor([param('uint64_t', 'v')])
    ## nstime.h (module 'core'): ns3::Scalar::Scalar(int64_t v) [constructor]
    cls.add_constructor([param('int64_t', 'v')])
    ## nstime.h (module 'core'): ns3::Scalar::Scalar(ns3::Time t) [constructor]
    cls.add_constructor([param('ns3::Time', 't')])
    ## nstime.h (module 'core'): double ns3::Scalar::GetDouble() const [member function]
    cls.add_method('GetDouble', 
                   'double', 
                   [], 
                   is_const=True)
    return
def register_Ns3SeqTsHeader_methods(root_module, cls):
    """Register ns3::SeqTsHeader bindings on *cls*: public accessors plus the
    private virtual Header-serialization overrides.

    PyBindGen-generated; *root_module* is unused here. The serialization
    overrides are registered with visibility='private' to mirror the C++
    declaration. Do not reorder the registration statements by hand.
    """
    ## seq-ts-header.h (module 'applications'): ns3::SeqTsHeader::SeqTsHeader(ns3::SeqTsHeader const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::SeqTsHeader const &', 'arg0')])
    ## seq-ts-header.h (module 'applications'): ns3::SeqTsHeader::SeqTsHeader() [constructor]
    cls.add_constructor([])
    ## seq-ts-header.h (module 'applications'): uint32_t ns3::SeqTsHeader::GetSeq() const [member function]
    cls.add_method('GetSeq', 
                   'uint32_t', 
                   [], 
                   is_const=True)
    ## seq-ts-header.h (module 'applications'): ns3::Time ns3::SeqTsHeader::GetTs() const [member function]
    cls.add_method('GetTs', 
                   'ns3::Time', 
                   [], 
                   is_const=True)
    ## seq-ts-header.h (module 'applications'): static ns3::TypeId ns3::SeqTsHeader::GetTypeId() [member function]
    cls.add_method('GetTypeId', 
                   'ns3::TypeId', 
                   [], 
                   is_static=True)
    ## seq-ts-header.h (module 'applications'): void ns3::SeqTsHeader::SetSeq(uint32_t seq) [member function]
    cls.add_method('SetSeq', 
                   'void', 
                   [param('uint32_t', 'seq')])
    ## seq-ts-header.h (module 'applications'): uint32_t ns3::SeqTsHeader::Deserialize(ns3::Buffer::Iterator start) [member function]
    cls.add_method('Deserialize', 
                   'uint32_t', 
                   [param('ns3::Buffer::Iterator', 'start')], 
                   visibility='private', is_virtual=True)
    ## seq-ts-header.h (module 'applications'): ns3::TypeId ns3::SeqTsHeader::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId', 
                   'ns3::TypeId', 
                   [], 
                   is_const=True, visibility='private', is_virtual=True)
    ## seq-ts-header.h (module 'applications'): uint32_t ns3::SeqTsHeader::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 
                   'uint32_t', 
                   [], 
                   is_const=True, visibility='private', is_virtual=True)
    ## seq-ts-header.h (module 'applications'): void ns3::SeqTsHeader::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 
                   'void', 
                   [param('std::ostream &', 'os')], 
                   is_const=True, visibility='private', is_virtual=True)
    ## seq-ts-header.h (module 'applications'): void ns3::SeqTsHeader::Serialize(ns3::Buffer::Iterator start) const [member function]
    cls.add_method('Serialize', 
                   'void', 
                   [param('ns3::Buffer::Iterator', 'start')], 
                   is_const=True, visibility='private', is_virtual=True)
    return
def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
    """Register the SimpleRefCount<AttributeAccessor, ...> template
    instantiation's constructors and static Cleanup on *cls*.

    PyBindGen-generated; *root_module* is unused here.
    """
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::Cleanup() [member function]
    cls.add_method('Cleanup', 
                   'void', 
                   [], 
                   is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
    """Register the SimpleRefCount<AttributeChecker, ...> template
    instantiation's constructors and static Cleanup on *cls*.

    PyBindGen-generated; *root_module* is unused here.
    """
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::Cleanup() [member function]
    cls.add_method('Cleanup', 
                   'void', 
                   [], 
                   is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
    """Register the SimpleRefCount<AttributeValue, ...> template
    instantiation's constructors and static Cleanup on *cls*.

    PyBindGen-generated; *root_module* is unused here.
    """
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::Cleanup() [member function]
    cls.add_method('Cleanup', 
                   'void', 
                   [], 
                   is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
    """Register the SimpleRefCount<CallbackImplBase, ...> template
    instantiation's constructors and static Cleanup on *cls*.

    PyBindGen-generated; *root_module* is unused here.
    """
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::Cleanup() [member function]
    cls.add_method('Cleanup', 
                   'void', 
                   [], 
                   is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, cls):
    """Register bindings for the SimpleRefCount<ns3::EventImpl, ...> instantiation (simple-ref-count.h, module 'core')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter< ns3::EventImpl > > const &', 'o')])
    # static void Cleanup()
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3RadvdInterface_Ns3Empty_Ns3DefaultDeleter__lt__ns3RadvdInterface__gt___methods(root_module, cls):
    """Register bindings for the SimpleRefCount<ns3::RadvdInterface, ...> instantiation (simple-ref-count.h, module 'core')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::RadvdInterface, ns3::empty, ns3::DefaultDeleter< ns3::RadvdInterface > > const &', 'o')])
    # static void Cleanup()
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3RadvdPrefix_Ns3Empty_Ns3DefaultDeleter__lt__ns3RadvdPrefix__gt___methods(root_module, cls):
    """Register bindings for the SimpleRefCount<ns3::RadvdPrefix, ...> instantiation (simple-ref-count.h, module 'core')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::RadvdPrefix, ns3::empty, ns3::DefaultDeleter< ns3::RadvdPrefix > > const &', 'o')])
    # static void Cleanup()
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3Socket_methods(root_module, cls):
    """Register bindings for ns3::Socket (socket.h, module 'network').

    Groups: constructors, bind/connect/close lifecycle, getters, receive,
    send, callback setters, shutdown, and the protected Notify* hooks.
    """
    # Constructors (copy, then default).
    cls.add_constructor([param('ns3::Socket const &', 'arg0')])
    cls.add_constructor([])
    # Lifecycle: bind / connect / close / listen.
    cls.add_method('Bind', 'int', [param('ns3::Address const &', 'address')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('Bind', 'int', [], is_pure_virtual=True, is_virtual=True)
    cls.add_method('BindToNetDevice', 'void', [param('ns3::Ptr< ns3::NetDevice >', 'netdevice')], is_virtual=True)
    cls.add_method('Close', 'int', [], is_pure_virtual=True, is_virtual=True)
    cls.add_method('Connect', 'int', [param('ns3::Address const &', 'address')], is_pure_virtual=True, is_virtual=True)
    # static factory.
    cls.add_method('CreateSocket', 'ns3::Ptr< ns3::Socket >',
                   [param('ns3::Ptr< ns3::Node >', 'node'), param('ns3::TypeId', 'tid')],
                   is_static=True)
    # Getters.
    cls.add_method('GetAllowBroadcast', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetBoundNetDevice', 'ns3::Ptr< ns3::NetDevice >', [])
    cls.add_method('GetErrno', 'ns3::Socket::SocketErrno', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetNode', 'ns3::Ptr< ns3::Node >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetRxAvailable', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetSockName', 'int', [param('ns3::Address &', 'address')], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetSocketType', 'ns3::Socket::SocketType', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetTxAvailable', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('Listen', 'int', [], is_pure_virtual=True, is_virtual=True)
    # Receive overloads.
    cls.add_method('Recv', 'ns3::Ptr< ns3::Packet >',
                   [param('uint32_t', 'maxSize'), param('uint32_t', 'flags')],
                   is_pure_virtual=True, is_virtual=True)
    cls.add_method('Recv', 'ns3::Ptr< ns3::Packet >', [])
    cls.add_method('Recv', 'int',
                   [param('uint8_t *', 'buf'), param('uint32_t', 'size'), param('uint32_t', 'flags')])
    cls.add_method('RecvFrom', 'ns3::Ptr< ns3::Packet >',
                   [param('uint32_t', 'maxSize'), param('uint32_t', 'flags'), param('ns3::Address &', 'fromAddress')],
                   is_pure_virtual=True, is_virtual=True)
    cls.add_method('RecvFrom', 'ns3::Ptr< ns3::Packet >', [param('ns3::Address &', 'fromAddress')])
    cls.add_method('RecvFrom', 'int',
                   [param('uint8_t *', 'buf'), param('uint32_t', 'size'), param('uint32_t', 'flags'), param('ns3::Address &', 'fromAddress')])
    # Send overloads.
    cls.add_method('Send', 'int',
                   [param('ns3::Ptr< ns3::Packet >', 'p'), param('uint32_t', 'flags')],
                   is_pure_virtual=True, is_virtual=True)
    cls.add_method('Send', 'int', [param('ns3::Ptr< ns3::Packet >', 'p')])
    cls.add_method('Send', 'int',
                   [param('uint8_t const *', 'buf'), param('uint32_t', 'size'), param('uint32_t', 'flags')])
    cls.add_method('SendTo', 'int',
                   [param('ns3::Ptr< ns3::Packet >', 'p'), param('uint32_t', 'flags'), param('ns3::Address const &', 'toAddress')],
                   is_pure_virtual=True, is_virtual=True)
    cls.add_method('SendTo', 'int',
                   [param('uint8_t const *', 'buf'), param('uint32_t', 'size'), param('uint32_t', 'flags'), param('ns3::Address const &', 'address')])
    # Callback setters (ns3::Callback template spellings are significant).
    cls.add_method('SetAcceptCallback', 'void',
                   [param('ns3::Callback< bool, ns3::Ptr< ns3::Socket >, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'connectionRequest'), param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'newConnectionCreated')])
    cls.add_method('SetAllowBroadcast', 'bool', [param('bool', 'allowBroadcast')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetCloseCallbacks', 'void',
                   [param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'normalClose'), param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'errorClose')])
    cls.add_method('SetConnectCallback', 'void',
                   [param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'connectionSucceeded'), param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'connectionFailed')])
    cls.add_method('SetDataSentCallback', 'void',
                   [param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'dataSent')])
    cls.add_method('SetRecvCallback', 'void',
                   [param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'arg0')])
    cls.add_method('SetRecvPktInfo', 'void', [param('bool', 'flag')])
    cls.add_method('SetSendCallback', 'void',
                   [param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'sendCb')])
    # Shutdown.
    cls.add_method('ShutdownRecv', 'int', [], is_pure_virtual=True, is_virtual=True)
    cls.add_method('ShutdownSend', 'int', [], is_pure_virtual=True, is_virtual=True)
    # Protected lifecycle / notification hooks.
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    cls.add_method('NotifyConnectionFailed', 'void', [], visibility='protected')
    cls.add_method('NotifyConnectionRequest', 'bool', [param('ns3::Address const &', 'from')], visibility='protected')
    cls.add_method('NotifyConnectionSucceeded', 'void', [], visibility='protected')
    cls.add_method('NotifyDataRecv', 'void', [], visibility='protected')
    cls.add_method('NotifyDataSent', 'void', [param('uint32_t', 'size')], visibility='protected')
    cls.add_method('NotifyErrorClose', 'void', [], visibility='protected')
    cls.add_method('NotifyNewConnectionCreated', 'void',
                   [param('ns3::Ptr< ns3::Socket >', 'socket'), param('ns3::Address const &', 'from')],
                   visibility='protected')
    cls.add_method('NotifyNormalClose', 'void', [], visibility='protected')
    cls.add_method('NotifySend', 'void', [param('uint32_t', 'spaceAvailable')], visibility='protected')
    return
def register_Ns3SocketAddressTag_methods(root_module, cls):
    """Register bindings for ns3::SocketAddressTag (socket.h, module 'network')."""
    # Copy and default constructors.
    cls.add_constructor([param('ns3::SocketAddressTag const &', 'arg0')])
    cls.add_constructor([])
    # Tag serialization interface.
    cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], is_virtual=True)
    cls.add_method('GetAddress', 'ns3::Address', [], is_const=True)
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_const=True, is_virtual=True)
    cls.add_method('SetAddress', 'void', [param('ns3::Address', 'addr')])
    return
def register_Ns3SocketIpTtlTag_methods(root_module, cls):
    """Register bindings for ns3::SocketIpTtlTag (socket.h, module 'network')."""
    # Copy and default constructors.
    cls.add_constructor([param('ns3::SocketIpTtlTag const &', 'arg0')])
    cls.add_constructor([])
    # Tag serialization interface plus TTL accessors.
    cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], is_virtual=True)
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetTtl', 'uint8_t', [], is_const=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_const=True, is_virtual=True)
    cls.add_method('SetTtl', 'void', [param('uint8_t', 'ttl')])
    return
def register_Ns3SocketSetDontFragmentTag_methods(root_module, cls):
    """Register bindings for ns3::SocketSetDontFragmentTag (socket.h, module 'network')."""
    # Copy and default constructors.
    cls.add_constructor([param('ns3::SocketSetDontFragmentTag const &', 'arg0')])
    cls.add_constructor([])
    # Tag serialization interface plus enable/disable flag accessors.
    cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], is_virtual=True)
    cls.add_method('Disable', 'void', [])
    cls.add_method('Enable', 'void', [])
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('IsEnabled', 'bool', [], is_const=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_const=True, is_virtual=True)
    return
def register_Ns3Application_methods(root_module, cls):
    """Register bindings for ns3::Application (application.h, module 'network')."""
    # Copy and default constructors.
    cls.add_constructor([param('ns3::Application const &', 'arg0')])
    cls.add_constructor([])
    # Public interface.
    cls.add_method('GetNode', 'ns3::Ptr< ns3::Node >', [], is_const=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('SetNode', 'void', [param('ns3::Ptr< ns3::Node >', 'node')])
    cls.add_method('SetStartTime', 'void', [param('ns3::Time', 'start')])
    cls.add_method('SetStopTime', 'void', [param('ns3::Time', 'stop')])
    # Protected lifecycle hooks.
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    cls.add_method('DoStart', 'void', [], visibility='protected', is_virtual=True)
    # Private virtual hooks for subclasses.
    cls.add_method('StartApplication', 'void', [], visibility='private', is_virtual=True)
    cls.add_method('StopApplication', 'void', [], visibility='private', is_virtual=True)
    return
def register_Ns3AttributeAccessor_methods(root_module, cls):
    """Register bindings for ns3::AttributeAccessor (attribute.h, module 'core')."""
    # Copy and default constructors.
    cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')])
    cls.add_constructor([])
    # Pure-virtual accessor interface.
    cls.add_method('Get', 'bool',
                   [param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('HasGetter', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('HasSetter', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('Set', 'bool',
                   [param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3AttributeChecker_methods(root_module, cls):
    """Register bindings for ns3::AttributeChecker (attribute.h, module 'core')."""
    # Copy and default constructors.
    cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')])
    cls.add_constructor([])
    # Pure-virtual checker interface.
    cls.add_method('Check', 'bool', [param('ns3::AttributeValue const &', 'value')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('Copy', 'bool',
                   [param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('Create', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetUnderlyingTypeInformation', 'std::string', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetValueTypeName', 'std::string', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('HasUnderlyingTypeInformation', 'bool', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3AttributeValue_methods(root_module, cls):
    """Register bindings for ns3::AttributeValue (attribute.h, module 'core')."""
    # Copy and default constructors.
    cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')])
    cls.add_constructor([])
    # Pure-virtual value interface.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_pure_virtual=True, is_virtual=True)
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3BulkSendApplication_methods(root_module, cls):
    """Register bindings for ns3::BulkSendApplication (bulk-send-application.h, module 'applications')."""
    # Copy and default constructors.
    cls.add_constructor([param('ns3::BulkSendApplication const &', 'arg0')])
    cls.add_constructor([])
    # Public interface.
    cls.add_method('GetSocket', 'ns3::Ptr< ns3::Socket >', [], is_const=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('SetMaxBytes', 'void', [param('uint32_t', 'maxBytes')])
    # Protected / private lifecycle overrides.
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    cls.add_method('StartApplication', 'void', [], visibility='private', is_virtual=True)
    cls.add_method('StopApplication', 'void', [], visibility='private', is_virtual=True)
    return
def register_Ns3CallbackChecker_methods(root_module, cls):
    """Register bindings for ns3::CallbackChecker (callback.h, module 'core'): default and copy constructors only."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')])
    return
def register_Ns3CallbackImplBase_methods(root_module, cls):
    """Register bindings for ns3::CallbackImplBase (callback.h, module 'core')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
    # Pure-virtual equality test between callback implementations.
    cls.add_method('IsEqual', 'bool', [param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3CallbackValue_methods(root_module, cls):
    """Register bindings for ns3::CallbackValue (callback.h, module 'core')."""
    # Copy, default, and from-CallbackBase constructors.
    cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('ns3::CallbackBase const &', 'base')])
    # AttributeValue interface overrides.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    # Value setter.
    cls.add_method('Set', 'void', [param('ns3::CallbackBase', 'base')])
    return
def register_Ns3DataRateChecker_methods(root_module, cls):
    """Register ns3::DataRateChecker with pybindgen: default and copy constructors only."""
    ## data-rate.h (module 'network'): ns3::DataRateChecker::DataRateChecker() [constructor]
    cls.add_constructor([])
    ## data-rate.h (module 'network'): ns3::DataRateChecker::DataRateChecker(ns3::DataRateChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::DataRateChecker const &', 'arg0')])
    return
def register_Ns3DataRateValue_methods(root_module, cls):
    """Register constructors and methods of ns3::DataRateValue on its pybindgen wrapper."""
    ## data-rate.h (module 'network'): ns3::DataRateValue::DataRateValue() [constructor]
    cls.add_constructor([])
    ## data-rate.h (module 'network'): ns3::DataRateValue::DataRateValue(ns3::DataRateValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::DataRateValue const &', 'arg0')])
    ## data-rate.h (module 'network'): ns3::DataRateValue::DataRateValue(ns3::DataRate const & value) [constructor]
    cls.add_constructor([param('ns3::DataRate const &', 'value')])
    ## data-rate.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::DataRateValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## data-rate.h (module 'network'): bool ns3::DataRateValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## data-rate.h (module 'network'): ns3::DataRate ns3::DataRateValue::Get() const [member function]
    cls.add_method('Get', 'ns3::DataRate', [], is_const=True)
    ## data-rate.h (module 'network'): std::string ns3::DataRateValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## data-rate.h (module 'network'): void ns3::DataRateValue::Set(ns3::DataRate const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::DataRate const &', 'value')])
    return
def register_Ns3EmptyAttributeValue_methods(root_module, cls):
    """Register constructors and (private-visibility) methods of ns3::EmptyAttributeValue."""
    ## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, visibility='private', is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], visibility='private', is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, visibility='private', is_virtual=True)
    return
def register_Ns3EventImpl_methods(root_module, cls):
    """Register constructors and methods of ns3::EventImpl on its pybindgen wrapper."""
    ## event-impl.h (module 'core'): ns3::EventImpl::EventImpl(ns3::EventImpl const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::EventImpl const &', 'arg0')])
    ## event-impl.h (module 'core'): ns3::EventImpl::EventImpl() [constructor]
    cls.add_constructor([])
    ## event-impl.h (module 'core'): void ns3::EventImpl::Cancel() [member function]
    cls.add_method('Cancel', 'void', [])
    ## event-impl.h (module 'core'): void ns3::EventImpl::Invoke() [member function]
    cls.add_method('Invoke', 'void', [])
    ## event-impl.h (module 'core'): bool ns3::EventImpl::IsCancelled() [member function]
    cls.add_method('IsCancelled', 'bool', [])
    ## event-impl.h (module 'core'): void ns3::EventImpl::Notify() [member function]
    cls.add_method('Notify', 'void', [], is_pure_virtual=True, visibility='protected', is_virtual=True)
    return
def register_Ns3Ipv4AddressChecker_methods(root_module, cls):
    """Register ns3::Ipv4AddressChecker with pybindgen: default and copy constructors only."""
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker() [constructor]
    cls.add_constructor([])
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker(ns3::Ipv4AddressChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4AddressChecker const &', 'arg0')])
    return
def register_Ns3Ipv4AddressValue_methods(root_module, cls):
    """Register constructors and methods of ns3::Ipv4AddressValue on its pybindgen wrapper."""
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue() [constructor]
    cls.add_constructor([])
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4AddressValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4AddressValue const &', 'arg0')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4Address const & value) [constructor]
    cls.add_constructor([param('ns3::Ipv4Address const &', 'value')])
    ## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4AddressValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4AddressValue::Get() const [member function]
    cls.add_method('Get', 'ns3::Ipv4Address', [], is_const=True)
    ## ipv4-address.h (module 'network'): std::string ns3::Ipv4AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4AddressValue::Set(ns3::Ipv4Address const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::Ipv4Address const &', 'value')])
    return
def register_Ns3Ipv4MaskChecker_methods(root_module, cls):
    """Register ns3::Ipv4MaskChecker with pybindgen: default and copy constructors only."""
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker() [constructor]
    cls.add_constructor([])
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker(ns3::Ipv4MaskChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4MaskChecker const &', 'arg0')])
    return
def register_Ns3Ipv4MaskValue_methods(root_module, cls):
    """Register constructors and methods of ns3::Ipv4MaskValue on its pybindgen wrapper."""
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue() [constructor]
    cls.add_constructor([])
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4MaskValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4MaskValue const &', 'arg0')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4Mask const & value) [constructor]
    cls.add_constructor([param('ns3::Ipv4Mask const &', 'value')])
    ## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4MaskValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4MaskValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## ipv4-address.h (module 'network'): ns3::Ipv4Mask ns3::Ipv4MaskValue::Get() const [member function]
    cls.add_method('Get', 'ns3::Ipv4Mask', [], is_const=True)
    ## ipv4-address.h (module 'network'): std::string ns3::Ipv4MaskValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4MaskValue::Set(ns3::Ipv4Mask const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::Ipv4Mask const &', 'value')])
    return
def register_Ns3Ipv6AddressChecker_methods(root_module, cls):
    """Register ns3::Ipv6AddressChecker with pybindgen: default and copy constructors only."""
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker(ns3::Ipv6AddressChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6AddressChecker const &', 'arg0')])
    return
def register_Ns3Ipv6AddressValue_methods(root_module, cls):
    """Register constructors and methods of ns3::Ipv6AddressValue on its pybindgen wrapper."""
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6AddressValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6AddressValue const &', 'arg0')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6Address const & value) [constructor]
    cls.add_constructor([param('ns3::Ipv6Address const &', 'value')])
    ## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6AddressValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6AddressValue::Get() const [member function]
    cls.add_method('Get', 'ns3::Ipv6Address', [], is_const=True)
    ## ipv6-address.h (module 'network'): std::string ns3::Ipv6AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6AddressValue::Set(ns3::Ipv6Address const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::Ipv6Address const &', 'value')])
    return
def register_Ns3Ipv6PrefixChecker_methods(root_module, cls):
    """Register ns3::Ipv6PrefixChecker with pybindgen: default and copy constructors only."""
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker(ns3::Ipv6PrefixChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6PrefixChecker const &', 'arg0')])
    return
def register_Ns3Ipv6PrefixValue_methods(root_module, cls):
    """Register constructors and methods of ns3::Ipv6PrefixValue on its pybindgen wrapper."""
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6PrefixValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6PrefixValue const &', 'arg0')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6Prefix const & value) [constructor]
    cls.add_constructor([param('ns3::Ipv6Prefix const &', 'value')])
    ## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6PrefixValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6PrefixValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix ns3::Ipv6PrefixValue::Get() const [member function]
    cls.add_method('Get', 'ns3::Ipv6Prefix', [], is_const=True)
    ## ipv6-address.h (module 'network'): std::string ns3::Ipv6PrefixValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6PrefixValue::Set(ns3::Ipv6Prefix const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::Ipv6Prefix const &', 'value')])
    return
def register_Ns3NetDevice_methods(root_module, cls):
    """Register constructors and (mostly pure-virtual) methods of ns3::NetDevice."""
    ## net-device.h (module 'network'): ns3::NetDevice::NetDevice() [constructor]
    cls.add_constructor([])
    ## net-device.h (module 'network'): ns3::NetDevice::NetDevice(ns3::NetDevice const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::NetDevice const &', 'arg0')])
    ## net-device.h (module 'network'): void ns3::NetDevice::AddLinkChangeCallback(ns3::Callback<void,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> callback) [member function]
    cls.add_method('AddLinkChangeCallback', 'void', [param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetAddress() const [member function]
    cls.add_method('GetAddress', 'ns3::Address', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetBroadcast() const [member function]
    cls.add_method('GetBroadcast', 'ns3::Address', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Ptr<ns3::Channel> ns3::NetDevice::GetChannel() const [member function]
    cls.add_method('GetChannel', 'ns3::Ptr< ns3::Channel >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): uint32_t ns3::NetDevice::GetIfIndex() const [member function]
    cls.add_method('GetIfIndex', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): uint16_t ns3::NetDevice::GetMtu() const [member function]
    cls.add_method('GetMtu', 'uint16_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function]
    cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv4Address', 'multicastGroup')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function]
    cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv6Address', 'addr')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NetDevice::GetNode() const [member function]
    cls.add_method('GetNode', 'ns3::Ptr< ns3::Node >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): static ns3::TypeId ns3::NetDevice::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsBridge() const [member function]
    cls.add_method('IsBridge', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsBroadcast() const [member function]
    cls.add_method('IsBroadcast', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsLinkUp() const [member function]
    cls.add_method('IsLinkUp', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsMulticast() const [member function]
    cls.add_method('IsMulticast', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsPointToPoint() const [member function]
    cls.add_method('IsPointToPoint', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::NeedsArp() const [member function]
    cls.add_method('NeedsArp', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t protocolNumber) [member function]
    cls.add_method('Send', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::SendFrom(ns3::Ptr<ns3::Packet> packet, ns3::Address const & source, ns3::Address const & dest, uint16_t protocolNumber) [member function]
    cls.add_method('SendFrom', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetAddress(ns3::Address address) [member function]
    cls.add_method('SetAddress', 'void', [param('ns3::Address', 'address')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetIfIndex(uint32_t const index) [member function]
    cls.add_method('SetIfIndex', 'void', [param('uint32_t const', 'index')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::SetMtu(uint16_t const mtu) [member function]
    cls.add_method('SetMtu', 'bool', [param('uint16_t const', 'mtu')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetNode(ns3::Ptr<ns3::Node> node) [member function]
    cls.add_method('SetNode', 'void', [param('ns3::Ptr< ns3::Node >', 'node')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetPromiscReceiveCallback(ns3::Callback<bool,ns3::Ptr<ns3::NetDevice>,ns3::Ptr<const ns3::Packet>,short unsigned int,const ns3::Address&,const ns3::Address&,ns3::NetDevice::PacketType,ns3::empty,ns3::empty,ns3::empty> cb) [member function]
    cls.add_method('SetPromiscReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, short unsigned int, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetReceiveCallback(ns3::Callback<bool,ns3::Ptr<ns3::NetDevice>,ns3::Ptr<const ns3::Packet>,short unsigned int,const ns3::Address&,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> cb) [member function]
    cls.add_method('SetReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, short unsigned int, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::SupportsSendFrom() const [member function]
    cls.add_method('SupportsSendFrom', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3Node_methods(root_module, cls):
    """Register constructors and methods of ns3::Node on its pybindgen wrapper."""
    ## node.h (module 'network'): ns3::Node::Node(ns3::Node const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Node const &', 'arg0')])
    ## node.h (module 'network'): ns3::Node::Node() [constructor]
    cls.add_constructor([])
    ## node.h (module 'network'): ns3::Node::Node(uint32_t systemId) [constructor]
    cls.add_constructor([param('uint32_t', 'systemId')])
    ## node.h (module 'network'): uint32_t ns3::Node::AddApplication(ns3::Ptr<ns3::Application> application) [member function]
    cls.add_method('AddApplication', 'uint32_t', [param('ns3::Ptr< ns3::Application >', 'application')])
    ## node.h (module 'network'): uint32_t ns3::Node::AddDevice(ns3::Ptr<ns3::NetDevice> device) [member function]
    cls.add_method('AddDevice', 'uint32_t', [param('ns3::Ptr< ns3::NetDevice >', 'device')])
    ## node.h (module 'network'): static bool ns3::Node::ChecksumEnabled() [member function]
    cls.add_method('ChecksumEnabled', 'bool', [], is_static=True)
    ## node.h (module 'network'): ns3::Ptr<ns3::Application> ns3::Node::GetApplication(uint32_t index) const [member function]
    cls.add_method('GetApplication', 'ns3::Ptr< ns3::Application >', [param('uint32_t', 'index')], is_const=True)
    ## node.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::Node::GetDevice(uint32_t index) const [member function]
    cls.add_method('GetDevice', 'ns3::Ptr< ns3::NetDevice >', [param('uint32_t', 'index')], is_const=True)
    ## node.h (module 'network'): uint32_t ns3::Node::GetId() const [member function]
    cls.add_method('GetId', 'uint32_t', [], is_const=True)
    ## node.h (module 'network'): uint32_t ns3::Node::GetNApplications() const [member function]
    cls.add_method('GetNApplications', 'uint32_t', [], is_const=True)
    ## node.h (module 'network'): uint32_t ns3::Node::GetNDevices() const [member function]
    cls.add_method('GetNDevices', 'uint32_t', [], is_const=True)
    ## node.h (module 'network'): uint32_t ns3::Node::GetSystemId() const [member function]
    cls.add_method('GetSystemId', 'uint32_t', [], is_const=True)
    ## node.h (module 'network'): static ns3::TypeId ns3::Node::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## node.h (module 'network'): void ns3::Node::RegisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler, uint16_t protocolType, ns3::Ptr<ns3::NetDevice> device, bool promiscuous=false) [member function]
    cls.add_method('RegisterProtocolHandler', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler'), param('uint16_t', 'protocolType'), param('ns3::Ptr< ns3::NetDevice >', 'device'), param('bool', 'promiscuous', default_value='false')])
    ## node.h (module 'network'): void ns3::Node::UnregisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler) [member function]
    cls.add_method('UnregisterProtocolHandler', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler')])
    ## node.h (module 'network'): void ns3::Node::DoDispose() [member function]
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    ## node.h (module 'network'): void ns3::Node::DoStart() [member function]
    cls.add_method('DoStart', 'void', [], visibility='protected', is_virtual=True)
    ## node.h (module 'network'): void ns3::Node::NotifyDeviceAdded(ns3::Ptr<ns3::NetDevice> device) [member function]
    cls.add_method('NotifyDeviceAdded', 'void', [param('ns3::Ptr< ns3::NetDevice >', 'device')], visibility='private', is_virtual=True)
    return
def register_Ns3ObjectFactoryChecker_methods(root_module, cls):
    """Register ns3::ObjectFactoryChecker with pybindgen: default and copy constructors only."""
    ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker() [constructor]
    cls.add_constructor([])
    ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker(ns3::ObjectFactoryChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectFactoryChecker const &', 'arg0')])
    return
def register_Ns3ObjectFactoryValue_methods(root_module, cls):
    """Register constructors and methods of ns3::ObjectFactoryValue on its pybindgen wrapper."""
    ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue() [constructor]
    cls.add_constructor([])
    ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactoryValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectFactoryValue const &', 'arg0')])
    ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactory const & value) [constructor]
    cls.add_constructor([param('ns3::ObjectFactory const &', 'value')])
    ## object-factory.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::ObjectFactoryValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## object-factory.h (module 'core'): bool ns3::ObjectFactoryValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## object-factory.h (module 'core'): ns3::ObjectFactory ns3::ObjectFactoryValue::Get() const [member function]
    cls.add_method('Get', 'ns3::ObjectFactory', [], is_const=True)
    ## object-factory.h (module 'core'): std::string ns3::ObjectFactoryValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## object-factory.h (module 'core'): void ns3::ObjectFactoryValue::Set(ns3::ObjectFactory const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::ObjectFactory const &', 'value')])
    return
def register_Ns3OnOffApplication_methods(root_module, cls):
    """Register constructors and methods of ns3::OnOffApplication on its pybindgen wrapper."""
    ## onoff-application.h (module 'applications'): ns3::OnOffApplication::OnOffApplication(ns3::OnOffApplication const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::OnOffApplication const &', 'arg0')])
    ## onoff-application.h (module 'applications'): ns3::OnOffApplication::OnOffApplication() [constructor]
    cls.add_constructor([])
    ## onoff-application.h (module 'applications'): ns3::Ptr<ns3::Socket> ns3::OnOffApplication::GetSocket() const [member function]
    cls.add_method('GetSocket', 'ns3::Ptr< ns3::Socket >', [], is_const=True)
    ## onoff-application.h (module 'applications'): static ns3::TypeId ns3::OnOffApplication::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## onoff-application.h (module 'applications'): void ns3::OnOffApplication::SetMaxBytes(uint32_t maxBytes) [member function]
    cls.add_method('SetMaxBytes', 'void', [param('uint32_t', 'maxBytes')])
    ## onoff-application.h (module 'applications'): void ns3::OnOffApplication::DoDispose() [member function]
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    ## onoff-application.h (module 'applications'): void ns3::OnOffApplication::StartApplication() [member function]
    cls.add_method('StartApplication', 'void', [], visibility='private', is_virtual=True)
    ## onoff-application.h (module 'applications'): void ns3::OnOffApplication::StopApplication() [member function]
    cls.add_method('StopApplication', 'void', [], visibility='private', is_virtual=True)
    return
def register_Ns3PacketSink_methods(root_module, cls):
    """Register constructors and methods of ns3::PacketSink on its pybindgen wrapper."""
    ## packet-sink.h (module 'applications'): ns3::PacketSink::PacketSink(ns3::PacketSink const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketSink const &', 'arg0')])
    ## packet-sink.h (module 'applications'): ns3::PacketSink::PacketSink() [constructor]
    cls.add_constructor([])
    ## packet-sink.h (module 'applications'): std::list<ns3::Ptr<ns3::Socket>, std::allocator<ns3::Ptr<ns3::Socket> > > ns3::PacketSink::GetAcceptedSockets() const [member function]
    cls.add_method('GetAcceptedSockets', 'std::list< ns3::Ptr< ns3::Socket > >', [], is_const=True)
    ## packet-sink.h (module 'applications'): ns3::Ptr<ns3::Socket> ns3::PacketSink::GetListeningSocket() const [member function]
    cls.add_method('GetListeningSocket', 'ns3::Ptr< ns3::Socket >', [], is_const=True)
    ## packet-sink.h (module 'applications'): uint32_t ns3::PacketSink::GetTotalRx() const [member function]
    cls.add_method('GetTotalRx', 'uint32_t', [], is_const=True)
    ## packet-sink.h (module 'applications'): static ns3::TypeId ns3::PacketSink::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## packet-sink.h (module 'applications'): void ns3::PacketSink::DoDispose() [member function]
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    ## packet-sink.h (module 'applications'): void ns3::PacketSink::StartApplication() [member function]
    cls.add_method('StartApplication', 'void', [], visibility='private', is_virtual=True)
    ## packet-sink.h (module 'applications'): void ns3::PacketSink::StopApplication() [member function]
    cls.add_method('StopApplication', 'void', [], visibility='private', is_virtual=True)
    return
def register_Ns3Ping6_methods(root_module, cls):
    """Register constructors and methods of ns3::Ping6 on its pybindgen wrapper."""
    ## ping6.h (module 'applications'): ns3::Ping6::Ping6(ns3::Ping6 const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ping6 const &', 'arg0')])
    ## ping6.h (module 'applications'): ns3::Ping6::Ping6() [constructor]
    cls.add_constructor([])
    ## ping6.h (module 'applications'): static ns3::TypeId ns3::Ping6::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## ping6.h (module 'applications'): void ns3::Ping6::SetIfIndex(uint32_t ifIndex) [member function]
    cls.add_method('SetIfIndex', 'void', [param('uint32_t', 'ifIndex')])
    ## ping6.h (module 'applications'): void ns3::Ping6::SetLocal(ns3::Ipv6Address ipv6) [member function]
    cls.add_method('SetLocal', 'void', [param('ns3::Ipv6Address', 'ipv6')])
    ## ping6.h (module 'applications'): void ns3::Ping6::SetRemote(ns3::Ipv6Address ipv6) [member function]
    cls.add_method('SetRemote', 'void', [param('ns3::Ipv6Address', 'ipv6')])
    ## ping6.h (module 'applications'): void ns3::Ping6::SetRouters(std::vector<ns3::Ipv6Address, std::allocator<ns3::Ipv6Address> > routers) [member function]
    cls.add_method('SetRouters', 'void', [param('std::vector< ns3::Ipv6Address >', 'routers')])
    ## ping6.h (module 'applications'): void ns3::Ping6::DoDispose() [member function]
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    ## ping6.h (module 'applications'): void ns3::Ping6::StartApplication() [member function]
    cls.add_method('StartApplication', 'void', [], visibility='private', is_virtual=True)
    ## ping6.h (module 'applications'): void ns3::Ping6::StopApplication() [member function]
    cls.add_method('StopApplication', 'void', [], visibility='private', is_virtual=True)
    return
def register_Ns3Radvd_methods(root_module, cls):
    """Register ns3::Radvd (radvd.h, module 'applications'), the router-advertisement daemon application."""
    cls.add_constructor([param('ns3::Radvd const &', 'arg0')])  # copy constructor
    cls.add_constructor([])                                     # default constructor
    cls.add_method('AddConfiguration', 'void', [param('ns3::Ptr< ns3::RadvdInterface >', 'routerInterface')])
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_static_attribute('MAX_RA_DELAY_TIME', 'uint32_t const', is_const=True)
    # Application life-cycle hooks (non-public virtuals).
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    cls.add_method('StartApplication', 'void', [], visibility='private', is_virtual=True)
    cls.add_method('StopApplication', 'void', [], visibility='private', is_virtual=True)
    return
def register_Ns3RadvdInterface_methods(root_module, cls):
    """Register ns3::RadvdInterface (radvd-interface.h, module 'applications').

    Binds the copy/argument constructors, AddPrefix, the const no-argument
    getters, and the single-argument void setters of the per-interface
    router-advertisement configuration class.
    """
    # Constructors: copy, interface-only, and interface plus advert intervals.
    cls.add_constructor([param('ns3::RadvdInterface const &', 'arg0')])
    cls.add_constructor([param('uint32_t', 'interface')])
    cls.add_constructor([param('uint32_t', 'interface'),
                         param('uint32_t', 'maxRtrAdvInterval'),
                         param('uint32_t', 'minRtrAdvInterval')])
    cls.add_method('AddPrefix', 'void', [param('ns3::Ptr< ns3::RadvdPrefix >', 'routerPrefix')])
    # Const, argument-less accessors: (method name, return type).
    for accessor, rtype in (
            ('GetCurHopLimit', 'uint8_t'),
            ('GetDefaultLifeTime', 'uint32_t'),
            ('GetDefaultPreference', 'uint8_t'),
            ('GetHomeAgentLifeTime', 'uint32_t'),
            ('GetHomeAgentPreference', 'uint32_t'),
            ('GetInterface', 'uint32_t'),
            ('GetLinkMtu', 'uint32_t'),
            ('GetMaxRtrAdvInterval', 'uint32_t'),
            ('GetMinDelayBetweenRAs', 'uint32_t'),
            ('GetMinRtrAdvInterval', 'uint32_t'),
            ('GetPrefixes', 'std::list< ns3::Ptr< ns3::RadvdPrefix > >'),
            ('GetReachableTime', 'uint32_t'),
            ('GetRetransTimer', 'uint32_t'),
            ('IsHomeAgentFlag', 'bool'),
            ('IsHomeAgentInfo', 'bool'),
            ('IsIntervalOpt', 'bool'),
            ('IsManagedFlag', 'bool'),
            ('IsMobRtrSupportFlag', 'bool'),
            ('IsOtherConfigFlag', 'bool'),
            ('IsSendAdvert', 'bool'),
            ('IsSourceLLAddress', 'bool'),
            ):
        cls.add_method(accessor, rtype, [], is_const=True)
    # void setters: (method name, parameter type, parameter name).
    for setter, ptype, pname in (
            ('SetCurHopLimit', 'uint8_t', 'curHopLimit'),
            ('SetDefaultLifeTime', 'uint32_t', 'defaultLifeTime'),
            ('SetDefaultPreference', 'uint8_t', 'defaultPreference'),
            ('SetHomeAgentFlag', 'bool', 'homeAgentFlag'),
            ('SetHomeAgentInfo', 'bool', 'homeAgentFlag'),
            ('SetHomeAgentLifeTime', 'uint32_t', 'homeAgentLifeTime'),
            ('SetHomeAgentPreference', 'uint32_t', 'homeAgentPreference'),
            ('SetIntervalOpt', 'bool', 'intervalOpt'),
            ('SetLinkMtu', 'uint32_t', 'linkMtu'),
            ('SetManagedFlag', 'bool', 'managedFlag'),
            ('SetMaxRtrAdvInterval', 'uint32_t', 'maxRtrAdvInterval'),
            ('SetMinDelayBetweenRAs', 'uint32_t', 'minDelayBetweenRAs'),
            ('SetMinRtrAdvInterval', 'uint32_t', 'minRtrAdvInterval'),
            ('SetMobRtrSupportFlag', 'bool', 'mobRtrSupportFlag'),
            ('SetOtherConfigFlag', 'bool', 'otherConfigFlag'),
            ('SetReachableTime', 'uint32_t', 'reachableTime'),
            ('SetRetransTimer', 'uint32_t', 'retransTimer'),
            ('SetSendAdvert', 'bool', 'sendAdvert'),
            ('SetSourceLLAddress', 'bool', 'sourceLLAddress'),
            ):
        cls.add_method(setter, 'void', [param(ptype, pname)])
    return
def register_Ns3RadvdPrefix_methods(root_module, cls):
    """Register ns3::RadvdPrefix (radvd-prefix.h, module 'applications')."""
    # Copy constructor, then the full constructor with its C++ default arguments.
    cls.add_constructor([param('ns3::RadvdPrefix const &', 'arg0')])
    cls.add_constructor([param('ns3::Ipv6Address', 'network'),
                         param('uint8_t', 'prefixLength'),
                         param('uint32_t', 'preferredLifeTime', default_value='604800'),
                         param('uint32_t', 'validLifeTime', default_value='2592000'),
                         param('bool', 'onLinkFlag', default_value='true'),
                         param('bool', 'autonomousFlag', default_value='true'),
                         param('bool', 'routerAddrFlag', default_value='false')])
    # Const, argument-less accessors: (method name, return type).
    for accessor, rtype in (
            ('GetNetwork', 'ns3::Ipv6Address'),
            ('GetPreferredLifeTime', 'uint32_t'),
            ('GetPrefixLength', 'uint8_t'),
            ('GetValidLifeTime', 'uint32_t'),
            ('IsAutonomousFlag', 'bool'),
            ('IsOnLinkFlag', 'bool'),
            ('IsRouterAddrFlag', 'bool'),
            ):
        cls.add_method(accessor, rtype, [], is_const=True)
    # void setters: (method name, parameter type, parameter name).
    for setter, ptype, pname in (
            ('SetAutonomousFlag', 'bool', 'autonomousFlag'),
            ('SetNetwork', 'ns3::Ipv6Address', 'network'),
            ('SetOnLinkFlag', 'bool', 'onLinkFlag'),
            ('SetPreferredLifeTime', 'uint32_t', 'preferredLifeTime'),
            ('SetPrefixLength', 'uint8_t', 'prefixLength'),
            ('SetRouterAddrFlag', 'bool', 'routerAddrFlag'),
            ('SetValidLifeTime', 'uint32_t', 'validLifeTime'),
            ):
        cls.add_method(setter, 'void', [param(ptype, pname)])
    return
def register_Ns3RandomVariableChecker_methods(root_module, cls):
    """Register ns3::RandomVariableChecker (random-variable.h, module 'core'): default and copy constructors only."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::RandomVariableChecker const &', 'arg0')])
    return
def register_Ns3RandomVariableValue_methods(root_module, cls):
    """Register ns3::RandomVariableValue, the AttributeValue wrapper around ns3::RandomVariable (random-variable.h, module 'core')."""
    # Default, copy, and value-initialising constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::RandomVariableValue const &', 'arg0')])
    cls.add_constructor([param('ns3::RandomVariable const &', 'value')])
    # AttributeValue virtual interface plus the typed Get/Set accessors.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    cls.add_method('Get', 'ns3::RandomVariable', [], is_const=True)
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    cls.add_method('Set', 'void', [param('ns3::RandomVariable const &', 'value')])
    return
def register_Ns3TimeChecker_methods(root_module, cls):
    """Register ns3::TimeChecker (nstime.h, module 'core'): default and copy constructors only."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TimeChecker const &', 'arg0')])
    return
def register_Ns3TimeValue_methods(root_module, cls):
    """Register ns3::TimeValue, the AttributeValue wrapper around ns3::Time (nstime.h, module 'core')."""
    # Default, copy, and value-initialising constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TimeValue const &', 'arg0')])
    cls.add_constructor([param('ns3::Time const &', 'value')])
    # AttributeValue virtual interface plus the typed Get/Set accessors.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    cls.add_method('Get', 'ns3::Time', [], is_const=True)
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    cls.add_method('Set', 'void', [param('ns3::Time const &', 'value')])
    return
def register_Ns3TypeIdChecker_methods(root_module, cls):
    """Register ns3::TypeIdChecker (type-id.h, module 'core'): default and copy constructors only."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')])
    return
def register_Ns3TypeIdValue_methods(root_module, cls):
    """Register ns3::TypeIdValue, the AttributeValue wrapper around ns3::TypeId (type-id.h, module 'core')."""
    # Default, copy, and value-initialising constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')])
    cls.add_constructor([param('ns3::TypeId const &', 'value')])
    # AttributeValue virtual interface plus the typed Get/Set accessors.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    cls.add_method('Get', 'ns3::TypeId', [], is_const=True)
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    cls.add_method('Set', 'void', [param('ns3::TypeId const &', 'value')])
    return
def register_Ns3UdpClient_methods(root_module, cls):
    """Register ns3::UdpClient (udp-client.h, module 'applications')."""
    cls.add_constructor([param('ns3::UdpClient const &', 'arg0')])  # copy constructor
    cls.add_constructor([])                                         # default constructor
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('SetRemote', 'void', [param('ns3::Ipv4Address', 'ip'), param('uint16_t', 'port')])
    # Application life-cycle hooks (non-public virtuals).
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    cls.add_method('StartApplication', 'void', [], visibility='private', is_virtual=True)
    cls.add_method('StopApplication', 'void', [], visibility='private', is_virtual=True)
    return
def register_Ns3UdpEchoClient_methods(root_module, cls):
    """Register ns3::UdpEchoClient (udp-echo-client.h, module 'applications')."""
    cls.add_constructor([param('ns3::UdpEchoClient const &', 'arg0')])  # copy constructor
    cls.add_constructor([])                                             # default constructor
    cls.add_method('GetDataSize', 'uint32_t', [], is_const=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('SetDataSize', 'void', [param('uint32_t', 'dataSize')])
    # The three C++ overloads of SetFill.
    cls.add_method('SetFill', 'void', [param('std::string', 'fill')])
    cls.add_method('SetFill', 'void', [param('uint8_t', 'fill'), param('uint32_t', 'dataSize')])
    cls.add_method('SetFill', 'void',
                   [param('uint8_t *', 'fill'), param('uint32_t', 'fillSize'), param('uint32_t', 'dataSize')])
    cls.add_method('SetRemote', 'void', [param('ns3::Ipv4Address', 'ip'), param('uint16_t', 'port')])
    # Application life-cycle hooks (non-public virtuals).
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    cls.add_method('StartApplication', 'void', [], visibility='private', is_virtual=True)
    cls.add_method('StopApplication', 'void', [], visibility='private', is_virtual=True)
    return
def register_Ns3UdpEchoServer_methods(root_module, cls):
    """Register ns3::UdpEchoServer (udp-echo-server.h, module 'applications')."""
    cls.add_constructor([param('ns3::UdpEchoServer const &', 'arg0')])  # copy constructor
    cls.add_constructor([])                                             # default constructor
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # Application life-cycle hooks (non-public virtuals).
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    cls.add_method('StartApplication', 'void', [], visibility='private', is_virtual=True)
    cls.add_method('StopApplication', 'void', [], visibility='private', is_virtual=True)
    return
def register_Ns3UdpServer_methods(root_module, cls):
    """Register ns3::UdpServer (udp-server.h, module 'applications')."""
    cls.add_constructor([param('ns3::UdpServer const &', 'arg0')])  # copy constructor
    cls.add_constructor([])                                         # default constructor
    # Const statistics accessors: (method name, return type).
    for accessor, rtype in (('GetLost', 'uint32_t'),
                            ('GetPacketWindowSize', 'uint16_t'),
                            ('GetReceived', 'uint32_t')):
        cls.add_method(accessor, rtype, [], is_const=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('SetPacketWindowSize', 'void', [param('uint16_t', 'size')])
    # Application life-cycle hooks (non-public virtuals).
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    cls.add_method('StartApplication', 'void', [], visibility='private', is_virtual=True)
    cls.add_method('StopApplication', 'void', [], visibility='private', is_virtual=True)
    return
def register_Ns3UdpTraceClient_methods(root_module, cls):
    """Register ns3::UdpTraceClient (udp-trace-client.h, module 'applications')."""
    cls.add_constructor([param('ns3::UdpTraceClient const &', 'arg0')])  # copy constructor
    cls.add_constructor([])                                              # default constructor
    cls.add_constructor([param('ns3::Ipv4Address', 'ip'), param('uint16_t', 'port'),
                         param('char *', 'traceFile')])
    cls.add_method('GetMaxPacketSize', 'uint16_t', [])  # non-const in the C++ header
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('SetMaxPacketSize', 'void', [param('uint16_t', 'maxPacketSize')])
    cls.add_method('SetRemote', 'void', [param('ns3::Ipv4Address', 'ip'), param('uint16_t', 'port')])
    cls.add_method('SetTraceFile', 'void', [param('std::string', 'filename')])
    # Application life-cycle hooks (non-public virtuals).
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    cls.add_method('StartApplication', 'void', [], visibility='private', is_virtual=True)
    cls.add_method('StopApplication', 'void', [], visibility='private', is_virtual=True)
    return
def register_Ns3V4Ping_methods(root_module, cls):
    """Register ns3::V4Ping (v4ping.h, module 'applications')."""
    cls.add_constructor([param('ns3::V4Ping const &', 'arg0')])  # copy constructor
    cls.add_constructor([])                                      # default constructor
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # Life-cycle hooks; unlike the other applications in this file, V4Ping's
    # DoDispose is private rather than protected.
    for hook in ('DoDispose', 'StartApplication', 'StopApplication'):
        cls.add_method(hook, 'void', [], visibility='private', is_virtual=True)
    return
def register_Ns3AddressChecker_methods(root_module, cls):
    """Register ns3::AddressChecker (address.h, module 'network'): default and copy constructors only."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::AddressChecker const &', 'arg0')])
    return
def register_Ns3AddressValue_methods(root_module, cls):
    """Register ns3::AddressValue, the AttributeValue wrapper around ns3::Address (address.h, module 'network')."""
    # Default, copy, and value-initialising constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::AddressValue const &', 'arg0')])
    cls.add_constructor([param('ns3::Address const &', 'value')])
    # AttributeValue virtual interface plus the typed Get/Set accessors.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    cls.add_method('Get', 'ns3::Address', [], is_const=True)
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    cls.add_method('Set', 'void', [param('ns3::Address const &', 'value')])
    return
def register_functions(root_module):
    """Register free functions by delegating to the ns3::FatalImpl submodule registrar."""
    register_functions_ns3_FatalImpl(root_module.get_submodule('FatalImpl'), root_module)
    return
def register_functions_ns3_FatalImpl(module, root_module):
    """No free functions to register in the ns3::FatalImpl namespace."""
    return
def main():
    """Generate the module bindings and write them to standard output."""
    sink = FileCodeSink(sys.stdout)
    root = module_init()
    # Registration order matters: types first, then methods, then functions.
    register_types(root)
    register_methods(root)
    register_functions(root)
    root.generate(sink)
# Script entry point: emit the generated bindings when run directly.
if __name__ == '__main__':
    main()
|
gpl-2.0
|
0jpq0/kbengine
|
kbe/src/lib/python/Lib/test/test_calendar.py
|
99
|
39625
|
import calendar
import unittest
from test import support
from test.script_helper import assert_python_ok
import time
import locale
import sys
import datetime
result_2004_01_text = """
January 2004
Mo Tu We Th Fr Sa Su
1 2 3 4
5 6 7 8 9 10 11
12 13 14 15 16 17 18
19 20 21 22 23 24 25
26 27 28 29 30 31
"""
result_2004_text = """
2004
January February March
Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su
1 2 3 4 1 1 2 3 4 5 6 7
5 6 7 8 9 10 11 2 3 4 5 6 7 8 8 9 10 11 12 13 14
12 13 14 15 16 17 18 9 10 11 12 13 14 15 15 16 17 18 19 20 21
19 20 21 22 23 24 25 16 17 18 19 20 21 22 22 23 24 25 26 27 28
26 27 28 29 30 31 23 24 25 26 27 28 29 29 30 31
April May June
Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su
1 2 3 4 1 2 1 2 3 4 5 6
5 6 7 8 9 10 11 3 4 5 6 7 8 9 7 8 9 10 11 12 13
12 13 14 15 16 17 18 10 11 12 13 14 15 16 14 15 16 17 18 19 20
19 20 21 22 23 24 25 17 18 19 20 21 22 23 21 22 23 24 25 26 27
26 27 28 29 30 24 25 26 27 28 29 30 28 29 30
31
July August September
Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su
1 2 3 4 1 1 2 3 4 5
5 6 7 8 9 10 11 2 3 4 5 6 7 8 6 7 8 9 10 11 12
12 13 14 15 16 17 18 9 10 11 12 13 14 15 13 14 15 16 17 18 19
19 20 21 22 23 24 25 16 17 18 19 20 21 22 20 21 22 23 24 25 26
26 27 28 29 30 31 23 24 25 26 27 28 29 27 28 29 30
30 31
October November December
Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su
1 2 3 1 2 3 4 5 6 7 1 2 3 4 5
4 5 6 7 8 9 10 8 9 10 11 12 13 14 6 7 8 9 10 11 12
11 12 13 14 15 16 17 15 16 17 18 19 20 21 13 14 15 16 17 18 19
18 19 20 21 22 23 24 22 23 24 25 26 27 28 20 21 22 23 24 25 26
25 26 27 28 29 30 31 29 30 27 28 29 30 31
"""
result_2004_html = """
<?xml version="1.0" encoding="%(e)s"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=%(e)s" />
<link rel="stylesheet" type="text/css" href="calendar.css" />
<title>Calendar for 2004</title>
</head>
<body>
<table border="0" cellpadding="0" cellspacing="0" class="year">
<tr><th colspan="3" class="year">2004</th></tr><tr><td><table border="0" cellpadding="0" cellspacing="0" class="month">
<tr><th colspan="7" class="month">January</th></tr>
<tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
<tr><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="thu">1</td><td class="fri">2</td><td class="sat">3</td><td class="sun">4</td></tr>
<tr><td class="mon">5</td><td class="tue">6</td><td class="wed">7</td><td class="thu">8</td><td class="fri">9</td><td class="sat">10</td><td class="sun">11</td></tr>
<tr><td class="mon">12</td><td class="tue">13</td><td class="wed">14</td><td class="thu">15</td><td class="fri">16</td><td class="sat">17</td><td class="sun">18</td></tr>
<tr><td class="mon">19</td><td class="tue">20</td><td class="wed">21</td><td class="thu">22</td><td class="fri">23</td><td class="sat">24</td><td class="sun">25</td></tr>
<tr><td class="mon">26</td><td class="tue">27</td><td class="wed">28</td><td class="thu">29</td><td class="fri">30</td><td class="sat">31</td><td class="noday"> </td></tr>
</table>
</td><td><table border="0" cellpadding="0" cellspacing="0" class="month">
<tr><th colspan="7" class="month">February</th></tr>
<tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
<tr><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="sun">1</td></tr>
<tr><td class="mon">2</td><td class="tue">3</td><td class="wed">4</td><td class="thu">5</td><td class="fri">6</td><td class="sat">7</td><td class="sun">8</td></tr>
<tr><td class="mon">9</td><td class="tue">10</td><td class="wed">11</td><td class="thu">12</td><td class="fri">13</td><td class="sat">14</td><td class="sun">15</td></tr>
<tr><td class="mon">16</td><td class="tue">17</td><td class="wed">18</td><td class="thu">19</td><td class="fri">20</td><td class="sat">21</td><td class="sun">22</td></tr>
<tr><td class="mon">23</td><td class="tue">24</td><td class="wed">25</td><td class="thu">26</td><td class="fri">27</td><td class="sat">28</td><td class="sun">29</td></tr>
</table>
</td><td><table border="0" cellpadding="0" cellspacing="0" class="month">
<tr><th colspan="7" class="month">March</th></tr>
<tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
<tr><td class="mon">1</td><td class="tue">2</td><td class="wed">3</td><td class="thu">4</td><td class="fri">5</td><td class="sat">6</td><td class="sun">7</td></tr>
<tr><td class="mon">8</td><td class="tue">9</td><td class="wed">10</td><td class="thu">11</td><td class="fri">12</td><td class="sat">13</td><td class="sun">14</td></tr>
<tr><td class="mon">15</td><td class="tue">16</td><td class="wed">17</td><td class="thu">18</td><td class="fri">19</td><td class="sat">20</td><td class="sun">21</td></tr>
<tr><td class="mon">22</td><td class="tue">23</td><td class="wed">24</td><td class="thu">25</td><td class="fri">26</td><td class="sat">27</td><td class="sun">28</td></tr>
<tr><td class="mon">29</td><td class="tue">30</td><td class="wed">31</td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td></tr>
</table>
</td></tr><tr><td><table border="0" cellpadding="0" cellspacing="0" class="month">
<tr><th colspan="7" class="month">April</th></tr>
<tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
<tr><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="thu">1</td><td class="fri">2</td><td class="sat">3</td><td class="sun">4</td></tr>
<tr><td class="mon">5</td><td class="tue">6</td><td class="wed">7</td><td class="thu">8</td><td class="fri">9</td><td class="sat">10</td><td class="sun">11</td></tr>
<tr><td class="mon">12</td><td class="tue">13</td><td class="wed">14</td><td class="thu">15</td><td class="fri">16</td><td class="sat">17</td><td class="sun">18</td></tr>
<tr><td class="mon">19</td><td class="tue">20</td><td class="wed">21</td><td class="thu">22</td><td class="fri">23</td><td class="sat">24</td><td class="sun">25</td></tr>
<tr><td class="mon">26</td><td class="tue">27</td><td class="wed">28</td><td class="thu">29</td><td class="fri">30</td><td class="noday"> </td><td class="noday"> </td></tr>
</table>
</td><td><table border="0" cellpadding="0" cellspacing="0" class="month">
<tr><th colspan="7" class="month">May</th></tr>
<tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
<tr><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="sat">1</td><td class="sun">2</td></tr>
<tr><td class="mon">3</td><td class="tue">4</td><td class="wed">5</td><td class="thu">6</td><td class="fri">7</td><td class="sat">8</td><td class="sun">9</td></tr>
<tr><td class="mon">10</td><td class="tue">11</td><td class="wed">12</td><td class="thu">13</td><td class="fri">14</td><td class="sat">15</td><td class="sun">16</td></tr>
<tr><td class="mon">17</td><td class="tue">18</td><td class="wed">19</td><td class="thu">20</td><td class="fri">21</td><td class="sat">22</td><td class="sun">23</td></tr>
<tr><td class="mon">24</td><td class="tue">25</td><td class="wed">26</td><td class="thu">27</td><td class="fri">28</td><td class="sat">29</td><td class="sun">30</td></tr>
<tr><td class="mon">31</td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td></tr>
</table>
</td><td><table border="0" cellpadding="0" cellspacing="0" class="month">
<tr><th colspan="7" class="month">June</th></tr>
<tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
<tr><td class="noday"> </td><td class="tue">1</td><td class="wed">2</td><td class="thu">3</td><td class="fri">4</td><td class="sat">5</td><td class="sun">6</td></tr>
<tr><td class="mon">7</td><td class="tue">8</td><td class="wed">9</td><td class="thu">10</td><td class="fri">11</td><td class="sat">12</td><td class="sun">13</td></tr>
<tr><td class="mon">14</td><td class="tue">15</td><td class="wed">16</td><td class="thu">17</td><td class="fri">18</td><td class="sat">19</td><td class="sun">20</td></tr>
<tr><td class="mon">21</td><td class="tue">22</td><td class="wed">23</td><td class="thu">24</td><td class="fri">25</td><td class="sat">26</td><td class="sun">27</td></tr>
<tr><td class="mon">28</td><td class="tue">29</td><td class="wed">30</td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td></tr>
</table>
</td></tr><tr><td><table border="0" cellpadding="0" cellspacing="0" class="month">
<tr><th colspan="7" class="month">July</th></tr>
<tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
<tr><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="thu">1</td><td class="fri">2</td><td class="sat">3</td><td class="sun">4</td></tr>
<tr><td class="mon">5</td><td class="tue">6</td><td class="wed">7</td><td class="thu">8</td><td class="fri">9</td><td class="sat">10</td><td class="sun">11</td></tr>
<tr><td class="mon">12</td><td class="tue">13</td><td class="wed">14</td><td class="thu">15</td><td class="fri">16</td><td class="sat">17</td><td class="sun">18</td></tr>
<tr><td class="mon">19</td><td class="tue">20</td><td class="wed">21</td><td class="thu">22</td><td class="fri">23</td><td class="sat">24</td><td class="sun">25</td></tr>
<tr><td class="mon">26</td><td class="tue">27</td><td class="wed">28</td><td class="thu">29</td><td class="fri">30</td><td class="sat">31</td><td class="noday"> </td></tr>
</table>
</td><td><table border="0" cellpadding="0" cellspacing="0" class="month">
<tr><th colspan="7" class="month">August</th></tr>
<tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
<tr><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="sun">1</td></tr>
<tr><td class="mon">2</td><td class="tue">3</td><td class="wed">4</td><td class="thu">5</td><td class="fri">6</td><td class="sat">7</td><td class="sun">8</td></tr>
<tr><td class="mon">9</td><td class="tue">10</td><td class="wed">11</td><td class="thu">12</td><td class="fri">13</td><td class="sat">14</td><td class="sun">15</td></tr>
<tr><td class="mon">16</td><td class="tue">17</td><td class="wed">18</td><td class="thu">19</td><td class="fri">20</td><td class="sat">21</td><td class="sun">22</td></tr>
<tr><td class="mon">23</td><td class="tue">24</td><td class="wed">25</td><td class="thu">26</td><td class="fri">27</td><td class="sat">28</td><td class="sun">29</td></tr>
<tr><td class="mon">30</td><td class="tue">31</td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td></tr>
</table>
</td><td><table border="0" cellpadding="0" cellspacing="0" class="month">
<tr><th colspan="7" class="month">September</th></tr>
<tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
<tr><td class="noday"> </td><td class="noday"> </td><td class="wed">1</td><td class="thu">2</td><td class="fri">3</td><td class="sat">4</td><td class="sun">5</td></tr>
<tr><td class="mon">6</td><td class="tue">7</td><td class="wed">8</td><td class="thu">9</td><td class="fri">10</td><td class="sat">11</td><td class="sun">12</td></tr>
<tr><td class="mon">13</td><td class="tue">14</td><td class="wed">15</td><td class="thu">16</td><td class="fri">17</td><td class="sat">18</td><td class="sun">19</td></tr>
<tr><td class="mon">20</td><td class="tue">21</td><td class="wed">22</td><td class="thu">23</td><td class="fri">24</td><td class="sat">25</td><td class="sun">26</td></tr>
<tr><td class="mon">27</td><td class="tue">28</td><td class="wed">29</td><td class="thu">30</td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td></tr>
</table>
</td></tr><tr><td><table border="0" cellpadding="0" cellspacing="0" class="month">
<tr><th colspan="7" class="month">October</th></tr>
<tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
<tr><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="fri">1</td><td class="sat">2</td><td class="sun">3</td></tr>
<tr><td class="mon">4</td><td class="tue">5</td><td class="wed">6</td><td class="thu">7</td><td class="fri">8</td><td class="sat">9</td><td class="sun">10</td></tr>
<tr><td class="mon">11</td><td class="tue">12</td><td class="wed">13</td><td class="thu">14</td><td class="fri">15</td><td class="sat">16</td><td class="sun">17</td></tr>
<tr><td class="mon">18</td><td class="tue">19</td><td class="wed">20</td><td class="thu">21</td><td class="fri">22</td><td class="sat">23</td><td class="sun">24</td></tr>
<tr><td class="mon">25</td><td class="tue">26</td><td class="wed">27</td><td class="thu">28</td><td class="fri">29</td><td class="sat">30</td><td class="sun">31</td></tr>
</table>
</td><td><table border="0" cellpadding="0" cellspacing="0" class="month">
<tr><th colspan="7" class="month">November</th></tr>
<tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
<tr><td class="mon">1</td><td class="tue">2</td><td class="wed">3</td><td class="thu">4</td><td class="fri">5</td><td class="sat">6</td><td class="sun">7</td></tr>
<tr><td class="mon">8</td><td class="tue">9</td><td class="wed">10</td><td class="thu">11</td><td class="fri">12</td><td class="sat">13</td><td class="sun">14</td></tr>
<tr><td class="mon">15</td><td class="tue">16</td><td class="wed">17</td><td class="thu">18</td><td class="fri">19</td><td class="sat">20</td><td class="sun">21</td></tr>
<tr><td class="mon">22</td><td class="tue">23</td><td class="wed">24</td><td class="thu">25</td><td class="fri">26</td><td class="sat">27</td><td class="sun">28</td></tr>
<tr><td class="mon">29</td><td class="tue">30</td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td></tr>
</table>
</td><td><table border="0" cellpadding="0" cellspacing="0" class="month">
<tr><th colspan="7" class="month">December</th></tr>
<tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
<tr><td class="noday"> </td><td class="noday"> </td><td class="wed">1</td><td class="thu">2</td><td class="fri">3</td><td class="sat">4</td><td class="sun">5</td></tr>
<tr><td class="mon">6</td><td class="tue">7</td><td class="wed">8</td><td class="thu">9</td><td class="fri">10</td><td class="sat">11</td><td class="sun">12</td></tr>
<tr><td class="mon">13</td><td class="tue">14</td><td class="wed">15</td><td class="thu">16</td><td class="fri">17</td><td class="sat">18</td><td class="sun">19</td></tr>
<tr><td class="mon">20</td><td class="tue">21</td><td class="wed">22</td><td class="thu">23</td><td class="fri">24</td><td class="sat">25</td><td class="sun">26</td></tr>
<tr><td class="mon">27</td><td class="tue">28</td><td class="wed">29</td><td class="thu">30</td><td class="fri">31</td><td class="noday"> </td><td class="noday"> </td></tr>
</table>
</td></tr></table></body>
</html>
"""
# calendar.Calendar().yeardayscalendar(2004): day numbers grouped as
# quarters -> months -> weeks; 0 marks days outside the month.
result_2004_days = [
    [[[0, 0, 0, 1, 2, 3, 4],
      [5, 6, 7, 8, 9, 10, 11],
      [12, 13, 14, 15, 16, 17, 18],
      [19, 20, 21, 22, 23, 24, 25],
      [26, 27, 28, 29, 30, 31, 0]],
     [[0, 0, 0, 0, 0, 0, 1],
      [2, 3, 4, 5, 6, 7, 8],
      [9, 10, 11, 12, 13, 14, 15],
      [16, 17, 18, 19, 20, 21, 22],
      [23, 24, 25, 26, 27, 28, 29]],
     [[1, 2, 3, 4, 5, 6, 7],
      [8, 9, 10, 11, 12, 13, 14],
      [15, 16, 17, 18, 19, 20, 21],
      [22, 23, 24, 25, 26, 27, 28],
      [29, 30, 31, 0, 0, 0, 0]]],
    [[[0, 0, 0, 1, 2, 3, 4],
      [5, 6, 7, 8, 9, 10, 11],
      [12, 13, 14, 15, 16, 17, 18],
      [19, 20, 21, 22, 23, 24, 25],
      [26, 27, 28, 29, 30, 0, 0]],
     [[0, 0, 0, 0, 0, 1, 2],
      [3, 4, 5, 6, 7, 8, 9],
      [10, 11, 12, 13, 14, 15, 16],
      [17, 18, 19, 20, 21, 22, 23],
      [24, 25, 26, 27, 28, 29, 30],
      [31, 0, 0, 0, 0, 0, 0]],
     [[0, 1, 2, 3, 4, 5, 6],
      [7, 8, 9, 10, 11, 12, 13],
      [14, 15, 16, 17, 18, 19, 20],
      [21, 22, 23, 24, 25, 26, 27],
      [28, 29, 30, 0, 0, 0, 0]]],
    [[[0, 0, 0, 1, 2, 3, 4],
      [5, 6, 7, 8, 9, 10, 11],
      [12, 13, 14, 15, 16, 17, 18],
      [19, 20, 21, 22, 23, 24, 25],
      [26, 27, 28, 29, 30, 31, 0]],
     [[0, 0, 0, 0, 0, 0, 1],
      [2, 3, 4, 5, 6, 7, 8],
      [9, 10, 11, 12, 13, 14, 15],
      [16, 17, 18, 19, 20, 21, 22],
      [23, 24, 25, 26, 27, 28, 29],
      [30, 31, 0, 0, 0, 0, 0]],
     [[0, 0, 1, 2, 3, 4, 5],
      [6, 7, 8, 9, 10, 11, 12],
      [13, 14, 15, 16, 17, 18, 19],
      [20, 21, 22, 23, 24, 25, 26],
      [27, 28, 29, 30, 0, 0, 0]]],
    [[[0, 0, 0, 0, 1, 2, 3],
      [4, 5, 6, 7, 8, 9, 10],
      [11, 12, 13, 14, 15, 16, 17],
      [18, 19, 20, 21, 22, 23, 24],
      [25, 26, 27, 28, 29, 30, 31]],
     [[1, 2, 3, 4, 5, 6, 7],
      [8, 9, 10, 11, 12, 13, 14],
      [15, 16, 17, 18, 19, 20, 21],
      [22, 23, 24, 25, 26, 27, 28],
      [29, 30, 0, 0, 0, 0, 0]],
     [[0, 0, 1, 2, 3, 4, 5],
      [6, 7, 8, 9, 10, 11, 12],
      [13, 14, 15, 16, 17, 18, 19],
      [20, 21, 22, 23, 24, 25, 26],
      [27, 28, 29, 30, 31, 0, 0]]]
]
# calendar.Calendar().yeardatescalendar(2004) shrunk to 'MM/DD/YY' text by
# test_yeardatescalendar; weeks overlap adjacent months at the edges.
result_2004_dates = \
    [[['12/29/03 12/30/03 12/31/03 01/01/04 01/02/04 01/03/04 01/04/04',
       '01/05/04 01/06/04 01/07/04 01/08/04 01/09/04 01/10/04 01/11/04',
       '01/12/04 01/13/04 01/14/04 01/15/04 01/16/04 01/17/04 01/18/04',
       '01/19/04 01/20/04 01/21/04 01/22/04 01/23/04 01/24/04 01/25/04',
       '01/26/04 01/27/04 01/28/04 01/29/04 01/30/04 01/31/04 02/01/04'],
      ['01/26/04 01/27/04 01/28/04 01/29/04 01/30/04 01/31/04 02/01/04',
       '02/02/04 02/03/04 02/04/04 02/05/04 02/06/04 02/07/04 02/08/04',
       '02/09/04 02/10/04 02/11/04 02/12/04 02/13/04 02/14/04 02/15/04',
       '02/16/04 02/17/04 02/18/04 02/19/04 02/20/04 02/21/04 02/22/04',
       '02/23/04 02/24/04 02/25/04 02/26/04 02/27/04 02/28/04 02/29/04'],
      ['03/01/04 03/02/04 03/03/04 03/04/04 03/05/04 03/06/04 03/07/04',
       '03/08/04 03/09/04 03/10/04 03/11/04 03/12/04 03/13/04 03/14/04',
       '03/15/04 03/16/04 03/17/04 03/18/04 03/19/04 03/20/04 03/21/04',
       '03/22/04 03/23/04 03/24/04 03/25/04 03/26/04 03/27/04 03/28/04',
       '03/29/04 03/30/04 03/31/04 04/01/04 04/02/04 04/03/04 04/04/04']],
     [['03/29/04 03/30/04 03/31/04 04/01/04 04/02/04 04/03/04 04/04/04',
       '04/05/04 04/06/04 04/07/04 04/08/04 04/09/04 04/10/04 04/11/04',
       '04/12/04 04/13/04 04/14/04 04/15/04 04/16/04 04/17/04 04/18/04',
       '04/19/04 04/20/04 04/21/04 04/22/04 04/23/04 04/24/04 04/25/04',
       '04/26/04 04/27/04 04/28/04 04/29/04 04/30/04 05/01/04 05/02/04'],
      ['04/26/04 04/27/04 04/28/04 04/29/04 04/30/04 05/01/04 05/02/04',
       '05/03/04 05/04/04 05/05/04 05/06/04 05/07/04 05/08/04 05/09/04',
       '05/10/04 05/11/04 05/12/04 05/13/04 05/14/04 05/15/04 05/16/04',
       '05/17/04 05/18/04 05/19/04 05/20/04 05/21/04 05/22/04 05/23/04',
       '05/24/04 05/25/04 05/26/04 05/27/04 05/28/04 05/29/04 05/30/04',
       '05/31/04 06/01/04 06/02/04 06/03/04 06/04/04 06/05/04 06/06/04'],
      ['05/31/04 06/01/04 06/02/04 06/03/04 06/04/04 06/05/04 06/06/04',
       '06/07/04 06/08/04 06/09/04 06/10/04 06/11/04 06/12/04 06/13/04',
       '06/14/04 06/15/04 06/16/04 06/17/04 06/18/04 06/19/04 06/20/04',
       '06/21/04 06/22/04 06/23/04 06/24/04 06/25/04 06/26/04 06/27/04',
       '06/28/04 06/29/04 06/30/04 07/01/04 07/02/04 07/03/04 07/04/04']],
     [['06/28/04 06/29/04 06/30/04 07/01/04 07/02/04 07/03/04 07/04/04',
       '07/05/04 07/06/04 07/07/04 07/08/04 07/09/04 07/10/04 07/11/04',
       '07/12/04 07/13/04 07/14/04 07/15/04 07/16/04 07/17/04 07/18/04',
       '07/19/04 07/20/04 07/21/04 07/22/04 07/23/04 07/24/04 07/25/04',
       '07/26/04 07/27/04 07/28/04 07/29/04 07/30/04 07/31/04 08/01/04'],
      ['07/26/04 07/27/04 07/28/04 07/29/04 07/30/04 07/31/04 08/01/04',
       '08/02/04 08/03/04 08/04/04 08/05/04 08/06/04 08/07/04 08/08/04',
       '08/09/04 08/10/04 08/11/04 08/12/04 08/13/04 08/14/04 08/15/04',
       '08/16/04 08/17/04 08/18/04 08/19/04 08/20/04 08/21/04 08/22/04',
       '08/23/04 08/24/04 08/25/04 08/26/04 08/27/04 08/28/04 08/29/04',
       '08/30/04 08/31/04 09/01/04 09/02/04 09/03/04 09/04/04 09/05/04'],
      ['08/30/04 08/31/04 09/01/04 09/02/04 09/03/04 09/04/04 09/05/04',
       '09/06/04 09/07/04 09/08/04 09/09/04 09/10/04 09/11/04 09/12/04',
       '09/13/04 09/14/04 09/15/04 09/16/04 09/17/04 09/18/04 09/19/04',
       '09/20/04 09/21/04 09/22/04 09/23/04 09/24/04 09/25/04 09/26/04',
       '09/27/04 09/28/04 09/29/04 09/30/04 10/01/04 10/02/04 10/03/04']],
     [['09/27/04 09/28/04 09/29/04 09/30/04 10/01/04 10/02/04 10/03/04',
       '10/04/04 10/05/04 10/06/04 10/07/04 10/08/04 10/09/04 10/10/04',
       '10/11/04 10/12/04 10/13/04 10/14/04 10/15/04 10/16/04 10/17/04',
       '10/18/04 10/19/04 10/20/04 10/21/04 10/22/04 10/23/04 10/24/04',
       '10/25/04 10/26/04 10/27/04 10/28/04 10/29/04 10/30/04 10/31/04'],
      ['11/01/04 11/02/04 11/03/04 11/04/04 11/05/04 11/06/04 11/07/04',
       '11/08/04 11/09/04 11/10/04 11/11/04 11/12/04 11/13/04 11/14/04',
       '11/15/04 11/16/04 11/17/04 11/18/04 11/19/04 11/20/04 11/21/04',
       '11/22/04 11/23/04 11/24/04 11/25/04 11/26/04 11/27/04 11/28/04',
       '11/29/04 11/30/04 12/01/04 12/02/04 12/03/04 12/04/04 12/05/04'],
      ['11/29/04 11/30/04 12/01/04 12/02/04 12/03/04 12/04/04 12/05/04',
       '12/06/04 12/07/04 12/08/04 12/09/04 12/10/04 12/11/04 12/12/04',
       '12/13/04 12/14/04 12/15/04 12/16/04 12/17/04 12/18/04 12/19/04',
       '12/20/04 12/21/04 12/22/04 12/23/04 12/24/04 12/25/04 12/26/04',
       '12/27/04 12/28/04 12/29/04 12/30/04 12/31/04 01/01/05 01/02/05']]]
class OutputTestCase(unittest.TestCase):
def normalize_calendar(self, s):
# Filters out locale dependent strings
def neitherspacenordigit(c):
return not c.isspace() and not c.isdigit()
lines = []
for line in s.splitlines(keepends=False):
# Drop texts, as they are locale dependent
if line and not filter(neitherspacenordigit, line):
lines.append(line)
return lines
def check_htmlcalendar_encoding(self, req, res):
cal = calendar.HTMLCalendar()
self.assertEqual(
cal.formatyearpage(2004, encoding=req).strip(b' \t\n'),
(result_2004_html % {'e': res}).strip(' \t\n').encode(res)
)
def test_output(self):
self.assertEqual(
self.normalize_calendar(calendar.calendar(2004)),
self.normalize_calendar(result_2004_text)
)
def test_output_textcalendar(self):
self.assertEqual(
calendar.TextCalendar().formatyear(2004).strip(),
result_2004_text.strip()
)
def test_output_htmlcalendar_encoding_ascii(self):
self.check_htmlcalendar_encoding('ascii', 'ascii')
def test_output_htmlcalendar_encoding_utf8(self):
self.check_htmlcalendar_encoding('utf-8', 'utf-8')
def test_output_htmlcalendar_encoding_default(self):
self.check_htmlcalendar_encoding(None, sys.getdefaultencoding())
def test_yeardatescalendar(self):
def shrink(cal):
return [[[' '.join('{:02d}/{:02d}/{}'.format(
d.month, d.day, str(d.year)[-2:]) for d in z)
for z in y] for y in x] for x in cal]
self.assertEqual(
shrink(calendar.Calendar().yeardatescalendar(2004)),
result_2004_dates
)
def test_yeardayscalendar(self):
self.assertEqual(
calendar.Calendar().yeardayscalendar(2004),
result_2004_days
)
def test_formatweekheader_short(self):
self.assertEqual(
calendar.TextCalendar().formatweekheader(2),
'Mo Tu We Th Fr Sa Su'
)
def test_formatweekheader_long(self):
self.assertEqual(
calendar.TextCalendar().formatweekheader(9),
' Monday Tuesday Wednesday Thursday '
' Friday Saturday Sunday '
)
def test_formatmonth(self):
self.assertEqual(
calendar.TextCalendar().formatmonth(2004, 1).strip(),
result_2004_01_text.strip()
)
def test_formatmonthname_with_year(self):
self.assertEqual(
calendar.HTMLCalendar().formatmonthname(2004, 1, withyear=True),
'<tr><th colspan="7" class="month">January 2004</th></tr>'
)
def test_formatmonthname_without_year(self):
self.assertEqual(
calendar.HTMLCalendar().formatmonthname(2004, 1, withyear=False),
'<tr><th colspan="7" class="month">January</th></tr>'
)
def test_prweek(self):
with support.captured_stdout() as out:
week = [(1,0), (2,1), (3,2), (4,3), (5,4), (6,5), (7,6)]
calendar.TextCalendar().prweek(week, 1)
self.assertEqual(out.getvalue().strip(), "1 2 3 4 5 6 7")
def test_prmonth(self):
with support.captured_stdout() as out:
calendar.TextCalendar().prmonth(2004, 1)
output = out.getvalue().strip()
self.assertEqual(output, result_2004_01_text.strip())
def test_pryear(self):
with support.captured_stdout() as out:
calendar.TextCalendar().pryear(2004)
self.assertEqual(out.getvalue().strip(), result_2004_text.strip())
def test_format(self):
with support.captured_stdout() as out:
calendar.format(["1", "2", "3"], colwidth=3, spacing=1)
self.assertEqual(out.getvalue().strip(), "1 2 3")
class CalendarTestCase(unittest.TestCase):
def test_isleap(self):
# Make sure that the return is right for a few years, and
# ensure that the return values are 1 or 0, not just true or
# false (see SF bug #485794). Specific additional tests may
# be appropriate; this tests a single "cycle".
self.assertEqual(calendar.isleap(2000), 1)
self.assertEqual(calendar.isleap(2001), 0)
self.assertEqual(calendar.isleap(2002), 0)
self.assertEqual(calendar.isleap(2003), 0)
def test_setfirstweekday(self):
self.assertRaises(TypeError, calendar.setfirstweekday, 'flabber')
self.assertRaises(ValueError, calendar.setfirstweekday, -1)
self.assertRaises(ValueError, calendar.setfirstweekday, 200)
orig = calendar.firstweekday()
calendar.setfirstweekday(calendar.SUNDAY)
self.assertEqual(calendar.firstweekday(), calendar.SUNDAY)
calendar.setfirstweekday(calendar.MONDAY)
self.assertEqual(calendar.firstweekday(), calendar.MONDAY)
calendar.setfirstweekday(orig)
def test_illegal_weekday_reported(self):
with self.assertRaisesRegex(calendar.IllegalWeekdayError, '123'):
calendar.setfirstweekday(123)
def test_enumerate_weekdays(self):
self.assertRaises(IndexError, calendar.day_abbr.__getitem__, -10)
self.assertRaises(IndexError, calendar.day_name.__getitem__, 10)
self.assertEqual(len([d for d in calendar.day_abbr]), 7)
def test_days(self):
for attr in "day_name", "day_abbr":
value = getattr(calendar, attr)
self.assertEqual(len(value), 7)
self.assertEqual(len(value[:]), 7)
# ensure they're all unique
self.assertEqual(len(set(value)), 7)
# verify it "acts like a sequence" in two forms of iteration
self.assertEqual(value[::-1], list(reversed(value)))
def test_months(self):
for attr in "month_name", "month_abbr":
value = getattr(calendar, attr)
self.assertEqual(len(value), 13)
self.assertEqual(len(value[:]), 13)
self.assertEqual(value[0], "")
# ensure they're all unique
self.assertEqual(len(set(value)), 13)
# verify it "acts like a sequence" in two forms of iteration
self.assertEqual(value[::-1], list(reversed(value)))
def test_locale_calendars(self):
# ensure that Locale{Text,HTML}Calendar resets the locale properly
# (it is still not thread-safe though)
old_october = calendar.TextCalendar().formatmonthname(2010, 10, 10)
try:
cal = calendar.LocaleTextCalendar(locale='')
local_weekday = cal.formatweekday(1, 10)
local_month = cal.formatmonthname(2010, 10, 10)
except locale.Error:
# cannot set the system default locale -- skip rest of test
raise unittest.SkipTest('cannot set the system default locale')
self.assertIsInstance(local_weekday, str)
self.assertIsInstance(local_month, str)
self.assertEqual(len(local_weekday), 10)
self.assertGreaterEqual(len(local_month), 10)
cal = calendar.LocaleHTMLCalendar(locale='')
local_weekday = cal.formatweekday(1)
local_month = cal.formatmonthname(2010, 10)
self.assertIsInstance(local_weekday, str)
self.assertIsInstance(local_month, str)
new_october = calendar.TextCalendar().formatmonthname(2010, 10, 10)
self.assertEqual(old_october, new_october)
def test_itermonthdates(self):
# ensure itermonthdates doesn't overflow after datetime.MAXYEAR
# see #15421
list(calendar.Calendar().itermonthdates(datetime.MAXYEAR, 12))
class MonthCalendarTestCase(unittest.TestCase):
    """Shared harness for monthcalendar() layout checks; subclasses set
    the ``firstweekday`` class attribute."""

    def setUp(self):
        # Remember the module-wide setting so tearDown can restore it.
        self.oldfirstweekday = calendar.firstweekday()
        calendar.setfirstweekday(self.firstweekday)

    def tearDown(self):
        calendar.setfirstweekday(self.oldfirstweekday)

    def check_weeks(self, year, month, weeks):
        """Assert each week row of monthcalendar(year, month) contains
        exactly the expected count of real (non-zero) days."""
        rows = calendar.monthcalendar(year, month)
        self.assertEqual(len(rows), len(weeks))
        for expected, row in zip(weeks, rows):
            self.assertEqual(expected, sum(1 for day in row if day != 0))
class MondayTestCase(MonthCalendarTestCase):
    """monthcalendar() layouts with Monday as the first weekday."""
    firstweekday = calendar.MONDAY

    def test_february(self):
        # (year, per-week day counts) for februaries starting on various
        # weekdays, in 28- and 29-day variants.
        for year, weeks in (
            (1999, (7, 7, 7, 7)),        # 28 days, starts Monday
            (2005, (6, 7, 7, 7, 1)),     # 28 days, starts Tuesday
            (1987, (1, 7, 7, 7, 6)),     # 28 days, starts Sunday
            (1988, (7, 7, 7, 7, 1)),     # 29 days, starts Monday
            (1972, (6, 7, 7, 7, 2)),     # 29 days, starts Tuesday
            (2004, (1, 7, 7, 7, 7)),     # 29 days, starts Sunday
        ):
            self.check_weeks(year, 2, weeks)

    def test_april(self):
        # 30-day month starting on assorted weekdays.
        for year, weeks in (
            (1935, (7, 7, 7, 7, 2)),     # starts Monday
            (1975, (6, 7, 7, 7, 3)),     # starts Tuesday
            (1945, (1, 7, 7, 7, 7, 1)),  # starts Sunday
            (1995, (2, 7, 7, 7, 7)),     # starts Saturday
            (1994, (3, 7, 7, 7, 6)),     # starts Friday
        ):
            self.check_weeks(year, 4, weeks)

    def test_december(self):
        # 31-day month starting on assorted weekdays.
        for year, weeks in (
            (1980, (7, 7, 7, 7, 3)),     # starts Monday
            (1987, (6, 7, 7, 7, 4)),     # starts Tuesday
            (1968, (1, 7, 7, 7, 7, 2)),  # starts Sunday
            (1988, (4, 7, 7, 7, 6)),     # starts Thursday
            (2017, (3, 7, 7, 7, 7)),     # starts Friday
            (2068, (2, 7, 7, 7, 7, 1)),  # starts Saturday
        ):
            self.check_weeks(year, 12, weeks)
class SundayTestCase(MonthCalendarTestCase):
    """monthcalendar() layouts with Sunday as the first weekday."""
    firstweekday = calendar.SUNDAY

    def test_february(self):
        # (year, per-week day counts) for februaries starting on various
        # weekdays, in 28- and 29-day variants.
        for year, weeks in (
            (2009, (7, 7, 7, 7)),        # 28 days, starts Sunday
            (1999, (6, 7, 7, 7, 1)),     # 28 days, starts Monday
            (1997, (1, 7, 7, 7, 6)),     # 28 days, starts Saturday
            (2004, (7, 7, 7, 7, 1)),     # 29 days, starts Sunday
            (1960, (6, 7, 7, 7, 2)),     # 29 days, starts Monday
            (1964, (1, 7, 7, 7, 7)),     # 29 days, starts Saturday
        ):
            self.check_weeks(year, 2, weeks)

    def test_april(self):
        # 30-day month starting on assorted weekdays.
        for year, weeks in (
            (1923, (7, 7, 7, 7, 2)),     # starts Sunday
            (1918, (6, 7, 7, 7, 3)),     # starts Monday
            (1950, (1, 7, 7, 7, 7, 1)),  # starts Saturday
            (1960, (2, 7, 7, 7, 7)),     # starts Friday
            (1909, (3, 7, 7, 7, 6)),     # starts Thursday
        ):
            self.check_weeks(year, 4, weeks)

    def test_december(self):
        # 31-day month starting on assorted weekdays.
        for year, weeks in (
            (2080, (7, 7, 7, 7, 3)),     # starts Sunday
            (1941, (6, 7, 7, 7, 4)),     # starts Monday
            (1923, (1, 7, 7, 7, 7, 2)),  # starts Saturday
            (1948, (4, 7, 7, 7, 6)),     # starts Wednesday
            (1927, (3, 7, 7, 7, 7)),     # starts Thursday
            (1995, (2, 7, 7, 7, 7, 1)),  # starts Friday
        ):
            self.check_weeks(year, 12, weeks)
class TimegmTestCase(unittest.TestCase):
    """Check that calendar.timegm() is the inverse of time.gmtime()."""

    # Seconds-since-epoch samples spanning several orders of magnitude.
    TIMESTAMPS = [0, 10, 100, 1000, 10000, 100000, 1000000,
                  1234567890, 1262304000, 1275785153]

    def test_timegm(self):
        for secs in self.TIMESTAMPS:
            # Named 'tm' rather than 'tuple' to avoid shadowing the builtin.
            tm = time.gmtime(secs)
            self.assertEqual(secs, calendar.timegm(tm))
class MonthRangeTestCase(unittest.TestCase):
    """Exercise calendar.monthrange() on valid and invalid months."""

    def test_january(self):
        # Valid lower boundary: January 2004 opens on weekday 3 and spans 31 days.
        self.assertEqual((3, 31), calendar.monthrange(2004, 1))

    def test_february_leap(self):
        # February of a leap year: weekday 6, 29 days.
        self.assertEqual((6, 29), calendar.monthrange(2004, 2))

    def test_february_nonleap(self):
        # February of a non-leap year: weekday 0, 28 days.
        self.assertEqual((0, 28), calendar.monthrange(2010, 2))

    def test_december(self):
        # Valid upper boundary: December 2004 opens on weekday 2, 31 days.
        self.assertEqual((2, 31), calendar.monthrange(2004, 12))

    def test_zeroth_month(self):
        # Month 0 is below the valid range and must raise.
        self.assertRaises(calendar.IllegalMonthError,
                          calendar.monthrange, 2004, 0)

    def test_thirteenth_month(self):
        # Month 13 is above the valid range and must raise.
        self.assertRaises(calendar.IllegalMonthError,
                          calendar.monthrange, 2004, 13)

    def test_illegal_month_reported(self):
        # The offending month number must appear in the error message.
        with self.assertRaisesRegex(calendar.IllegalMonthError, '65'):
            calendar.monthrange(2004, 65)
class LeapdaysTestCase(unittest.TestCase):
    """Exercise calendar.leapdays() over assorted year ranges."""

    def test_no_range(self):
        # Identical endpoints form an empty range: zero leap days.
        self.assertEqual(0, calendar.leapdays(2010, 2010))

    def test_no_leapdays(self):
        # A one-year range containing no leap year.
        self.assertEqual(0, calendar.leapdays(2010, 2011))

    def test_no_leapdays_upper_boundary(self):
        # The upper bound is exclusive, so a leap year there does not count.
        self.assertEqual(0, calendar.leapdays(2010, 2012))

    def test_one_leapday_lower_boundary(self):
        # The lower bound is inclusive, so a leap year there does count.
        self.assertEqual(1, calendar.leapdays(2012, 2013))

    def test_several_leapyears_in_range(self):
        self.assertEqual(5, calendar.leapdays(1997, 2020))
class ConsoleOutputTestCase(unittest.TestCase):
    """Smoke-test the module's command-line HTML output."""

    def test_outputs_bytes(self):
        rc, stdout, stderr = assert_python_ok(
            '-m', 'calendar', '--type=html', '2010')
        # HTML mode emits bytes that open with an XML declaration.
        self.assertEqual(stdout[:6], b'<?xml ')
def test_main():
    """Run every test case in this module under the regrtest runner."""
    test_cases = (
        OutputTestCase,
        CalendarTestCase,
        MondayTestCase,
        SundayTestCase,
        TimegmTestCase,
        MonthRangeTestCase,
        LeapdaysTestCase,
        ConsoleOutputTestCase,
    )
    support.run_unittest(*test_cases)


if __name__ == "__main__":
    test_main()
|
lgpl-3.0
|
naresh21/synergetics-edx-platform
|
common/lib/calc/calc/tests/test_preview.py
|
257
|
8723
|
# -*- coding: utf-8 -*-
"""
Unit tests for preview.py
"""
import unittest
from calc import preview
import pyparsing
class LatexRenderedTest(unittest.TestCase):
    """
    Test the initializing code for LatexRendered.

    Specifically that it stores the correct data and handles parens well.
    """
    # ``assertEquals`` is a deprecated alias of ``assertEqual`` (removed in
    # Python 3.12); the canonical spelling is used throughout below.
    def test_simple(self):
        """
        Test that the data values are stored without changing.
        """
        math = 'x^2'
        obj = preview.LatexRendered(math, tall=True)
        self.assertEqual(obj.latex, math)
        self.assertEqual(obj.sans_parens, math)
        self.assertEqual(obj.tall, True)

    def _each_parens(self, with_parens, math, parens, tall=False):
        """
        Helper method to test the way parens are wrapped.
        """
        obj = preview.LatexRendered(math, parens=parens, tall=tall)
        self.assertEqual(obj.latex, with_parens)
        self.assertEqual(obj.sans_parens, math)
        self.assertEqual(obj.tall, tall)

    def test_parens(self):
        """ Test curvy parens. """
        self._each_parens('(x+y)', 'x+y', '(')

    def test_brackets(self):
        """ Test brackets. """
        self._each_parens('[x+y]', 'x+y', '[')

    def test_squiggles(self):
        """ Test curly braces. """
        self._each_parens(r'\{x+y\}', 'x+y', '{')

    def test_parens_tall(self):
        """ Test curvy parens with the tall parameter. """
        self._each_parens(r'\left(x^y\right)', 'x^y', '(', tall=True)

    def test_brackets_tall(self):
        """ Test brackets, also tall. """
        self._each_parens(r'\left[x^y\right]', 'x^y', '[', tall=True)

    def test_squiggles_tall(self):
        """ Test tall curly braces. """
        self._each_parens(r'\left\{x^y\right\}', 'x^y', '{', tall=True)

    def test_bad_parens(self):
        """ Check that we get an error with invalid parens. """
        # NOTE(review): ``assertRaisesRegexp`` is deprecated on Python 3
        # (``assertRaisesRegex``), but the Regexp spelling is the only one
        # available on Python 2.7, which this codebase still targets.
        with self.assertRaisesRegexp(Exception, 'Unknown parenthesis'):
            preview.LatexRendered('x^2', parens='not parens')
class LatexPreviewTest(unittest.TestCase):
    """
    Run integrative tests for `latex_preview`.

    All functionality was tested in `RenderMethodsTest`, but see if it
    combines all together correctly.
    """
    # ``assertEquals`` is a deprecated alias of ``assertEqual`` (removed in
    # Python 3.12); the canonical spelling is used throughout below.  The
    # docstrings that mention backslash commands (e.g. '\frac') are raw
    # strings: in a plain string '\f' is a form-feed escape.
    def test_no_input(self):
        """
        With no input (including just whitespace), see that no error is thrown.
        """
        self.assertEqual('', preview.latex_preview(''))
        self.assertEqual('', preview.latex_preview(' '))
        self.assertEqual('', preview.latex_preview(' \t '))

    def test_number_simple(self):
        """ Simple numbers should pass through. """
        self.assertEqual(preview.latex_preview('3.1415'), '3.1415')

    def test_number_suffix(self):
        """ Suffixes should be escaped. """
        self.assertEqual(preview.latex_preview('1.618k'), r'1.618\text{k}')

    def test_number_sci_notation(self):
        """ Numbers with scientific notation should display nicely """
        self.assertEqual(
            preview.latex_preview('6.0221413E+23'),
            r'6.0221413\!\times\!10^{+23}'
        )
        self.assertEqual(
            preview.latex_preview('-6.0221413E+23'),
            r'-6.0221413\!\times\!10^{+23}'
        )

    def test_number_sci_notation_suffix(self):
        """ Test numbers with both of these. """
        self.assertEqual(
            preview.latex_preview('6.0221413E+23k'),
            r'6.0221413\!\times\!10^{+23}\text{k}'
        )
        self.assertEqual(
            preview.latex_preview('-6.0221413E+23k'),
            r'-6.0221413\!\times\!10^{+23}\text{k}'
        )

    def test_variable_simple(self):
        """ Simple valid variables should pass through. """
        self.assertEqual(preview.latex_preview('x', variables=['x']), 'x')

    def test_greek(self):
        """ Variable names that are greek should be formatted accordingly. """
        self.assertEqual(preview.latex_preview('pi'), r'\pi')

    def test_variable_subscript(self):
        """ Things like 'epsilon_max' should display nicely """
        self.assertEqual(
            preview.latex_preview('epsilon_max', variables=['epsilon_max']),
            r'\epsilon_{max}'
        )

    def test_function_simple(self):
        """ Valid function names should be escaped. """
        self.assertEqual(
            preview.latex_preview('f(3)', functions=['f']),
            r'\text{f}(3)'
        )

    def test_function_tall(self):
        r""" Functions surrounding a tall element should have \left, \right """
        self.assertEqual(
            preview.latex_preview('f(3^2)', functions=['f']),
            r'\text{f}\left(3^{2}\right)'
        )

    def test_function_sqrt(self):
        """ Sqrt function should be handled specially. """
        self.assertEqual(preview.latex_preview('sqrt(3)'), r'\sqrt{3}')

    def test_function_log10(self):
        """ log10 function should be handled specially. """
        self.assertEqual(preview.latex_preview('log10(3)'), r'\log_{10}(3)')

    def test_function_log2(self):
        """ log2 function should be handled specially. """
        self.assertEqual(preview.latex_preview('log2(3)'), r'\log_2(3)')

    def test_power_simple(self):
        """ Powers should wrap the elements with braces correctly. """
        self.assertEqual(preview.latex_preview('2^3^4'), '2^{3^{4}}')

    def test_power_parens(self):
        """ Powers should ignore the parenthesis of the last math. """
        self.assertEqual(preview.latex_preview('2^3^(4+5)'), '2^{3^{4+5}}')

    def test_parallel(self):
        r""" Parallel items should combine with '\|'. """
        self.assertEqual(preview.latex_preview('2||3'), r'2\|3')

    def test_product_mult_only(self):
        r""" Simple products should combine with a '\cdot'. """
        self.assertEqual(preview.latex_preview('2*3'), r'2\cdot 3')

    def test_product_big_frac(self):
        r""" Division should combine with '\frac'. """
        self.assertEqual(
            preview.latex_preview('2*3/4/5'),
            r'\frac{2\cdot 3}{4\cdot 5}'
        )

    def test_product_single_frac(self):
        """ Division should ignore parens if they are extraneous. """
        self.assertEqual(
            preview.latex_preview('(2+3)/(4+5)'),
            r'\frac{2+3}{4+5}'
        )

    def test_product_keep_going(self):
        r"""
        Complex products/quotients should split into many '\frac's when needed.
        """
        self.assertEqual(
            preview.latex_preview('2/3*4/5*6'),
            r'\frac{2}{3}\cdot \frac{4}{5}\cdot 6'
        )

    def test_sum(self):
        """ Sums should combine its elements. """
        # Use 'x' as the first term (instead of, say, '1'), so it can't be
        # interpreted as a negative number.
        self.assertEqual(
            preview.latex_preview('-x+2-3+4', variables=['x']),
            '-x+2-3+4'
        )

    def test_sum_tall(self):
        """ A complicated expression should not hide the tallness. """
        self.assertEqual(
            preview.latex_preview('(2+3^2)'),
            r'\left(2+3^{2}\right)'
        )

    def test_complicated(self):
        """
        Given complicated input, ensure that exactly the correct string is made.
        """
        self.assertEqual(
            preview.latex_preview('11*f(x)+x^2*(3||4)/sqrt(pi)'),
            r'11\cdot \text{f}(x)+\frac{x^{2}\cdot (3\|4)}{\sqrt{\pi}}'
        )
        self.assertEqual(
            preview.latex_preview('log10(1+3/4/Cos(x^2)*(x+1))',
                                  case_sensitive=True),
            (r'\log_{10}\left(1+\frac{3}{4\cdot \text{Cos}\left(x^{2}\right)}'
             r'\cdot (x+1)\right)')
        )

    def test_syntax_errors(self):
        """
        Test a lot of math strings that give syntax errors

        Rather than have a lot of self.assertRaises, make a loop and keep track
        of those that do not throw a `ParseException`, and assert at the end.
        """
        bad_math_list = [
            '11+',
            '11*',
            'f((x)',
            'sqrt(x^)',
            '3f(x)',  # Not 3*f(x)
            '3|4',
            '3|||4'
        ]
        bad_exceptions = {}
        for math in bad_math_list:
            try:
                preview.latex_preview(math)
            except pyparsing.ParseException:
                pass  # This is what we were expecting. (not excepting :P)
            except Exception as error:  # pragma: no cover
                bad_exceptions[math] = error
            else:  # pragma: no cover
                # If there is no exception thrown, this is a problem
                bad_exceptions[math] = None

        self.assertEqual({}, bad_exceptions)
|
agpl-3.0
|
pacificcoin/PacificCoin
|
share/qt/make_spinner.py
|
4415
|
1035
|
#!/usr/bin/env python
# W.J. van der Laan, 2011
# Make spinning .mng animation from a .png
# Requires imagemagick 6.7+
from __future__ import division
from os import path
from PIL import Image
from subprocess import Popen

# Input/output locations and animation parameters.
SRC = 'img/reload_scaled.png'
DST = '../../src/qt/res/movies/update_spinner.mng'
TMPDIR = '/tmp'
TMPNAME = 'tmp-%03i.png'   # per-frame temp-file name pattern
NUMFRAMES = 35
FRAMERATE = 10.0           # passed to ImageMagick as the -delay value
CONVERT = 'convert'
CLOCKWISE = True
DSIZE = (16, 16)           # final thumbnail size of every frame

im_src = Image.open(SRC)

# Mirroring first and negating the rotation angle below together produce a
# clockwise spin.
if CLOCKWISE:
    im_src = im_src.transpose(Image.FLIP_LEFT_RIGHT)


def frame_to_filename(frame):
    """Return the temp-file path for the given frame number."""
    return path.join(TMPDIR, TMPNAME % frame)


frame_files = []
# range (not the Python-2-only xrange) keeps the script portable; iterating a
# 35-element range is equally cheap either way.
for frame in range(NUMFRAMES):
    rotation = (frame + 0.5) / NUMFRAMES * 360.0
    if CLOCKWISE:
        rotation = -rotation
    im_new = im_src.rotate(rotation, Image.BICUBIC)
    # NOTE(review): Image.ANTIALIAS was removed in Pillow 10; switch to
    # Image.LANCZOS when upgrading Pillow.
    im_new.thumbnail(DSIZE, Image.ANTIALIAS)
    outfile = frame_to_filename(frame)
    im_new.save(outfile, 'png')
    frame_files.append(outfile)

# Assemble the frames into the final .mng with ImageMagick.
p = Popen([CONVERT, "-delay", str(FRAMERATE), "-dispose", "2"] + frame_files + [DST])
p.communicate()
|
mit
|
bowlofstew/code-for-blog
|
2009/pyqtris/pyqtris_src/lib/aboutdialog.py
|
13
|
3454
|
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from images import get_logo_pixmap
# Text shown on the "About" tab.  Runtime string: rendered verbatim in the
# dialog, so the wording is part of the UI.
about_text = """\
PyQtris is a simple, free Tetris clone, developed by
Eli Bendersky (http://eli.thegreenplace.net) in Python
using PyQt as the GUI toolkit.
It was tested on Windows and Linux with Python 2.6
and PyQt 4.5
Copyright (C) <2009> Eli Bendersky
License: LGPL (http://www.gnu.org/copyleft/lgpl.html)
"""
# Text shown on the "Scoring" tab describing how points are earned.
scoring_text = """\
The score in PyQtris is computed as follows:
1) A point is earned for every line the active figure
is dropped with the 'space' key. For example, if you
pressed 'space' and the figure dropped 10 lines before
reaching the bottom, you get 10 points.
2) Points are awarded for completed lines, as follows:
30 points for a single line, 120 for two lines, 270
for three lines and 480 for four lines.
3) The bonuses explained in (1) and (2) are further
increased with higher levels. On level 2, the bonus
is multiplied by 1.1, on level 3 by 1.2, on level 4
by 1.3 and so on.
The game level increases with each 10 completed lines."""
# (key, description) pairs listed on the "Keys" tab, in display order.
keys_desc = [
    ('Left arrow', 'Move figure left'),
    ('Right arrow', 'Move figure right'),
    ('Down arrow', 'Move figure down faster'),
    ('Up arrow', 'Rotate figure clockwise'),
    ('Space', 'Drop figure'),
    ('Ctrl-H', 'Show high scores'),
    ('Ctrl-N', 'New game'),
    ('Ctrl-P', 'Pause / Resume game'),
    ('Ctrl-Q', 'Quit'),
    ('F1', 'About PyQtris'),
]
class AboutDialog(QDialog):
    """Tabbed "About" dialog with About, Keys, and Scoring pages."""

    def __init__(self, parent=None):
        """Build the three tab pages and the OK button; no other state."""
        super(AboutDialog, self).__init__(parent)
        self.setWindowTitle('About PyQtris')
        #
        # About
        #
        about_page = QWidget(self)
        logo = QLabel()
        logo.setPixmap(get_logo_pixmap())
        about_label = QLabel(about_text)
        about_layout = QVBoxLayout()
        about_layout.addWidget(logo, 0, Qt.AlignCenter)
        about_layout.addWidget(about_label, 0, Qt.AlignCenter)
        about_page.setLayout(about_layout)
        #
        # Keys
        #
        keys_page = QWidget(self)
        keys_layout = QGridLayout()
        # One grid row per (key, description) pair from keys_desc.
        i = 0
        for key, desc in keys_desc:
            keys_layout.addWidget(QLabel(key), i, 0)
            keys_layout.addWidget(QLabel(desc), i, 1)
            i += 1
        keys_page.setLayout(keys_layout)
        #
        # Scoring
        #
        score_page = QWidget(self)
        score_label = QLabel(scoring_text)
        score_layout = QVBoxLayout()
        score_layout.addWidget(score_label)
        score_page.setLayout(score_layout)
        tabs = QTabWidget(self)
        tabs.addTab(about_page, 'About')
        tabs.addTab(keys_page, 'Keys')
        tabs.addTab(score_page, 'Scoring')
        #
        # Dialog layout
        #
        okbutton = QPushButton('&OK')
        # NOTE(review): old-style string-based SIGNAL/SLOT connection; the
        # new-style okbutton.clicked.connect(self.accept) is preferred on
        # PyQt >= 4.5 -- confirm minimum PyQt version before changing.
        self.connect(okbutton, SIGNAL('clicked()'), self, SLOT('accept()'))
        # Stretches on both sides keep the OK button horizontally centered.
        bbox = QHBoxLayout()
        bbox.addStretch()
        bbox.addWidget(okbutton)
        bbox.addStretch()
        layout = QVBoxLayout()
        layout.addWidget(tabs)
        layout.addLayout(bbox)
        self.setLayout(layout)
if __name__ == "__main__":
    # Standalone smoke test: pop up the dialog by itself.
    import sys
    qt_app = QApplication(sys.argv)
    AboutDialog().exec_()
|
unlicense
|
ak2703/edx-platform
|
lms/djangoapps/debug/views.py
|
119
|
2136
|
"""Views for debugging and diagnostics"""
import pprint
import traceback
from django.http import Http404, HttpResponse, HttpResponseNotFound
from django.contrib.auth.decorators import login_required
from django.utils.html import escape
from django.views.decorators.csrf import ensure_csrf_cookie
from edxmako.shortcuts import render_to_response
from codejail.safe_exec import safe_exec
from mako.exceptions import TopLevelLookupException
@login_required
@ensure_csrf_cookie
def run_python(request):
    """
    A page to allow testing the Python sandbox on a production server.

    Staff-only.  GET renders an empty form; POST executes the submitted code
    inside the codejail sandbox and renders either the resulting globals or
    the full traceback.
    """
    if not request.user.is_staff:
        raise Http404
    c = {'code': '', 'results': None}
    if request.method == 'POST':
        py_code = c['code'] = request.POST.get('code')
        g = {}
        # Deliberately broad: any failure of the sandboxed code should be
        # shown to the staff user rather than crash the view.
        try:
            safe_exec(py_code, g)
        except Exception:  # pylint: disable=broad-except
            c['results'] = traceback.format_exc()
        else:
            c['results'] = pprint.pformat(g)
    return render_to_response("debug/run_python_form.html", c)
@login_required
def show_parameters(request):
    """A page that shows what parameters were on the URL and post."""
    get_lines = [escape("GET {}: {!r}".format(name, value))
                 for name, value in sorted(request.GET.items())]
    post_lines = [escape("POST {}: {!r}".format(name, value))
                  for name, value in sorted(request.POST.items())]
    paragraphs = ("<p>{}</p>".format(h) for h in get_lines + post_lines)
    return HttpResponse("\n".join(paragraphs))
def show_reference_template(request, template):
    """
    Render *template* directly as an HTML page.

    Used only in debug mode so the UX team can produce and work with static
    reference templates, e.g. /template/ux/reference/container.html shows the
    template under ux/reference/container.html.

    Query-string parameters are forwarded as template context, e.g.
    /template/ux/reference/container.html?name=Foo
    """
    context = request.GET.dict()
    try:
        return render_to_response(template, context)
    except TopLevelLookupException:
        return HttpResponseNotFound("Couldn't find template {template}".format(template=template))
|
agpl-3.0
|
petewarden/tensorflow
|
tensorflow/python/data/experimental/kernel_tests/sql_dataset_test.py
|
1
|
29219
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.experimental.SqlDataset`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import parameterized
import sqlite3
from tensorflow.python.data.experimental.ops import readers
from tensorflow.python.data.kernel_tests import checkpoint_test_base
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.framework import combinations
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class SqlDatasetTestBase(test_base.DatasetTestBase):
  """Base class for setting up and testing SqlDataset."""

  def _createSqlDataset(self,
                        query,
                        output_types,
                        driver_name="sqlite",
                        num_repeats=1):
    """Return a SqlDataset over the fixture DB, repeated num_repeats times."""
    sql_dataset = readers.SqlDataset(driver_name, self.data_source_name, query,
                                     output_types)
    return sql_dataset.repeat(num_repeats)

  def setUp(self):
    """Create a fresh sqlite fixture database before each test."""
    super(SqlDatasetTestBase, self).setUp()
    self.data_source_name = os.path.join(test.get_temp_dir(), "tftest.sqlite")
    conn = sqlite3.connect(self.data_source_name)
    c = conn.cursor()
    # Drop leftovers from any previous run so every test sees the same data.
    for drop_stmt in ("DROP TABLE IF EXISTS students",
                      "DROP TABLE IF EXISTS people",
                      "DROP TABLE IF EXISTS townspeople",
                      "DROP TABLE IF EXISTS data"):
      c.execute(drop_stmt)
    # "students" exercises string columns and integers of every width.
    c.execute(
        "CREATE TABLE IF NOT EXISTS students (id INTEGER NOT NULL PRIMARY KEY, "
        "first_name VARCHAR(100), last_name VARCHAR(100), motto VARCHAR(100), "
        "school_id VARCHAR(100), favorite_nonsense_word VARCHAR(100), "
        "desk_number INTEGER, income INTEGER, favorite_number INTEGER, "
        "favorite_big_number INTEGER, favorite_negative_number INTEGER, "
        "favorite_medium_sized_number INTEGER, brownie_points INTEGER, "
        "account_balance INTEGER, registration_complete INTEGER)")
    c.executemany(
        "INSERT INTO students (first_name, last_name, motto, school_id, "
        "favorite_nonsense_word, desk_number, income, favorite_number, "
        "favorite_big_number, favorite_negative_number, "
        "favorite_medium_sized_number, brownie_points, account_balance, "
        "registration_complete) "
        "VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
        [("John", "Doe", "Hi!", "123", "n\0nsense", 9, 0, 2147483647,
          9223372036854775807, -2, 32767, 0, 0, 1),
         ("Jane", "Moe", "Hi again!", "1000", "nonsense\0", 127, -20000,
          -2147483648, -9223372036854775808, -128, -32768, 255, 65535, 0)])
    # "people" supports the join-query test.
    c.execute(
        "CREATE TABLE IF NOT EXISTS people (id INTEGER NOT NULL PRIMARY KEY, "
        "first_name VARCHAR(100), last_name VARCHAR(100), state VARCHAR(100))")
    c.executemany(
        "INSERT INTO PEOPLE (first_name, last_name, state) VALUES (?, ?, ?)",
        [("Benjamin", "Franklin", "Pennsylvania"),
         ("John", "Doe", "California")])
    # "townspeople" holds floating-point columns.
    c.execute(
        "CREATE TABLE IF NOT EXISTS townspeople (id INTEGER NOT NULL PRIMARY "
        "KEY, first_name VARCHAR(100), last_name VARCHAR(100), victories "
        "FLOAT, accolades FLOAT, triumphs FLOAT)")
    c.executemany(
        "INSERT INTO townspeople (first_name, last_name, victories, "
        "accolades, triumphs) VALUES (?, ?, ?, ?, ?)",
        [("George", "Washington", 20.00,
          1331241.321342132321324589798264627463827647382647382643874,
          9007199254740991.0),
         ("John", "Adams", -19.95,
          1331241321342132321324589798264627463827647382647382643874.0,
          9007199254740992.0)])
    # "data" is a minimal single-column table.
    c.execute("CREATE TABLE IF NOT EXISTS data (col1 INTEGER)")
    c.executemany("INSERT INTO DATA VALUES (?)", [(0,), (1,), (2,)])
    conn.commit()
    conn.close()
class SqlDatasetTest(SqlDatasetTestBase, parameterized.TestCase):
# Test that SqlDataset can read from a database table.
@combinations.generate(test_base.default_test_combinations())
def testReadResultSet(self):
for _ in range(2): # Run twice to verify statelessness of db operations.
dataset = self._createSqlDataset(
query="SELECT first_name, last_name, motto FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.string, dtypes.string),
num_repeats=2)
self.assertDatasetProduces(
dataset,
expected_output=[(b"John", b"Doe", b"Hi!"),
(b"Jane", b"Moe", b"Hi again!")] * 2,
num_test_iterations=2)
# Test that SqlDataset works on a join query.
@combinations.generate(test_base.default_test_combinations())
def testReadResultSetJoinQuery(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT students.first_name, state, motto FROM students "
"INNER JOIN people "
"ON students.first_name = people.first_name "
"AND students.last_name = people.last_name",
output_types=(dtypes.string, dtypes.string, dtypes.string)))
self.assertEqual((b"John", b"California", b"Hi!"),
self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that SqlDataset can read a database entry with a null-terminator
# in the middle of the text and place the entry in a `string` tensor.
@combinations.generate(test_base.default_test_combinations())
def testReadResultSetNullTerminator(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, last_name, favorite_nonsense_word "
"FROM students ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.string, dtypes.string)))
self.assertEqual((b"John", b"Doe", b"n\0nsense"), self.evaluate(get_next()))
self.assertEqual((b"Jane", b"Moe", b"nonsense\0"),
self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that SqlDataset works when used on two different queries.
# Because the output types of the dataset must be determined at graph-creation
# time, the two queries must have the same number and types of columns.
@combinations.generate(test_base.default_test_combinations())
def testReadResultSetReuseSqlDataset(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, last_name, motto FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.string, dtypes.string)))
self.assertEqual((b"John", b"Doe", b"Hi!"), self.evaluate(get_next()))
self.assertEqual((b"Jane", b"Moe", b"Hi again!"), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, last_name, state FROM people "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.string, dtypes.string)))
self.assertEqual((b"John", b"Doe", b"California"),
self.evaluate(get_next()))
self.assertEqual((b"Benjamin", b"Franklin", b"Pennsylvania"),
self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that an `OutOfRangeError` is raised on the first call to
# `get_next_str_only` if result set is empty.
@combinations.generate(test_base.default_test_combinations())
def testReadEmptyResultSet(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, last_name, motto FROM students "
"WHERE first_name = 'Nonexistent'",
output_types=(dtypes.string, dtypes.string, dtypes.string)))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that an error is raised when `driver_name` is invalid.
@combinations.generate(test_base.default_test_combinations())
def testReadResultSetWithInvalidDriverName(self):
with self.assertRaises(errors.InvalidArgumentError):
dataset = self._createSqlDataset(
driver_name="sqlfake",
query="SELECT first_name, last_name, motto FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.string, dtypes.string))
self.assertDatasetProduces(dataset, expected_output=[])
# Test that an error is raised when a column name in `query` is nonexistent
@combinations.generate(test_base.default_test_combinations())
def testReadResultSetWithInvalidColumnName(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, last_name, fake_column FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.string, dtypes.string)))
with self.assertRaises(errors.UnknownError):
self.evaluate(get_next())
# Test that an error is raised when there is a syntax error in `query`.
@combinations.generate(test_base.default_test_combinations())
def testReadResultSetOfQueryWithSyntaxError(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELEmispellECT first_name, last_name, motto FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.string, dtypes.string)))
with self.assertRaises(errors.UnknownError):
self.evaluate(get_next())
# Test that an error is raised when the number of columns in `query`
# does not match the length of `, output_types`.
@combinations.generate(test_base.default_test_combinations())
def testReadResultSetWithMismatchBetweenColumnsAndOutputTypes(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, last_name FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.string, dtypes.string)))
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(get_next())
# Test that no results are returned when `query` is an insert query rather
# than a select query. In particular, the error refers to the number of
# output types passed to the op not matching the number of columns in the
# result set of the query (namely, 0 for an insert statement.)
@combinations.generate(test_base.default_test_combinations())
def testReadResultSetOfInsertQuery(self):
get_next = self.getNext(
self._createSqlDataset(
query="INSERT INTO students (first_name, last_name, motto) "
"VALUES ('Foo', 'Bar', 'Baz'), ('Fizz', 'Buzz', 'Fizzbuzz')",
output_types=(dtypes.string, dtypes.string, dtypes.string)))
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(get_next())
# Test that `SqlDataset` can read an integer from a SQLite database table and
# place it in an `int8` tensor.
@combinations.generate(test_base.default_test_combinations())
def testReadResultSetInt8(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, desk_number FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.int8)))
self.assertEqual((b"John", 9), self.evaluate(get_next()))
self.assertEqual((b"Jane", 127), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read a negative or 0-valued integer from a
# SQLite database table and place it in an `int8` tensor.
@combinations.generate(test_base.default_test_combinations())
def testReadResultSetInt8NegativeAndZero(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, income, favorite_negative_number "
"FROM students "
"WHERE first_name = 'John' ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.int8, dtypes.int8)))
self.assertEqual((b"John", 0, -2), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read a large (positive or negative) integer from
# a SQLite database table and place it in an `int8` tensor.
@combinations.generate(test_base.default_test_combinations())
def testReadResultSetInt8MaxValues(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT desk_number, favorite_negative_number FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.int8, dtypes.int8)))
self.assertEqual((9, -2), self.evaluate(get_next()))
# Max and min values of int8
self.assertEqual((127, -128), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read an integer from a SQLite database table and
# place it in an `int16` tensor.
@combinations.generate(test_base.default_test_combinations())
def testReadResultSetInt16(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, desk_number FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.int16)))
self.assertEqual((b"John", 9), self.evaluate(get_next()))
self.assertEqual((b"Jane", 127), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read a negative or 0-valued integer from a
# SQLite database table and place it in an `int16` tensor.
@combinations.generate(test_base.default_test_combinations())
def testReadResultSetInt16NegativeAndZero(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, income, favorite_negative_number "
"FROM students "
"WHERE first_name = 'John' ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.int16, dtypes.int16)))
self.assertEqual((b"John", 0, -2), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read a large (positive or negative) integer from
# a SQLite database table and place it in an `int16` tensor.
@combinations.generate(test_base.default_test_combinations())
def testReadResultSetInt16MaxValues(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, favorite_medium_sized_number "
"FROM students ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.int16)))
# Max value of int16
self.assertEqual((b"John", 32767), self.evaluate(get_next()))
# Min value of int16
self.assertEqual((b"Jane", -32768), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read an integer from a SQLite database table and
# place it in an `int32` tensor.
@combinations.generate(test_base.default_test_combinations())
def testReadResultSetInt32(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, desk_number FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.int32)))
self.assertEqual((b"John", 9), self.evaluate(get_next()))
self.assertEqual((b"Jane", 127), self.evaluate(get_next()))
# Test that `SqlDataset` can read a negative or 0-valued integer from a
# SQLite database table and place it in an `int32` tensor.
@combinations.generate(test_base.default_test_combinations())
def testReadResultSetInt32NegativeAndZero(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, income FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.int32)))
self.assertEqual((b"John", 0), self.evaluate(get_next()))
self.assertEqual((b"Jane", -20000), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read a large (positive or negative) integer from
# a SQLite database table and place it in an `int32` tensor.
@combinations.generate(test_base.default_test_combinations())
def testReadResultSetInt32MaxValues(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, favorite_number FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.int32)))
# Max value of int32
self.assertEqual((b"John", 2147483647), self.evaluate(get_next()))
# Min value of int32
self.assertEqual((b"Jane", -2147483648), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read a numeric `varchar` from a SQLite database
# table and place it in an `int32` tensor.
@combinations.generate(test_base.default_test_combinations())
def testReadResultSetInt32VarCharColumnAsInt(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, school_id FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.int32)))
self.assertEqual((b"John", 123), self.evaluate(get_next()))
self.assertEqual((b"Jane", 1000), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read an integer from a SQLite database table
# and place it in an `int64` tensor.
@combinations.generate(test_base.default_test_combinations())
def testReadResultSetInt64(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, desk_number FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.int64)))
self.assertEqual((b"John", 9), self.evaluate(get_next()))
self.assertEqual((b"Jane", 127), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read a negative or 0-valued integer from a
# SQLite database table and place it in an `int64` tensor.
@combinations.generate(test_base.default_test_combinations())
def testReadResultSetInt64NegativeAndZero(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, income FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.int64)))
self.assertEqual((b"John", 0), self.evaluate(get_next()))
self.assertEqual((b"Jane", -20000), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read a large (positive or negative) integer from
# a SQLite database table and place it in an `int64` tensor.
@combinations.generate(test_base.default_test_combinations())
def testReadResultSetInt64MaxValues(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, favorite_big_number FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.int64)))
# Max value of int64
self.assertEqual((b"John", 9223372036854775807), self.evaluate(get_next()))
# Min value of int64
self.assertEqual((b"Jane", -9223372036854775808), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read an integer from a SQLite database table and
# place it in a `uint8` tensor.
@combinations.generate(test_base.default_test_combinations())
def testReadResultSetUInt8(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, desk_number FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.uint8)))
self.assertEqual((b"John", 9), self.evaluate(get_next()))
self.assertEqual((b"Jane", 127), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read the minimum and maximum uint8 values from a
# SQLite database table and place them in `uint8` tensors.
@combinations.generate(test_base.default_test_combinations())
def testReadResultSetUInt8MinAndMaxValues(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, brownie_points FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.uint8)))
# Min value of uint8
self.assertEqual((b"John", 0), self.evaluate(get_next()))
# Max value of uint8
self.assertEqual((b"Jane", 255), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read an integer from a SQLite database table
# and place it in a `uint16` tensor.
@combinations.generate(test_base.default_test_combinations())
def testReadResultSetUInt16(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, desk_number FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.uint16)))
self.assertEqual((b"John", 9), self.evaluate(get_next()))
self.assertEqual((b"Jane", 127), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read the minimum and maximum uint16 values from a
# SQLite database table and place them in `uint16` tensors.
@combinations.generate(test_base.default_test_combinations())
def testReadResultSetUInt16MinAndMaxValues(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, account_balance FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.uint16)))
# Min value of uint16
self.assertEqual((b"John", 0), self.evaluate(get_next()))
# Max value of uint16
self.assertEqual((b"Jane", 65535), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read a 0-valued and 1-valued integer from a
# SQLite database table and place them as `True` and `False` respectively
# in `bool` tensors.
@combinations.generate(test_base.default_test_combinations())
def testReadResultSetBool(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, registration_complete FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.bool)))
self.assertEqual((b"John", True), self.evaluate(get_next()))
self.assertEqual((b"Jane", False), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read an integer that is not 0-valued or 1-valued
# from a SQLite database table and place it as `True` in a `bool` tensor.
@combinations.generate(test_base.default_test_combinations())
def testReadResultSetBoolNotZeroOrOne(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, favorite_medium_sized_number "
"FROM students ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.bool)))
self.assertEqual((b"John", True), self.evaluate(get_next()))
self.assertEqual((b"Jane", True), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read a float from a SQLite database table
# and place it in a `float64` tensor.
@combinations.generate(test_base.default_test_combinations())
def testReadResultSetFloat64(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, last_name, victories FROM townspeople "
"ORDER BY first_name",
output_types=(dtypes.string, dtypes.string, dtypes.float64)))
self.assertEqual((b"George", b"Washington", 20.0),
self.evaluate(get_next()))
self.assertEqual((b"John", b"Adams", -19.95), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read a float from a SQLite database table beyond
# the precision of 64-bit IEEE, without throwing an error. Test that
# `SqlDataset` identifies such a value as equal to itself.
@combinations.generate(test_base.default_test_combinations())
def testReadResultSetFloat64OverlyPrecise(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, last_name, accolades FROM townspeople "
"ORDER BY first_name",
output_types=(dtypes.string, dtypes.string, dtypes.float64)))
self.assertEqual(
(b"George", b"Washington",
1331241.321342132321324589798264627463827647382647382643874),
self.evaluate(get_next()))
self.assertEqual(
(b"John", b"Adams",
1331241321342132321324589798264627463827647382647382643874.0),
self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read a float from a SQLite database table,
# representing the largest integer representable as a 64-bit IEEE float
# such that the previous integer is also representable as a 64-bit IEEE float.
# Test that `SqlDataset` can distinguish these two numbers.
@combinations.generate(test_base.default_test_combinations())
def testReadResultSetFloat64LargestConsecutiveWholeNumbersNotEqual(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, last_name, triumphs FROM townspeople "
"ORDER BY first_name",
output_types=(dtypes.string, dtypes.string, dtypes.float64)))
self.assertNotEqual((b"George", b"Washington", 9007199254740992.0),
self.evaluate(get_next()))
self.assertNotEqual((b"John", b"Adams", 9007199254740991.0),
self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that SqlDataset can stop correctly when combined with batch
@combinations.generate(test_base.default_test_combinations())
def testReadResultSetWithBatchStop(self):
dataset = self._createSqlDataset(
query="SELECT * FROM data", output_types=(dtypes.int32))
dataset = dataset.map(lambda x: array_ops.identity(x))
get_next = self.getNext(dataset.batch(2))
self.assertAllEqual(self.evaluate(get_next()), [0, 1])
self.assertAllEqual(self.evaluate(get_next()), [2])
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
class SqlDatasetCheckpointTest(SqlDatasetTestBase,
                               checkpoint_test_base.CheckpointTestBase,
                               parameterized.TestCase):
  """Checkpoint (save/restore) coverage for `SqlDataset`."""

  def _build_dataset(self, num_repeats):
    """Builds a `SqlDataset` over the students table, repeated."""
    db_path = os.path.join(test.get_temp_dir(), "tftest.sqlite")
    driver = array_ops.placeholder_with_default(
        array_ops.constant("sqlite", dtypes.string), shape=[])
    query = ("SELECT first_name, last_name, motto FROM students ORDER BY "
             "first_name DESC")
    types = (dtypes.string, dtypes.string, dtypes.string)
    dataset = readers.SqlDataset(driver, db_path, query, types)
    return dataset.repeat(num_repeats)

  @combinations.generate(test_base.default_test_combinations())
  def testCore(self):
    """Runs the standard checkpoint test suite over the dataset."""
    num_repeats = 4
    # Two student rows per repetition of the dataset.
    num_outputs = num_repeats * 2
    self.run_core_tests(lambda: self._build_dataset(num_repeats), num_outputs)
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
  test.main()
|
apache-2.0
|
kingmotley/SickRage
|
lib/rtorrent/peer.py
|
174
|
3699
|
# Copyright (c) 2013 Chris Lucas, <[email protected]>
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# from rtorrent.rpc import Method
import rtorrent.rpc
from rtorrent.common import safe_repr
Method = rtorrent.rpc.Method
class Peer:
    """Represents an individual peer within a L{Torrent} instance."""

    def __init__(self, _rt_obj, info_hash, **kwargs):
        self._rt_obj = _rt_obj
        self.info_hash = info_hash  # : info hash for the torrent the peer is associated with
        # Copy every remaining keyword straight onto the instance.
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)
        self.rpc_id = "{0}:p{1}".format(
            self.info_hash, self.id)  # : unique id to pass to rTorrent

    def __repr__(self):
        return safe_repr("Peer(id={0})", self.id)

    def update(self):
        """Refresh peer data

        @note: All fields are stored as attributes to self.

        @return: None
        """
        multicall = rtorrent.rpc.Multicall(self)
        # Only query methods that retrieve data and that this rTorrent
        # instance actually supports.
        available = [
            m for m in methods
            if m.is_retriever() and m.is_available(self._rt_obj)
        ]
        for m in available:
            multicall.add(m, self.rpc_id)
        multicall.call()
# RPC method table for Peer. Each entry maps a Python-side accessor name to
# the rTorrent XML-RPC command it issues; `boolean=True` marks results that
# should be coerced to bool.
methods = [
    # RETRIEVERS
    Method(Peer, 'is_preferred', 'p.is_preferred',
           boolean=True,
           ),
    Method(Peer, 'get_down_rate', 'p.get_down_rate'),
    Method(Peer, 'is_unwanted', 'p.is_unwanted',
           boolean=True,
           ),
    Method(Peer, 'get_peer_total', 'p.get_peer_total'),
    Method(Peer, 'get_peer_rate', 'p.get_peer_rate'),
    Method(Peer, 'get_port', 'p.get_port'),
    Method(Peer, 'is_snubbed', 'p.is_snubbed',
           boolean=True,
           ),
    Method(Peer, 'get_id_html', 'p.get_id_html'),
    Method(Peer, 'get_up_rate', 'p.get_up_rate'),
    Method(Peer, 'is_banned', 'p.banned',
           boolean=True,
           ),
    Method(Peer, 'get_completed_percent', 'p.get_completed_percent'),
    Method(Peer, 'completed_percent', 'p.completed_percent'),
    Method(Peer, 'get_id', 'p.get_id'),
    Method(Peer, 'is_obfuscated', 'p.is_obfuscated',
           boolean=True,
           ),
    Method(Peer, 'get_down_total', 'p.get_down_total'),
    Method(Peer, 'get_client_version', 'p.get_client_version'),
    Method(Peer, 'get_address', 'p.get_address'),
    Method(Peer, 'is_incoming', 'p.is_incoming',
           boolean=True,
           ),
    Method(Peer, 'is_encrypted', 'p.is_encrypted',
           boolean=True,
           ),
    Method(Peer, 'get_options_str', 'p.get_options_str'),
    # NOTE(review): 'get_client_version' appears twice — once above bound to
    # 'p.get_client_version' and here bound to 'p.client_version'. These look
    # like old/new rTorrent command-name aliases; confirm the rpc layer
    # tolerates duplicate accessor names before removing either entry.
    Method(Peer, 'get_client_version', 'p.client_version'),
    Method(Peer, 'get_up_total', 'p.get_up_total'),

    # MODIFIERS
]
|
gpl-3.0
|
brianrodri/oppia
|
core/domain/user_jobs_one_off_test.py
|
2
|
10034
|
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for user-related one-off computations."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import datetime
from core.domain import event_services
from core.domain import rating_services
from core.domain import taskqueue_services
from core.domain import user_jobs_one_off
from core.domain import user_services
from core.tests import test_utils
import feconf
class DashboardStatsOneOffJobTests(test_utils.GenericTestBase):
    """Tests for the one-off dashboard stats job."""

    # Fixed "today" stamp shared by all tests, and a stamp one week later used
    # to simulate a second weekly run of the job.
    CURRENT_DATE_AS_STRING = user_services.get_current_date_as_string()
    DATE_AFTER_ONE_WEEK = (
        (datetime.datetime.utcnow() + datetime.timedelta(7)).strftime(
            feconf.DASHBOARD_STATS_DATETIME_STRING_FORMAT))

    USER_SESSION_ID = 'session1'

    EXP_ID_1 = 'exp_id_1'
    EXP_ID_2 = 'exp_id_2'
    EXP_VERSION = 1

    def _run_one_off_job(self):
        """Runs the one-off MapReduce job."""
        job_id = user_jobs_one_off.DashboardStatsOneOffJob.create_new()
        user_jobs_one_off.DashboardStatsOneOffJob.enqueue(job_id)
        # Exactly one job instance should be queued before processing.
        self.assertEqual(
            self.count_jobs_in_mapreduce_taskqueue(
                taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
        self.process_and_flush_pending_mapreduce_tasks()

    def setUp(self):
        super(DashboardStatsOneOffJobTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)

    def mock_get_current_date_as_string(self):
        """Returns the fixed date string these tests treat as 'today'."""
        return self.CURRENT_DATE_AS_STRING

    def _rate_exploration(self, user_id, exp_id, rating):
        """Assigns rating to the exploration corresponding to the given
        exploration id.

        Args:
            user_id: str. The user id.
            exp_id: str. The exploration id.
            rating: int. The rating to be assigned to the given exploration.
        """
        rating_services.assign_rating_to_exploration(user_id, exp_id, rating)

    def _record_play(self, exp_id, state):
        """Calls StartExplorationEventHandler and records the 'play' event
        corresponding to the given exploration id.

        Args:
            exp_id: str. The exploration id.
            state: dict(str, *). The state of the exploration corresponding to
                the given id.
        """
        event_services.StartExplorationEventHandler.record(
            exp_id, self.EXP_VERSION, state, self.USER_SESSION_ID, {},
            feconf.PLAY_TYPE_NORMAL)

    def test_weekly_stats(self):
        """Stats are absent before the job runs and populated once it does."""
        exploration = self.save_new_valid_exploration(
            self.EXP_ID_1, self.owner_id)
        exp_id = exploration.id
        init_state_name = exploration.init_state_name
        self._record_play(exp_id, init_state_name)
        self._rate_exploration('user1', exp_id, 5)

        # No stats exist before the one-off job has ever run.
        weekly_stats = user_services.get_weekly_dashboard_stats(self.owner_id)
        self.assertEqual(weekly_stats, None)
        self.assertEqual(
            user_services.get_last_week_dashboard_stats(self.owner_id), None)

        # Pin "today" to the fixed date while the job runs.
        with self.swap(
            user_services,
            'get_current_date_as_string',
            self.mock_get_current_date_as_string):
            self._run_one_off_job()

        weekly_stats = user_services.get_weekly_dashboard_stats(self.owner_id)
        self.assertEqual(weekly_stats, [{
            self.mock_get_current_date_as_string(): {
                'num_ratings': 1,
                'average_ratings': 5.0,
                'total_plays': 1
            }
        }])

        self.assertEqual(
            user_services.get_last_week_dashboard_stats(self.owner_id),
            {
                self.mock_get_current_date_as_string(): {
                    'num_ratings': 1,
                    'average_ratings': 5.0,
                    'total_plays': 1
                }
            })

    def test_weekly_stats_if_no_explorations(self):
        """The job records zeroed stats for an owner with no explorations."""
        with self.swap(
            user_services,
            'get_current_date_as_string',
            self.mock_get_current_date_as_string):
            self._run_one_off_job()

        weekly_stats = user_services.get_weekly_dashboard_stats(self.owner_id)
        self.assertEqual(
            weekly_stats, [{
                self.mock_get_current_date_as_string(): {
                    'num_ratings': 0,
                    'average_ratings': None,
                    'total_plays': 0
                }
            }])

    def test_weekly_stats_for_single_exploration(self):
        """One play and one rating on a single exploration are aggregated."""
        exploration = self.save_new_valid_exploration(
            self.EXP_ID_1, self.owner_id)
        exp_id = exploration.id
        init_state_name = exploration.init_state_name
        self._record_play(exp_id, init_state_name)
        self._rate_exploration('user1', exp_id, 5)
        event_services.StatsEventsHandler.record(
            self.EXP_ID_1, 1, {
                'num_starts': 1,
                'num_actual_starts': 0,
                'num_completions': 0,
                'state_stats_mapping': {}
            })

        self.process_and_flush_pending_tasks()

        with self.swap(
            user_services,
            'get_current_date_as_string',
            self.mock_get_current_date_as_string):
            self._run_one_off_job()

        weekly_stats = user_services.get_weekly_dashboard_stats(self.owner_id)
        self.assertEqual(
            weekly_stats, [{
                self.mock_get_current_date_as_string(): {
                    'num_ratings': 1,
                    'average_ratings': 5.0,
                    'total_plays': 1
                }
            }])

    def test_weekly_stats_for_multiple_explorations(self):
        """Ratings across several explorations are averaged together."""
        exploration_1 = self.save_new_valid_exploration(
            self.EXP_ID_1, self.owner_id)
        exp_id_1 = exploration_1.id
        exploration_2 = self.save_new_valid_exploration(
            self.EXP_ID_2, self.owner_id)
        exp_id_2 = exploration_2.id
        init_state_name_1 = exploration_1.init_state_name
        self._record_play(exp_id_1, init_state_name_1)
        self._rate_exploration('user1', exp_id_1, 5)
        self._rate_exploration('user2', exp_id_2, 4)
        event_services.StatsEventsHandler.record(
            self.EXP_ID_1, 1, {
                'num_starts': 1,
                'num_actual_starts': 0,
                'num_completions': 0,
                'state_stats_mapping': {}
            })

        self.process_and_flush_pending_tasks()

        with self.swap(
            user_services,
            'get_current_date_as_string',
            self.mock_get_current_date_as_string):
            self._run_one_off_job()

        weekly_stats = user_services.get_weekly_dashboard_stats(self.owner_id)
        self.assertEqual(
            weekly_stats, [{
                self.mock_get_current_date_as_string(): {
                    'num_ratings': 2,
                    'average_ratings': 4.5,
                    'total_plays': 1
                }
            }])

    def test_stats_for_multiple_weeks(self):
        """Running the job in two different weeks appends a second entry."""
        exploration = self.save_new_valid_exploration(
            self.EXP_ID_1, self.owner_id)
        exp_id = exploration.id
        init_state_name = exploration.init_state_name
        self._rate_exploration('user1', exp_id, 4)
        self._record_play(exp_id, init_state_name)
        self._record_play(exp_id, init_state_name)
        event_services.StatsEventsHandler.record(
            self.EXP_ID_1, 1, {
                'num_starts': 2,
                'num_actual_starts': 0,
                'num_completions': 0,
                'state_stats_mapping': {}
            })

        self.process_and_flush_pending_tasks()

        # First run: "today" is the fixed current date.
        with self.swap(
            user_services,
            'get_current_date_as_string',
            self.mock_get_current_date_as_string):
            self._run_one_off_job()

        weekly_stats = user_services.get_weekly_dashboard_stats(self.owner_id)
        self.assertEqual(
            weekly_stats, [{
                self.mock_get_current_date_as_string(): {
                    'num_ratings': 1,
                    'average_ratings': 4.0,
                    'total_plays': 2
                }
            }])

        # A second rating arrives before the next weekly run.
        self._rate_exploration('user2', exp_id, 2)

        def _mock_get_date_after_one_week():
            """Returns the date of the next week."""
            return self.DATE_AFTER_ONE_WEEK

        # Second run: "today" is one week later, so a new entry is appended.
        with self.swap(
            user_services,
            'get_current_date_as_string',
            _mock_get_date_after_one_week):
            self._run_one_off_job()

        expected_results_list = [
            {
                self.mock_get_current_date_as_string(): {
                    'num_ratings': 1,
                    'average_ratings': 4.0,
                    'total_plays': 2
                }
            },
            {
                _mock_get_date_after_one_week(): {
                    'num_ratings': 2,
                    'average_ratings': 3.0,
                    'total_plays': 2
                }
            }
        ]
        weekly_stats = user_services.get_weekly_dashboard_stats(self.owner_id)
        self.assertEqual(weekly_stats, expected_results_list)
        self.assertEqual(
            user_services.get_last_week_dashboard_stats(self.owner_id),
            expected_results_list[1])
|
apache-2.0
|
TechBK/horizon-dev
|
openstack_dashboard/dashboards/admin/overview/views.py
|
63
|
2858
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf import settings
from django.template.defaultfilters import floatformat # noqa
from django.utils import translation
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon.utils import csvbase
from openstack_dashboard import api
from openstack_dashboard import usage
class GlobalUsageCsvRenderer(csvbase.BaseCsvResponse):
    """CSV export of the global (all-projects) usage table."""

    columns = [_("Project Name"), _("VCPUs"), _("RAM (MB)"),
               _("Disk (GB)"), _("Usage (Hours)")]

    def get_row_data(self):
        """Yields one CSV row per project usage record."""
        usage_list = self.context['usage'].usage_list
        for record in usage_list:
            # Fall back to the tenant id when the project name is unknown.
            name = record.project_name or record.tenant_id
            yield (name,
                   record.vcpus,
                   record.memory_mb,
                   record.local_gb,
                   floatformat(record.vcpu_hours, 2))
class GlobalOverview(usage.UsageView):
    """Admin overview page showing usage across all projects."""

    table_class = usage.GlobalUsageTable
    usage_class = usage.GlobalUsage
    template_name = 'admin/overview/usage.html'
    csv_response_class = GlobalUsageCsvRenderer

    def get_context_data(self, **kwargs):
        """Adds any configured external monitoring links to the context."""
        context = super(GlobalOverview, self).get_context_data(**kwargs)
        context['monitoring'] = getattr(settings, 'EXTERNAL_MONITORING', [])
        return context

    def get_data(self):
        """Returns usage data with project names resolved from Keystone."""
        data = super(GlobalOverview, self).get_data()
        # Pre-fill project names
        try:
            projects, has_more = api.keystone.tenant_list(self.request)
        except Exception:
            projects = []
            exceptions.handle(self.request,
                              _('Unable to retrieve project list.'))
        for instance in data:
            # BUG FIX: the original used `project = filter(...)`. On Python 3
            # filter() returns a lazy iterator which is *always* truthy, so the
            # "(Deleted)" fallback below could never trigger. A list
            # comprehension behaves identically on Python 2 and correctly on
            # Python 3.
            project = [t for t in projects if t.id == instance.tenant_id]
            # If we could not get the project name, show the tenant_id with
            # a 'Deleted' identifier instead.
            if project:
                instance.project_name = getattr(project[0], "name", None)
            else:
                deleted = _("Deleted")
                instance.project_name = translation.string_concat(
                    instance.tenant_id, " (", deleted, ")")
        return data
|
apache-2.0
|
lucienimmink/scanner.py
|
mutagen/_senf/_print.py
|
9
|
12366
|
# -*- coding: utf-8 -*-
# Copyright 2016 Christoph Reiter
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys
import os
import ctypes
import re
from ._fsnative import _encoding, is_win, is_unix, _surrogatepass, bytes2fsn
from ._compat import text_type, PY2, PY3
from ._winansi import AnsiState, ansi_split
from . import _winapi as winapi
def print_(*objects, **kwargs):
    """print_(*objects, sep=None, end=None, file=None, flush=False)

    Args:
        objects (object): zero or more objects to print
        sep (str): Object separator to use, defaults to ``" "``
        end (str): Trailing string to use, defaults to ``"\\n"``.
            If end is ``"\\n"`` then `os.linesep` is used.
        file (object): A file-like object, defaults to `sys.stdout`
        flush (bool): If the file stream should be flushed
    Raises:
        EnvironmentError

    Like print(), but:

    * Supports printing filenames under Unix + Python 3 and Windows + Python 2
    * Emulates ANSI escape sequence support under Windows
    * Never fails due to encoding/decoding errors. Tries hard to get
      everything on screen as is, but will fall back to "?" if all fails.

    This does not conflict with ``colorama``, but will not use it on Windows.
    """
    # Resolve keyword-only options (Python-2-compatible **kwargs style).
    sep = kwargs.get("sep")
    if sep is None:
        sep = " "
    end = kwargs.get("end")
    if end is None:
        end = "\n"
    file = kwargs.get("file")
    if file is None:
        file = sys.stdout
    flush = bool(kwargs.get("flush", False))

    # Dispatch to the platform-specific implementation.
    impl = _print_windows if is_win else _print_unix
    impl(objects, sep, end, file, flush)
def _print_unix(objects, sep, end, file, flush):
    """A print_() implementation which writes bytes.

    Everything is encoded to the locale encoding and written to the file's
    underlying binary buffer when one exists; text-only streams are handled
    by the TypeError fallback at the end.
    """

    encoding = _encoding

    # Normalize sep/end to bytes; anything else is a caller error.
    if isinstance(sep, text_type):
        sep = sep.encode(encoding, "replace")
    if not isinstance(sep, bytes):
        raise TypeError

    if isinstance(end, text_type):
        end = end.encode(encoding, "replace")
    if not isinstance(end, bytes):
        raise TypeError

    # A plain "\n" terminator is translated to the platform line separator.
    # On Python 3 os.linesep is str and must be re-encoded.
    if end == b"\n":
        end = os.linesep
        if PY3:
            end = end.encode("ascii")

    parts = []
    for obj in objects:
        # Non-str/bytes objects are stringified first, like print() does.
        if not isinstance(obj, text_type) and not isinstance(obj, bytes):
            obj = text_type(obj)

        if isinstance(obj, text_type):
            if PY2:
                obj = obj.encode(encoding, "replace")
            else:
                # Prefer surrogateescape so round-tripped filenames survive;
                # fall back to "?" replacement if the codec rejects it.
                try:
                    obj = obj.encode(encoding, "surrogateescape")
                except UnicodeEncodeError:
                    obj = obj.encode(encoding, "replace")

        assert isinstance(obj, bytes)
        parts.append(obj)

    data = sep.join(parts) + end
    assert isinstance(data, bytes)

    # Write to the raw buffer when the stream exposes one (Python 3 text IO).
    file = getattr(file, "buffer", file)

    try:
        file.write(data)
    except TypeError:
        if PY3:
            # For StringIO, first try with surrogates
            surr_data = data.decode(encoding, "surrogateescape")
            try:
                file.write(surr_data)
            except (TypeError, ValueError):
                file.write(data.decode(encoding, "replace"))
        else:
            # for file-like objects that don't support bytes
            file.write(data.decode(encoding, "replace"))

    if flush:
        file.flush()
# Process-wide ANSI interpreter state shared by all _print_windows() calls.
ansi_state = AnsiState()


def _print_windows(objects, sep, end, file, flush):
    """The windows implementation of print_().

    Writes through the Win32 console API when stdout/stderr is a real
    console (interpreting ANSI escapes itself); otherwise falls back to
    UTF-8 bytes on the file object.
    """

    # Resolve the Win32 handle for stdout/stderr; anything else (redirected
    # stream, missing fileno) keeps INVALID_HANDLE_VALUE and uses the
    # non-console path below.
    h = winapi.INVALID_HANDLE_VALUE
    try:
        fileno = file.fileno()
    except (EnvironmentError, AttributeError):
        pass
    else:
        if fileno == 1:
            h = winapi.GetStdHandle(winapi.STD_OUTPUT_HANDLE)
        elif fileno == 2:
            h = winapi.GetStdHandle(winapi.STD_ERROR_HANDLE)

    encoding = _encoding

    # Normalize everything to text; on Windows the console API wants unicode.
    parts = []
    for obj in objects:
        if isinstance(obj, bytes):
            obj = obj.decode(encoding, "replace")
        if not isinstance(obj, text_type):
            obj = text_type(obj)
        parts.append(obj)

    if isinstance(sep, bytes):
        sep = sep.decode(encoding, "replace")
    if not isinstance(sep, text_type):
        raise TypeError

    if isinstance(end, bytes):
        end = end.decode(encoding, "replace")
    if not isinstance(end, text_type):
        raise TypeError

    if end == u"\n":
        end = os.linesep

    text = sep.join(parts) + end
    assert isinstance(text, text_type)

    # A valid handle alone is not enough — GetConsoleScreenBufferInfo fails
    # for pipes, so it doubles as the "is this really a console?" probe.
    is_console = True
    if h == winapi.INVALID_HANDLE_VALUE:
        is_console = False
    else:
        # get the default value
        info = winapi.CONSOLE_SCREEN_BUFFER_INFO()
        if not winapi.GetConsoleScreenBufferInfo(h, ctypes.byref(info)):
            is_console = False

    if is_console:
        # make sure we flush before we apply any console attributes
        file.flush()

        # try to force a utf-8 code page, use the output CP if that fails
        cp = winapi.GetConsoleOutputCP()
        try:
            encoding = "utf-8"
            if winapi.SetConsoleOutputCP(65001) == 0:
                encoding = None

            # Split the text into ANSI escapes (applied via the console API)
            # and plain segments (written as raw bytes to the fd).
            for is_ansi, part in ansi_split(text):
                if is_ansi:
                    ansi_state.apply(h, part)
                else:
                    if encoding is not None:
                        data = part.encode(encoding, _surrogatepass)
                    else:
                        data = _encode_codepage(cp, part)
                    os.write(fileno, data)
        finally:
            # reset the code page to what we had before
            winapi.SetConsoleOutputCP(cp)
    else:
        # try writing bytes first, so in case of Python 2 StringIO we get
        # the same type on all platforms
        try:
            file.write(text.encode("utf-8", _surrogatepass))
        except (TypeError, ValueError):
            file.write(text)

    if flush:
        file.flush()
def _readline_windows():
    """Reads one line from the Windows console as text.

    Falls back to _readline_windows_fallback() when stdin is redirected or
    the console cannot be read. Raises OSError.
    """

    try:
        fileno = sys.stdin.fileno()
    except (EnvironmentError, AttributeError):
        fileno = -1

    # In case stdin is replaced, read from that
    if fileno != 0:
        return _readline_windows_fallback()

    h = winapi.GetStdHandle(winapi.STD_INPUT_HANDLE)
    if h == winapi.INVALID_HANDLE_VALUE:
        return _readline_windows_fallback()

    buf_size = 1024
    # ReadConsoleW works in WCHARs, so size the raw buffer accordingly.
    buf = ctypes.create_string_buffer(buf_size * ctypes.sizeof(winapi.WCHAR))
    read = winapi.DWORD()
    text = u""
    while True:
        if winapi.ReadConsoleW(
                h, buf, buf_size, ctypes.byref(read), None) == 0:
            # A failure before anything was read may just mean "not a real
            # console" — retry via the fallback; mid-line failure is an error.
            if not text:
                return _readline_windows_fallback()
            raise ctypes.WinError()
        data = buf[:read.value * ctypes.sizeof(winapi.WCHAR)]
        text += data.decode("utf-16-le", _surrogatepass)

        # Console input lines are terminated with CRLF; strip it and return.
        if text.endswith(u"\r\n"):
            return text[:-2]
def _decode_codepage(codepage, data):
    """
    Args:
        codepage (int)
        data (bytes)
    Returns:
        `text`

    Decodes data using the given codepage. If some data can't be decoded
    using the codepage it will not fail.
    """

    assert isinstance(data, bytes)

    if not data:
        return u""

    # get the required buffer length first
    length = winapi.MultiByteToWideChar(codepage, 0, data, len(data), None, 0)
    if length == 0:
        raise ctypes.WinError()

    # now decode
    buf = ctypes.create_unicode_buffer(length)
    length = winapi.MultiByteToWideChar(
        codepage, 0, data, len(data), buf, length)
    if length == 0:
        raise ctypes.WinError()

    return buf[:]
def _encode_codepage(codepage, text):
    """
    Args:
        codepage (int)
        text (text)
    Returns:
        `bytes`

    Encode text using the given code page. Will not fail if a char
    can't be encoded using that codepage.
    """

    assert isinstance(text, text_type)

    if not text:
        return b""

    # WideCharToMultiByte counts WCHARs; compute how many UTF-16 code units
    # the text occupies (surrogates included).
    size = (len(text.encode("utf-16-le", _surrogatepass)) //
            ctypes.sizeof(winapi.WCHAR))

    # get the required buffer size
    length = winapi.WideCharToMultiByte(
        codepage, 0, text, size, None, 0, None, None)
    if length == 0:
        raise ctypes.WinError()

    # decode to the buffer
    buf = ctypes.create_string_buffer(length)
    length = winapi.WideCharToMultiByte(
        codepage, 0, text, size, buf, length, None, None)
    if length == 0:
        raise ctypes.WinError()
    return buf[:length]
def _readline_windows_fallback():
    """Reads one line from stdin when the console API is unavailable."""
    # In case reading from the console failed (maybe we get piped data)
    # we assume the input was generated according to the output encoding.
    # Got any better ideas?
    assert is_win
    cp = winapi.GetConsoleOutputCP()
    data = getattr(sys.stdin, "buffer", sys.stdin).readline().rstrip(b"\r\n")
    return _decode_codepage(cp, data)
def _readline_default():
    """Read one line from stdin on Unix and return it as `fsnative`."""
    assert is_unix

    stream = getattr(sys.stdin, "buffer", sys.stdin)
    raw = stream.readline().rstrip(b"\r\n")
    if not PY3:
        # Python 2: fsnative is bytes
        return raw
    # Python 3: decode with the fs encoding, round-tripping bad bytes
    return raw.decode(_encoding, "surrogateescape")
def _readline():
    """Read one line from stdin, dispatching on the platform."""
    return _readline_windows() if is_win else _readline_default()
def input_(prompt=None):
    """
    Args:
        prompt (object): Prints the passed object to stdout without
            adding a trailing newline
    Returns:
        `fsnative`
    Raises:
        EnvironmentError

    Like :func:`python3:input` but returns a `fsnative` and allows printing
    filenames as prompt to stdout.

    Use :func:`fsn2text` on the result if you just want to deal with text.
    """
    if prompt is None:
        return _readline()
    # mimic input(): emit the prompt without a trailing newline
    print_(prompt, end="")
    return _readline()
def _get_file_name_for_handle(handle):
    """(Windows only) Returns a file name for a file handle.

    Args:
        handle (winapi.HANDLE)
    Returns:
        `text` or `None` if no file name could be retrieved.
    """
    assert is_win
    assert handle != winapi.INVALID_HANDLE_VALUE

    if winapi.GetFileInformationByHandleEx is None:
        # API not available on Windows XP
        return None

    # FILE_NAME_INFO ends in a variable-length file name; reserve room
    # for MAX_PATH WCHARs after the fixed-size part.
    name_offset = winapi.FILE_NAME_INFO.FileName.offset
    buf_size = name_offset + winapi.MAX_PATH * ctypes.sizeof(winapi.WCHAR)
    raw = ctypes.create_string_buffer(buf_size)

    ok = winapi.GetFileInformationByHandleEx(
        handle, winapi.FileNameInfo, raw, buf_size)
    if ok == 0:
        return None

    info = ctypes.cast(raw, ctypes.POINTER(winapi.FILE_NAME_INFO)).contents
    name_bytes = raw[name_offset:name_offset + info.FileNameLength]
    return bytes2fsn(name_bytes, "utf-16-le")
def supports_ansi_escape_codes(fd):
    """Returns whether the output device is capable of interpreting ANSI escape
    codes when :func:`print_` is used.

    Args:
        fd (int): file descriptor (e.g. ``sys.stdout.fileno()``)
    Returns:
        `bool`
    """
    if os.isatty(fd):
        return True

    if not is_win:
        return False

    # On Windows a cygwin/msys terminal shows up as a pipe whose kernel
    # object name follows a well-known pattern; detect that case.
    handle = winapi._get_osfhandle(fd)
    if handle == winapi.INVALID_HANDLE_VALUE:
        return False

    if winapi.GetFileType(handle) != winapi.FILE_TYPE_PIPE:
        return False

    pipe_name = _get_file_name_for_handle(handle)
    pattern = "^\\\\(cygwin|msys)-[a-z0-9]+-pty[0-9]+-(from|to)-master$"
    return re.match(pattern, pipe_name) is not None
|
mit
|
philipn/sycamore
|
Sycamore/support/pytz/zoneinfo/America/Indiana/Petersburg.py
|
9
|
5154
|
'''tzinfo timezone information for America/Indiana/Petersburg.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Petersburg(DstTzInfo):
    '''America/Indiana/Petersburg timezone definition. See datetime.tzinfo for details'''
    zone = 'America/Indiana/Petersburg'
    # UTC instants at which the local clock rules change.  Auto-generated
    # from the Olson tz database -- do not edit by hand.  Index-aligned
    # with _transition_info below; the first sentinel entry covers all
    # times before the first real transition.
    _utc_transition_times = [
d(1,1,1,0,0,0),
d(1918,3,31,8,0,0),
d(1918,10,27,7,0,0),
d(1919,3,30,8,0,0),
d(1919,10,26,7,0,0),
d(1942,2,9,8,0,0),
d(1945,8,14,23,0,0),
d(1945,9,30,7,0,0),
d(1955,5,1,6,0,0),
d(1955,9,25,7,0,0),
d(1956,4,29,8,0,0),
d(1956,9,30,7,0,0),
d(1957,4,28,8,0,0),
d(1957,9,29,7,0,0),
d(1958,4,27,8,0,0),
d(1958,9,28,7,0,0),
d(1959,4,26,8,0,0),
d(1959,9,27,7,0,0),
d(1960,4,24,8,0,0),
d(1960,9,25,7,0,0),
d(1961,4,30,8,0,0),
d(1961,10,29,7,0,0),
d(1962,4,29,8,0,0),
d(1962,10,28,7,0,0),
d(1963,4,28,8,0,0),
d(1963,10,27,7,0,0),
d(1964,4,26,8,0,0),
d(1964,10,25,7,0,0),
d(1965,4,25,8,0,0),
d(1966,10,30,7,0,0),
d(1967,4,30,8,0,0),
d(1967,10,29,7,0,0),
d(1968,4,28,8,0,0),
d(1968,10,27,7,0,0),
d(1969,4,27,8,0,0),
d(1969,10,26,7,0,0),
d(1970,4,26,8,0,0),
d(1970,10,25,7,0,0),
d(1971,4,25,8,0,0),
d(1971,10,31,7,0,0),
d(1972,4,30,8,0,0),
d(1972,10,29,7,0,0),
d(1973,4,29,8,0,0),
d(1973,10,28,7,0,0),
d(1974,1,6,8,0,0),
d(1974,10,27,7,0,0),
d(1975,2,23,8,0,0),
d(1975,10,26,7,0,0),
d(1976,4,25,8,0,0),
d(1976,10,31,7,0,0),
d(1977,4,24,8,0,0),
d(1977,10,30,7,0,0),
d(2006,4,2,7,0,0),
d(2006,10,29,7,0,0),
d(2007,3,11,8,0,0),
d(2007,11,4,7,0,0),
d(2008,3,9,8,0,0),
d(2008,11,2,7,0,0),
d(2009,3,8,8,0,0),
d(2009,11,1,7,0,0),
d(2010,3,14,8,0,0),
d(2010,11,7,7,0,0),
d(2011,3,13,8,0,0),
d(2011,11,6,7,0,0),
d(2012,3,11,8,0,0),
d(2012,11,4,7,0,0),
d(2013,3,10,8,0,0),
d(2013,11,3,7,0,0),
d(2014,3,9,8,0,0),
d(2014,11,2,7,0,0),
d(2015,3,8,8,0,0),
d(2015,11,1,7,0,0),
d(2016,3,13,8,0,0),
d(2016,11,6,7,0,0),
d(2017,3,12,8,0,0),
d(2017,11,5,7,0,0),
d(2018,3,11,8,0,0),
d(2018,11,4,7,0,0),
d(2019,3,10,8,0,0),
d(2019,11,3,7,0,0),
d(2020,3,8,8,0,0),
d(2020,11,1,7,0,0),
d(2021,3,14,8,0,0),
d(2021,11,7,7,0,0),
d(2022,3,13,8,0,0),
d(2022,11,6,7,0,0),
d(2023,3,12,8,0,0),
d(2023,11,5,7,0,0),
d(2024,3,10,8,0,0),
d(2024,11,3,7,0,0),
d(2025,3,9,8,0,0),
d(2025,11,2,7,0,0),
d(2026,3,8,8,0,0),
d(2026,11,1,7,0,0),
d(2027,3,14,8,0,0),
d(2027,11,7,7,0,0),
d(2028,3,12,8,0,0),
d(2028,11,5,7,0,0),
d(2029,3,11,8,0,0),
d(2029,11,4,7,0,0),
d(2030,3,10,8,0,0),
d(2030,11,3,7,0,0),
d(2031,3,9,8,0,0),
d(2031,11,2,7,0,0),
d(2032,3,14,8,0,0),
d(2032,11,7,7,0,0),
d(2033,3,13,8,0,0),
d(2033,11,6,7,0,0),
d(2034,3,12,8,0,0),
d(2034,11,5,7,0,0),
d(2035,3,11,8,0,0),
d(2035,11,4,7,0,0),
d(2036,3,9,8,0,0),
d(2036,11,2,7,0,0),
d(2037,3,8,8,0,0),
d(2037,11,1,7,0,0),
        ]
    # (utcoffset seconds, dst offset seconds, tzname) in effect from the
    # corresponding instant in _utc_transition_times above.
    _transition_info = [
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CWT'),
i(-18000,3600,'CPT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,0,'EST'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-18000,0,'EST'),
i(-18000,0,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
        ]
# Replace the class with its singleton instance, as pytz zoneinfo
# modules conventionally do.
Petersburg = Petersburg()
|
gpl-2.0
|
pbanaszkiewicz/amy
|
amy/workshops/migrations/0099_dcselforganizedeventrequest.py
|
3
|
5636
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-06-18 08:59
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django_countries.fields
class Migration(migrations.Migration):
    # Adds the DCSelfOrganizedEventRequest model: a request form for
    # self-organized Data Carpentry workshops.  Auto-generated by Django
    # (see header); do not hand-edit field definitions once applied.
    dependencies = [
        ('workshops', '0098_auto_20160617_1731'),
    ]
    operations = [
        migrations.CreateModel(
            name='DCSelfOrganizedEventRequest',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('active', models.BooleanField(default=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('last_updated_at', models.DateTimeField(auto_now=True, null=True)),
                ('name', models.CharField(max_length=255)),
                ('email', models.EmailField(max_length=254)),
                ('organization', models.CharField(max_length=255, verbose_name='University or organization affiliation')),
                ('instructor_status', models.CharField(blank=True, choices=[('', 'None'), ('incomplete', "Have gone through instructor training, but haven't yet completed checkout"), ('dc', 'Certified Data Carpentry instructor'), ('swc', 'Certified Software Carpentry instructor'), ('both', 'Certified Software and Data Carpentry instructor')], max_length=40, verbose_name='Your Software and Data Carpentry instructor status')),
                ('is_partner', models.CharField(blank=True, choices=[('y', 'Yes'), ('n', 'No'), ('u', 'Unsure'), ('', 'Other (enter below)')], max_length=1, verbose_name='Is your organization a Data Carpentry or Software Carpentry Partner')),
                ('is_partner_other', models.CharField(blank=True, default='', max_length=100, verbose_name='Other (is your organization a Partner?)')),
                ('location', models.CharField(help_text='City, Province or State', max_length=255, verbose_name='Location')),
                ('country', django_countries.fields.CountryField(max_length=2)),
                ('associated_conference', models.CharField(blank=True, default='', help_text='If the workshop is to be associated with a conference or meeting, which one?', max_length=100, verbose_name='Associated conference')),
                ('dates', models.CharField(help_text='Preferably in YYYY-MM-DD to YYYY-MM-DD format', max_length=255, verbose_name='Planned workshop dates')),
                ('domains_other', models.CharField(blank=True, default='', help_text='If none of the fields above works for you.', max_length=255, verbose_name='Other domains for the workshop')),
                ('topics_other', models.CharField(blank=True, default='', help_text='If none of the fields above works for you.', max_length=255, verbose_name='Other topics to be taught')),
                ('payment', models.CharField(choices=[('per_participant', 'I will contribute $25/participant through registration fees'), ('invoice', 'I will contribute $500 via an invoice'), ('credit_card', 'I will contribute $500 via a credit card payment'), ('fee_waiver', 'I would like to request a fee waiver')], default='per_participant', help_text='Self-organized workshops for non-Partner organizations are $500 or $25/participant for a workshop licensing fee (<a href="http://www.datacarpentry.org/self-organized-workshops/">http://www.datacarpentry.org/self-organized-workshops/</a>). Fee waivers are available and generally granted upon request.', max_length=40, verbose_name='Payment choice')),
                ('fee_waiver_reason', models.CharField(blank=True, default='', max_length=255, verbose_name='Reason for requesting a fee waiver')),
                ('handle_registration', models.BooleanField(default=False, verbose_name='I confirm that I will handle registration for this workshop')),
                ('distribute_surveys', models.BooleanField(default=False, verbose_name='I confirm that I will distribute the Data Carpentry surveys to workshop participants')),
                ('follow_code_of_conduct', models.BooleanField(default=False, verbose_name='I confirm that I will follow the Data Carpentry Code of Conduct')),
                ('assigned_to', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
                ('attendee_academic_levels', models.ManyToManyField(help_text='If you know the academic level(s) of your attendees, indicate them here.', to='workshops.AcademicLevel', verbose_name="Attendees' academic level")),
                ('attendee_data_analysis_level', models.ManyToManyField(help_text="If you know, indicate learner's general level of data analysis experience", to='workshops.DataAnalysisLevel', verbose_name="Attendees' level of data analysis experience")),
                ('domains', models.ManyToManyField(help_text="Set of lessons you're going to teach", to='workshops.DCWorkshopDomain', verbose_name='Domain for the workshop')),
                ('topics', models.ManyToManyField(help_text='A Data Carpentry workshop must include a Data Carpentry lesson on data organization and the other modules in the same domain from the Data Carpentry curriculum (see <a href="http://www.datacarpentry.org/workshops/">http://www.datacarpentry.org/workshops/</a>). If you do want to include materials not in our curriculum, please note that below and we\'ll get in touch.', to='workshops.DCWorkshopTopic', verbose_name='Topics to be taught')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
|
mit
|
tscohen/chainer
|
tests/cupy_tests/test_ndarray_copy_and_view.py
|
21
|
1982
|
import unittest
import numpy
from cupy import testing
@testing.gpu
class TestArrayCopyAndView(unittest.TestCase):
_multiprocess_can_split_ = True
@testing.numpy_cupy_array_equal()
def test_view(self, xp):
a = testing.shaped_arange((4,), xp, dtype=numpy.float32)
b = a.view(dtype=numpy.int32)
b[:] = 0
return a
@testing.numpy_cupy_array_equal()
def test_flatten(self, xp):
a = testing.shaped_arange((2, 3, 4), xp)
return a.flatten()
@testing.numpy_cupy_array_equal()
def test_flatten_copied(self, xp):
a = testing.shaped_arange((4,), xp)
b = a.flatten()
a[:] = 1
return b
@testing.numpy_cupy_array_equal()
def test_transposed_flatten(self, xp):
a = testing.shaped_arange((2, 3, 4), xp).transpose(2, 0, 1)
return a.flatten()
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_fill(self, xp, dtype):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
a.fill(1)
return a
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_transposed_fill(self, xp, dtype):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
b = a.transpose(2, 0, 1)
b.fill(1)
return b
@testing.for_all_dtypes(name='src_dtype')
@testing.for_all_dtypes(name='dst_dtype')
@testing.numpy_cupy_array_equal()
def test_astype(self, xp, src_dtype, dst_dtype):
a = testing.shaped_arange((2, 3, 4), xp, src_dtype)
return a.astype(dst_dtype)
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_diagonal1(self, xp, dtype):
a = testing.shaped_arange((3, 4, 5), xp, dtype)
return a.diagonal(1, 2, 0)
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_diagonal2(self, xp, dtype):
a = testing.shaped_arange((3, 4, 5), xp, dtype)
return a.diagonal(-1, 2, 0)
|
mit
|
tiredpixel/pikka-bird-collector-py
|
pikka_bird_collector/parsers/redis.py
|
1
|
1120
|
import re
from .base import Base
class Redis(Base):
    """
    Parses main Redis INFO-type format.
    e.g.
            # Clients
            connected_clients:8
            client_longest_raw_list:0
            client_biggest_input_buf:0
            blocked_clients:0
    """

    # "# Section" header line
    RE_SECTION = re.compile(r'# (?P<section>.+)')
    # "key:value" setting line
    RE_SETTING = re.compile(r'(?P<k>\w+):(?P<v>.*)')

    def parse2(self, raw):
        """Parse ``raw`` INFO output into ``self.ds`` and return it.

        Settings seen before any section header are stored at the top
        level; later ones are nested under their section's key.
        """
        current_section = None
        for line in raw.split('\n'):
            section_match = Redis.RE_SECTION.match(line)
            if section_match is not None:
                # start a new (empty) section dict
                current_section = self.converter_key(
                    section_match.group('section'))
                self.ds[current_section] = {}
                continue
            setting_match = Redis.RE_SETTING.match(line)
            if setting_match is None:
                continue
            key = self.converter_key(setting_match.group('k'))
            value = self.converter_value(setting_match.group('v'))
            target = self.ds if current_section is None \
                else self.ds[current_section]
            target[key] = value
        return self.ds
|
mit
|
contactr2m/remote_repo
|
src/profiles/migrations/0001_initial.py
|
1
|
1387
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-13 21:15
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
    # Initial migration for the profiles app: creates the Profile model,
    # a one-to-one extension of the user model stored in "user_profile".
    # Auto-generated by Django (see header); do not hand-edit once applied.
    initial = True
    dependencies = [
        ('accounts_user', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)),
                ('slug', models.UUIDField(blank=True, default=uuid.uuid4, editable=False)),
                ('picture', models.ImageField(blank=True, null=True, upload_to='profile_pics/%Y-%m-%d/', verbose_name='Profile picture')),
                ('avatar_url', models.CharField(blank=True, max_length=256, null=True)),
                ('bio', models.CharField(blank=True, max_length=200, null=True, verbose_name='Short Bio')),
                ('email_verified', models.BooleanField(default=False, verbose_name='Email verified')),
                ('dob', models.DateField(blank=True, null=True, verbose_name='dob')),
            ],
            options={
                'abstract': False,
                'db_table': 'user_profile',
            },
        ),
    ]
|
mit
|
jmoiron/jmoiron.net
|
jmoiron/comments/models.py
|
1
|
1654
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""comment data models."""
import datetime
import argot
import operator
from jmoiron.utils import summarize
from flask import Blueprint, render_template
from micromongo import *
__all__ = ["Comment", "blueprint"]
# Flask blueprint registered by the app to expose the comment views'
# templates and static assets.
blueprint = Blueprint("comments", __name__,
        template_folder="templates",
        static_folder="static",
)
class Comment(Model):
    """A user comment attached to another document.

    ``object`` references the commented document (probably a
    ``(collection, _id)`` tuple -- see the spec comment).  Comments get a
    sequential integer ``id`` assigned on first save and their markup is
    pre-rendered into ``rendered``.
    """

    collection = "jmoiron.comment"

    spec = {
        "comment": Field(required=True, default="", type=basestring),
        "url": Field(required=True, default="", type=basestring),
        "email": Field(required=True, default=""),
        "name": Field(required=True, default="", type=basestring),
        "ip_address": Field(required=True, type=basestring),
        "timestamp": Field(required=True),
        "needs_moderation": Field(required=True, default=False, type=bool),
        "id": Field(required=True, type=int),
        "object": Field(required=True),  # probably a tuple, (collection, _id)
    }

    def pre_save(self):
        """Fill in defaults and pre-render the comment before saving."""
        if not self.id:
            # NOTE(review): count-based ids can collide under concurrent
            # inserts or after deletions -- consider a counter collection.
            self.id = Comment.find().count()
        if not self.timestamp:
            # bug fix: the module imports ``datetime`` (the module), so the
            # previous ``datetime.now()`` raised AttributeError at runtime.
            self.timestamp = datetime.datetime.now()
        # render markup once at save time so pages don't re-render per view
        self.rendered = argot.render(self.comment)

    @classmethod
    def for_objects(cls, *objects):
        """For a bunch of objects that have a "comments", replace the oids in
        their lists with the actual embedded comment objects. Returns the oid
        mapping for comments."""
        oids = reduce(operator.add, [o.get("comments", []) for o in objects])
        comments = cls.find({"_id": {"$in": oids}})
        return dict([(c._id, c) for c in comments])
|
mit
|
nbeck90/city-swap
|
cityswap/cityswap/settings.py
|
1
|
2690
|
"""
Django settings for cityswap project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to the repository; rotate it and
# load it from the environment before any production deployment.
SECRET_KEY = 'vqkgu^x)#ofqa5_e94h!31gcbp!zv7^&en%ykxu3d2kfx3hsz)'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# NOTE(review): "*" accepts any Host header -- restrict for production.
ALLOWED_HOSTS = ["*"]
# django-registration: activation-link lifetime and auto-login on signup
ACCOUNT_ACTIVATION_DAYS = 7
REGISTRATION_AUTO_LOGIN = True
# Application definition
INSTALLED_APPS = (
    'django.contrib.sites',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'profiles',
    'registration',
    'requests',
    'sorl.thumbnail'
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'cityswap.urls'
WSGI_APPLICATION = 'cityswap.wsgi.application'
# Postgres connects as the OS user running the server.
USER_NAME = os.environ.get('USER')
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'city_swap_db',
        'USER': USER_NAME,
        'HOST': '127.0.0.1',
        'PORT': '5432',
    }
}
# Emails are printed to the console (development only).
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/cityswap/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATICFILES_DIRS = (
    os.path.join(
        os.path.dirname(__file__),
        'static',
    ),
)
MEDIA_URL = 'media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
TEMPLATE_DIRS = (
    os.path.join(BASE_DIR, 'cityswap/templates'),
)
|
mit
|
sdopoku/flask-blog
|
env/lib/python2.7/site-packages/jinja2/meta.py
|
336
|
4198
|
# -*- coding: utf-8 -*-
"""
jinja2.meta
~~~~~~~~~~~
This module implements various functions that exposes information about
templates that might be interesting for various kinds of applications.
:copyright: (c) 2010 by the Jinja Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from jinja2 import nodes
from jinja2.compiler import CodeGenerator
from jinja2._compat import string_types
class TrackingCodeGenerator(CodeGenerator):
    """We abuse the code generator for introspection: it walks the AST as
    if compiling, but discards all output and only records which names
    would be looked up from the context at runtime."""
    def __init__(self, environment):
        # Compile into a throwaway '<introspection>' template name/filename.
        CodeGenerator.__init__(self, environment, '<introspection>',
                               '<introspection>')
        # Names that would be resolved from the render context.
        self.undeclared_identifiers = set()
    def write(self, x):
        """Don't write."""
    def pull_locals(self, frame):
        """Remember all undeclared identifiers."""
        self.undeclared_identifiers.update(frame.identifiers.undeclared)
def find_undeclared_variables(ast):
    """Returns a set of all variables in the AST that will be looked up from
    the context at runtime.  Because at compile time it's not known which
    variables will be used depending on the path the execution takes at
    runtime, all variables are returned.
    >>> from jinja2 import Environment, meta
    >>> env = Environment()
    >>> ast = env.parse('{% set foo = 42 %}{{ bar + foo }}')
    >>> meta.find_undeclared_variables(ast) == set(['bar'])
    True
    .. admonition:: Implementation
       Internally the code generator is used for finding undeclared
       variables.  This is good to know because the code generator might
       raise a :exc:`TemplateAssertionError` during compilation and as a
       matter of fact this function can currently raise that exception as
       well.
    """
    # Run a throwaway compilation pass that records undeclared names.
    tracker = TrackingCodeGenerator(ast.environment)
    tracker.visit(ast)
    return tracker.undeclared_identifiers
def find_referenced_templates(ast):
    """Finds all the referenced templates from the AST. This will return an
    iterator over all the hardcoded template extensions, inclusions and
    imports. If dynamic inheritance or inclusion is used, `None` will be
    yielded.
    >>> from jinja2 import Environment, meta
    >>> env = Environment()
    >>> ast = env.parse('{% extends "layout.html" %}{% include helper %}')
    >>> list(meta.find_referenced_templates(ast))
    ['layout.html', None]
    This function is useful for dependency tracking. For example if you want
    to rebuild parts of the website after a layout template has changed.
    """
    # Only these node types can pull in another template.
    for node in ast.find_all((nodes.Extends, nodes.FromImport, nodes.Import,
                              nodes.Include)):
        if not isinstance(node.template, nodes.Const):
            # a tuple with some non consts in there
            if isinstance(node.template, (nodes.Tuple, nodes.List)):
                for template_name in node.template.items:
                    # something const, only yield the strings and ignore
                    # non-string consts that really just make no sense
                    if isinstance(template_name, nodes.Const):
                        if isinstance(template_name.value, string_types):
                            yield template_name.value
                    # something dynamic in there
                    else:
                        yield None
            # something dynamic we don't know about here
            else:
                yield None
            continue
        # constant is a basestring, direct template name
        if isinstance(node.template.value, string_types):
            yield node.template.value
        # a tuple or list (latter *should* not happen) made of consts,
        # yield the consts that are strings. We could warn here for
        # non string values
        elif isinstance(node, nodes.Include) and \
           isinstance(node.template.value, (tuple, list)):
            for template_name in node.template.value:
                if isinstance(template_name, string_types):
                    yield template_name
        # something else we don't care about, we could warn here
        else:
            yield None
|
gpl-2.0
|
svanlochem/Paparazzi
|
sw/ground_segment/python/ivytoredis/ivy_to_redis.py
|
39
|
2358
|
#!/usr/bin/env python
from __future__ import print_function
import redis
import time
import signal
import argparse
import sys
import os
# if PAPARAZZI_SRC not set, then assume the tree containing this
# file is a reasonable substitute
PPRZ_SRC = os.getenv("PAPARAZZI_SRC", os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                                                    '../../../../')))
# Make the paparazzi python library importable before importing from it.
PPRZ_LIB_PYTHON = os.path.join(PPRZ_SRC, "sw/lib/python")
sys.path.append(PPRZ_LIB_PYTHON)
from ivy_msg_interface import IvyMessagesInterface
# Module-global server instance, set by main() and used by the
# SIGINT handler.
server = None
class Ivy2RedisServer():
    """Bridges Ivy bus messages into redis: every received message is both
    published on, and stored under, a dotted key."""

    def __init__(self, redishost, redisport, verbose=False):
        self.verbose = verbose
        # Subscribing to the Ivy bus routes every message to message_recv.
        self.interface = IvyMessagesInterface(self.message_recv)
        self.r = redis.StrictRedis(host=redishost, port=redisport, db=0)
        self.keep_running = True
        print("Connected to redis server %s on port %i" % (redishost, redisport))

    def message_recv(self, ac_id, msg):
        # Telemetry from an aircraft (truthy ac_id) gets the id appended
        # to the key; ground messages (ac_id == 0) do not.
        if not ac_id:
            key = "{0}.{1}".format(msg.msg_class, msg.name)
        else:
            key = "{0}.{1}.{2}".format(msg.msg_class, msg.name, ac_id)
        if self.verbose:
            print("received message, key=%s, msg=%s" % (key, msg.to_json(payload_only=True)))
            sys.stdout.flush()
        # Publish for live subscribers and store the latest value.
        self.r.publish(key, msg.to_json(payload_only=True))
        self.r.set(key, msg.to_json(payload_only=True))

    def run(self):
        # Idle loop; the real work happens in Ivy callbacks.
        while self.keep_running:
            time.sleep(0.1)

    def stop(self):
        self.keep_running = False
        self.interface.shutdown()
def signal_handler(signal, frame):
    # SIGINT handler: ask the global server instance to shut down cleanly.
    global server
    server.stop()
def main():
    """Parse CLI options, start the Ivy-to-redis bridge and block until
    interrupted (Ctrl-C)."""
    global server

    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("-s", "--server", help="hostname here redis runs", default="localhost")
    arg_parser.add_argument("-p", "--port", help="port used by redis", type=int, default=6379)
    arg_parser.add_argument("-v", "--verbose", dest="verbose", action="store_true")
    options = arg_parser.parse_args()

    server = Ivy2RedisServer(options.server, options.port, options.verbose)
    # Install the Ctrl-C handler only once the server exists to stop.
    signal.signal(signal.SIGINT, signal_handler)
    server.run()
# Script entry point.
if __name__ == '__main__':
    main()
|
gpl-2.0
|
nuxeh/keystone
|
keystone/trust/core.py
|
1
|
9580
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Main entry point into the Identity service."""
import abc
from oslo_config import cfg
from oslo_log import log
import six
from keystone.common import dependency
from keystone.common import manager
from keystone import exception
from keystone.i18n import _
from keystone import notifications
CONF = cfg.CONF
LOG = log.getLogger(__name__)
@dependency.requires('identity_api')
@dependency.provider('trust_api')
class Manager(manager.Manager):
"""Default pivot point for the Trust backend.
See :mod:`keystone.common.manager.Manager` for more details on how this
dynamically calls the backend.
"""
_TRUST = "OS-TRUST:trust"
def __init__(self):
super(Manager, self).__init__(CONF.trust.driver)
@staticmethod
def _validate_redelegation(redelegated_trust, trust):
# Validate against:
# 0 < redelegation_count <= max_redelegation_count
max_redelegation_count = CONF.trust.max_redelegation_count
redelegation_depth = redelegated_trust.get('redelegation_count', 0)
if not (0 < redelegation_depth <= max_redelegation_count):
raise exception.Forbidden(
_('Remaining redelegation depth of %(redelegation_depth)d'
' out of allowed range of [0..%(max_count)d]') %
{'redelegation_depth': redelegation_depth,
'max_count': max_redelegation_count})
# remaining_uses is None
remaining_uses = trust.get('remaining_uses')
if remaining_uses is not None:
raise exception.Forbidden(
_('Field "remaining_uses" is set to %(value)s'
' while it must not be set in order to redelegate a trust'),
value=remaining_uses)
# expiry times
trust_expiry = trust.get('expires_at')
redelegated_expiry = redelegated_trust['expires_at']
if trust_expiry:
# redelegated trust is from backend and has no tzinfo
if redelegated_expiry < trust_expiry.replace(tzinfo=None):
raise exception.Forbidden(
_('Requested expiration time is more '
'than redelegated trust can provide'))
else:
trust['expires_at'] = redelegated_expiry
# trust roles is a subset of roles of the redelegated trust
parent_roles = set(role['id']
for role in redelegated_trust['roles'])
if not all(role['id'] in parent_roles for role in trust['roles']):
raise exception.Forbidden(
_('Some of requested roles are not in redelegated trust'))
def get_trust_pedigree(self, trust_id):
trust = self.driver.get_trust(trust_id)
trust_chain = [trust]
if trust and trust.get('redelegated_trust_id'):
trusts = self.driver.list_trusts_for_trustor(
trust['trustor_user_id'])
while trust_chain[-1].get('redelegated_trust_id'):
for t in trusts:
if t['id'] == trust_chain[-1]['redelegated_trust_id']:
trust_chain.append(t)
break
return trust_chain
def get_trust(self, trust_id, deleted=False):
trust = self.driver.get_trust(trust_id, deleted)
if trust and trust.get('redelegated_trust_id') and not deleted:
trust_chain = self.get_trust_pedigree(trust_id)
for parent, child in zip(trust_chain[1:], trust_chain):
self._validate_redelegation(parent, child)
try:
self.identity_api.assert_user_enabled(
parent['trustee_user_id'])
except (AssertionError, exception.NotFound):
raise exception.Forbidden(
_('One of the trust agents is disabled or deleted'))
return trust
def create_trust(self, trust_id, trust, roles, redelegated_trust=None,
initiator=None):
"""Create a new trust.
:returns: a new trust
"""
# Default for initial trust in chain is max_redelegation_count
max_redelegation_count = CONF.trust.max_redelegation_count
requested_count = trust.get('redelegation_count')
redelegatable = (trust.pop('allow_redelegation', False)
and requested_count != 0)
if not redelegatable:
trust['redelegation_count'] = requested_count = 0
remaining_uses = trust.get('remaining_uses')
if remaining_uses is not None and remaining_uses <= 0:
msg = _('remaining_uses must be a positive integer or null.')
raise exception.ValidationError(msg)
else:
# Validate requested redelegation depth
if requested_count and requested_count > max_redelegation_count:
raise exception.Forbidden(
_('Requested redelegation depth of %(requested_count)d '
'is greater than allowed %(max_count)d') %
{'requested_count': requested_count,
'max_count': max_redelegation_count})
# Decline remaining_uses
if 'remaining_uses' in trust:
exception.ValidationError(_('remaining_uses must not be set '
'if redelegation is allowed'))
if redelegated_trust:
trust['redelegated_trust_id'] = redelegated_trust['id']
remaining_count = redelegated_trust['redelegation_count'] - 1
# Validate depth consistency
if (redelegatable and requested_count and
requested_count != remaining_count):
msg = _('Modifying "redelegation_count" upon redelegation is '
'forbidden. Omitting this parameter is advised.')
raise exception.Forbidden(msg)
trust.setdefault('redelegation_count', remaining_count)
# Check entire trust pedigree validity
pedigree = self.get_trust_pedigree(redelegated_trust['id'])
for t in pedigree:
self._validate_redelegation(t, trust)
trust.setdefault('redelegation_count', max_redelegation_count)
ref = self.driver.create_trust(trust_id, trust, roles)
notifications.Audit.created(self._TRUST, trust_id, initiator=initiator)
return ref
def delete_trust(self, trust_id, initiator=None):
    """Remove a trust and, recursively, every trust redelegated from it.

    :param trust_id: identifier of the trust to delete
    :param initiator: request initiator, forwarded to audit notifications
    :raises: keystone.exception.TrustNotFound
    """
    trust = self.driver.get_trust(trust_id)
    if not trust:
        raise exception.TrustNotFound(trust_id)

    # Collect direct children of this trust among the trustor's trusts.
    candidates = self.driver.list_trusts_for_trustor(
        trust['trustor_user_id'])
    children = [c for c in candidates
                if c.get('redelegated_trust_id') == trust_id]

    for child in children:
        try:
            # Recurse so that every trust in the chain emits its own
            # deletion notification.
            self.delete_trust(child['id'])
        except exception.TrustNotFound:
            # Trust was removed by a concurrent process; consistency
            # must not suffer, so just keep going.
            pass

    # All descendants are gone; remove this trust itself.
    self.driver.delete_trust(trust_id)

    notifications.Audit.deleted(self._TRUST, trust_id, initiator)
@six.add_metaclass(abc.ABCMeta)
class Driver(object):
    """Interface description for a trust backend driver."""

    @abc.abstractmethod
    def create_trust(self, trust_id, trust, roles):
        """Create a new trust.

        :returns: a new trust
        """
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def get_trust(self, trust_id, deleted=False):
        """Get a trust by the trust id.

        :param trust_id: the trust identifier
        :type trust_id: string
        :param deleted: return the trust even if it is deleted, expired, or
                        has no consumptions left
        :type deleted: bool
        """
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def list_trusts(self):
        """List all trusts."""
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def list_trusts_for_trustee(self, trustee):
        """List all trusts delegated to the given trustee user."""
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def list_trusts_for_trustor(self, trustor):
        """List all trusts delegated by the given trustor user."""
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def delete_trust(self, trust_id):
        """Delete the trust identified by trust_id."""
        raise exception.NotImplemented()  # pragma: no cover

    @abc.abstractmethod
    def consume_use(self, trust_id):
        """Consume one use when a trust was created with a limitation on its
        uses, provided there are still uses available.

        :raises: keystone.exception.TrustUseLimitReached,
                 keystone.exception.TrustNotFound
        """
        raise exception.NotImplemented()  # pragma: no cover
|
apache-2.0
|
sandymanu/android_kernel_xiaomi_kenzo
|
tools/perf/scripts/python/sched-migration.py
|
11215
|
11670
|
#!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <[email protected]>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
# pid -> command name mapping, seeded with the swapper/idle task (pid 0).
threads = {0: "idle"}


def thread_name(pid):
    """Return a human-readable "comm:pid" label for *pid*."""
    return "{0}:{1}".format(threads[pid], pid)
class RunqueueEventUnknown:
    """Placeholder for a runqueue change that could not be classified."""

    @staticmethod
    def color():
        # No dedicated color: the timeline rectangle keeps its load shade.
        return None

    def __repr__(self):
        return "unknown"
class RunqueueEventSleep:
    """A task on this runqueue went to sleep."""

    @staticmethod
    def color():
        # Blue marks sleep events in the GUI timeline.
        return (0, 0, 0xff)

    def __init__(self, sleeper):
        # pid of the task that went to sleep
        self.sleeper = sleeper

    def __repr__(self):
        return "{0} gone to sleep".format(thread_name(self.sleeper))
class RunqueueEventWakeup:
    """A sleeping task was woken up on this runqueue."""

    @staticmethod
    def color():
        # Yellow marks wakeups in the GUI timeline.
        return (0xff, 0xff, 0)

    def __init__(self, wakee):
        # pid of the task that was woken
        self.wakee = wakee

    def __repr__(self):
        return "{0} woke up".format(thread_name(self.wakee))
class RunqueueEventFork:
    """A freshly forked task appeared on this runqueue."""

    @staticmethod
    def color():
        # Green marks forks in the GUI timeline.
        return (0, 0xff, 0)

    def __init__(self, child):
        # pid of the new child task
        self.child = child

    def __repr__(self):
        return "new forked task {0}".format(thread_name(self.child))
class RunqueueMigrateIn:
    """A task migrated onto this runqueue from another CPU."""

    @staticmethod
    def color():
        # Cyan marks inbound migrations in the GUI timeline.
        return (0, 0xf0, 0xff)

    def __init__(self, new):
        # pid of the task that arrived
        self.new = new

    def __repr__(self):
        return "task migrated in {0}".format(thread_name(self.new))
class RunqueueMigrateOut:
    """A task migrated away from this runqueue to another CPU."""

    @staticmethod
    def color():
        # Magenta marks outbound migrations in the GUI timeline.
        return (0xff, 0, 0xff)

    def __init__(self, old):
        # pid of the task that left
        self.old = old

    def __repr__(self):
        return "task migrated out {0}".format(thread_name(self.old))
class RunqueueSnapshot:
    """Snapshot of one CPU runqueue: the set of runnable task pids plus
    the event that produced this state.  Update methods return a *new*
    snapshot (or self when nothing changed), so snapshots can be shared
    between time slices."""

    def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
        # The default [0] seeds the queue with the idle task (pid 0).
        # The mutable defaults are tolerable here: tasks is immediately
        # copied into a tuple, and the shared default event is only ever
        # replaced on instances, never mutated.
        self.tasks = tuple(tasks)
        self.event = event

    def sched_switch(self, prev, prev_state, next):
        """Return the snapshot after a context switch prev -> next."""
        event = RunqueueEventUnknown()

        # "R" means prev stayed runnable; if both tasks are already
        # accounted for, the queue contents did not change at all.
        # taskState() comes from SchedGui (star import).
        if taskState(prev_state) == "R" and next in self.tasks \
                and prev in self.tasks:
            return self

        if taskState(prev_state) != "R":
            event = RunqueueEventSleep(prev)

        next_tasks = list(self.tasks[:])
        if prev in self.tasks:
            if taskState(prev_state) != "R":
                # prev blocked: drop it from the runnable set.
                next_tasks.remove(prev)
        elif taskState(prev_state) == "R":
            # prev was running but untracked; it is still runnable.
            next_tasks.append(prev)

        if next not in next_tasks:
            next_tasks.append(next)

        return RunqueueSnapshot(next_tasks, event)

    def migrate_out(self, old):
        """Return the snapshot after task *old* left this runqueue."""
        if old not in self.tasks:
            return self
        next_tasks = [task for task in self.tasks if task != old]

        return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))

    def __migrate_in(self, new, event):
        # Shared implementation for migrate_in / wake_up / wake_up_new.
        if new in self.tasks:
            # Task already present: just record the triggering event.
            self.event = event
            return self
        next_tasks = self.tasks[:] + tuple([new])

        return RunqueueSnapshot(next_tasks, event)

    def migrate_in(self, new):
        """Return the snapshot after task *new* migrated onto this queue."""
        return self.__migrate_in(new, RunqueueMigrateIn(new))

    def wake_up(self, new):
        """Return the snapshot after task *new* was woken on this queue."""
        return self.__migrate_in(new, RunqueueEventWakeup(new))

    def wake_up_new(self, new):
        """Return the snapshot after freshly forked task *new* appeared."""
        return self.__migrate_in(new, RunqueueEventFork(new))

    def load(self):
        """ Provide the number of tasks on the runqueue.
            Don't count idle"""
        return len(self.tasks) - 1

    def __repr__(self):
        # NOTE(review): origin_tostring() is not defined anywhere in this
        # file, so calling repr() on a snapshot raises AttributeError.
        # Confirm whether this method is dead code or the helper is
        # expected from elsewhere.
        ret = self.tasks.__repr__()
        ret += self.origin_tostring()

        return ret
class TimeSlice:
    """State of all CPU runqueues over one interval [start, end) of the
    trace.  Each event handler clones the previous slice's runqueues and
    applies one change; slices that record an actual change append
    themselves to the owning TimeSliceList."""

    def __init__(self, start, prev):
        self.start = start
        # Previous slice in the chain (None only for the sentinel).
        self.prev = prev
        # end is advanced by next() when the following event arrives.
        self.end = start
        # cpus that triggered the event
        self.event_cpus = []
        if prev is not None:
            self.total_load = prev.total_load
            # Shallow copy: RunqueueSnapshot values are shared until a
            # CPU's queue actually changes.
            self.rqs = prev.rqs.copy()
        else:
            self.rqs = defaultdict(RunqueueSnapshot)
            self.total_load = 0

    def __update_total_load(self, old_rq, new_rq):
        # Keep the cached system-wide load in sync with a queue change.
        diff = new_rq.load() - old_rq.load()
        self.total_load += diff

    def sched_switch(self, ts_list, prev, prev_state, next, cpu):
        """Apply a context switch on *cpu*; record self in ts_list if the
        runqueue actually changed."""
        old_rq = self.prev.rqs[cpu]
        new_rq = old_rq.sched_switch(prev, prev_state, next)

        if old_rq is new_rq:
            return

        self.rqs[cpu] = new_rq
        self.__update_total_load(old_rq, new_rq)
        ts_list.append(self)
        self.event_cpus = [cpu]

    def migrate(self, ts_list, new, old_cpu, new_cpu):
        """Move task *new* from old_cpu's queue to new_cpu's queue."""
        if old_cpu == new_cpu:
            return
        old_rq = self.prev.rqs[old_cpu]
        out_rq = old_rq.migrate_out(new)
        self.rqs[old_cpu] = out_rq
        self.__update_total_load(old_rq, out_rq)

        new_rq = self.prev.rqs[new_cpu]
        in_rq = new_rq.migrate_in(new)
        self.rqs[new_cpu] = in_rq
        self.__update_total_load(new_rq, in_rq)

        ts_list.append(self)

        # Only flag the source cpu if the task was really on its queue.
        if old_rq is not out_rq:
            self.event_cpus.append(old_cpu)
        self.event_cpus.append(new_cpu)

    def wake_up(self, ts_list, pid, cpu, fork):
        """Apply a wakeup (or fork wakeup when *fork* is truthy) of *pid*
        on *cpu*."""
        old_rq = self.prev.rqs[cpu]
        if fork:
            new_rq = old_rq.wake_up_new(pid)
        else:
            new_rq = old_rq.wake_up(pid)

        if new_rq is old_rq:
            return

        self.rqs[cpu] = new_rq
        self.__update_total_load(old_rq, new_rq)
        ts_list.append(self)
        self.event_cpus = [cpu]

    def next(self, t):
        """Close this slice at timestamp *t* and return its successor."""
        self.end = t
        return TimeSlice(t, self)
class TimeSliceList(UserList):
    """Ordered list of TimeSlice objects plus the drawing callbacks used
    by the wx GUI (SchedGui).  Written for Python 2 (UserList, integer
    division, xrange)."""

    def __init__(self, arg = []):
        # NOTE(review): mutable default argument; tolerable because the
        # class is instantiated once per trace without arguments.
        self.data = arg

    def get_time_slice(self, ts):
        """Return the open slice for timestamp *ts*, creating the chain
        sentinel on first use."""
        if len(self.data) == 0:
            slice = TimeSlice(ts, TimeSlice(-1, None))
        else:
            slice = self.data[-1].next(ts)

        return slice

    def find_time_slice(self, ts):
        """Binary-search the index of the slice containing *ts*; -1 if
        none covers it."""
        start = 0
        end = len(self.data)
        found = -1
        searching = True
        while searching:
            if start == end or start == end - 1:
                searching = False

            # Python 2 integer division.
            i = (end + start) / 2
            if self.data[i].start <= ts and self.data[i].end >= ts:
                found = i
                end = i
                continue

            if self.data[i].end < ts:
                start = i

            elif self.data[i].start > ts:
                end = i

        return found

    def set_root_win(self, win):
        # Back-reference to the RootFrame that renders this list.
        self.root_win = win

    def mouse_down(self, cpu, t):
        """GUI callback: show a summary of *cpu*'s runqueue at time *t*."""
        idx = self.find_time_slice(t)
        if idx == -1:
            return

        ts = self[idx]
        rq = ts.rqs[cpu]
        raw = "CPU: %d\n" % cpu
        raw += "Last event : %s\n" % rq.event.__repr__()
        raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
        # NOTE(review): timestamps are in nanoseconds, so dividing by
        # 10**6 yields milliseconds, not the "us" the label claims -
        # confirm the intended unit.
        raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
        raw += "Load = %d\n" % rq.load()
        for t in rq.tasks:
            raw += "%s \n" % thread_name(t)

        self.root_win.update_summary(raw)

    def update_rectangle_cpu(self, slice, cpu):
        """Paint one (slice, cpu) rectangle: redder means higher share of
        the total load; a colored top stripe marks the triggering event."""
        rq = slice.rqs[cpu]

        if slice.total_load != 0:
            load_rate = rq.load() / float(slice.total_load)
        else:
            load_rate = 0

        red_power = int(0xff - (0xff * load_rate))
        color = (0xff, red_power, red_power)

        top_color = None

        if cpu in slice.event_cpus:
            top_color = rq.event.color()

        self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)

    def fill_zone(self, start, end):
        """Repaint every slice overlapping [start, end]."""
        i = self.find_time_slice(start)
        if i == -1:
            return

        for i in xrange(i, len(self.data)):
            timeslice = self.data[i]
            if timeslice.start > end:
                return

            for cpu in timeslice.rqs:
                self.update_rectangle_cpu(timeslice, cpu)

    def interval(self):
        """Return (first start, last end) of the recorded trace."""
        if len(self.data) == 0:
            return (0, 0)

        return (self.data[0].start, self.data[-1].end)

    def nr_rectangles(self):
        # Highest cpu number seen in the final slice; used to size the GUI.
        last_ts = self.data[-1]
        max_cpu = 0
        for cpu in last_ts.rqs:
            if cpu > max_cpu:
                max_cpu = cpu

        return max_cpu
class SchedEventProxy:
    """Receives decoded scheduler events from perf and feeds them into
    the TimeSliceList model, keeping track of which pid runs on each cpu
    so inconsistent (lossy) traces can be flagged."""

    def __init__(self):
        # Last pid seen running on each cpu; -1 means "not yet known".
        self.current_tsk = defaultdict(lambda : -1)
        self.timeslices = TimeSliceList()

    def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
                     next_comm, next_pid, next_prio):
        """ Ensure the task we sched out this cpu is really the one
            we logged. Otherwise we may have missed traces """
        on_cpu_task = self.current_tsk[headers.cpu]
        if on_cpu_task != -1 and on_cpu_task != prev_pid:
            # Python 2 print statement; the event is still processed below.
            print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
                (headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)

        # Keep the global pid -> comm table up to date.
        threads[prev_pid] = prev_comm
        threads[next_pid] = next_comm
        self.current_tsk[headers.cpu] = next_pid

        ts = self.timeslices.get_time_slice(headers.ts())
        ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)

    def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
        """Record a task migration between cpus."""
        ts = self.timeslices.get_time_slice(headers.ts())
        ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)

    def wake_up(self, headers, comm, pid, success, target_cpu, fork):
        """Record a wakeup; failed wakeups (success == 0) are ignored."""
        if success == 0:
            return
        ts = self.timeslices.get_time_slice(headers.ts())
        ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
    """Called by perf before the first event: create the global parser."""
    global parser
    parser = SchedEventProxy()
def trace_end():
    """Called by perf after the last event: launch the wx GUI."""
    app = wx.App(False)
    timeslices = parser.timeslices
    # The frame registers itself with the app; the local reference keeps
    # it alive until MainLoop() returns.
    frame = RootFrame(timeslices, "Migration")
    app.MainLoop()
# The following handlers were generated by "perf script -g python"; they
# must exist so perf does not report the events as unhandled, but this
# tool does not use them.
def sched__sched_stat_runtime(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, runtime, vruntime):
    pass

def sched__sched_stat_iowait(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, delay):
    pass

def sched__sched_stat_sleep(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, delay):
    pass

def sched__sched_stat_wait(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, delay):
    pass

def sched__sched_process_fork(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        parent_comm, parent_pid, child_comm, child_pid):
    pass

def sched__sched_process_wait(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, prio):
    pass

def sched__sched_process_exit(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, prio):
    pass

def sched__sched_process_free(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, prio):
    pass
def sched__sched_migrate_task(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, prio, orig_cpu,
        dest_cpu):
    """perf entry point: forward a task-migration event to the parser."""
    headers = EventHeaders(common_cpu, common_secs, common_nsecs,
                           common_pid, common_comm)
    parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
def sched__sched_switch(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        prev_comm, prev_pid, prev_prio, prev_state,
        next_comm, next_pid, next_prio):
    """perf entry point: forward a context-switch event to the parser."""
    headers = EventHeaders(common_cpu, common_secs, common_nsecs,
                           common_pid, common_comm)
    parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
                        next_comm, next_pid, next_prio)
def sched__sched_wakeup_new(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, prio, success,
        target_cpu):
    """perf entry point: wakeup of a freshly forked task (fork=1)."""
    headers = EventHeaders(common_cpu, common_secs, common_nsecs,
                           common_pid, common_comm)
    parser.wake_up(headers, comm, pid, success, target_cpu, 1)

def sched__sched_wakeup(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, prio, success,
        target_cpu):
    """perf entry point: wakeup of an existing task (fork=0)."""
    headers = EventHeaders(common_cpu, common_secs, common_nsecs,
                           common_pid, common_comm)
    parser.wake_up(headers, comm, pid, success, target_cpu, 0)
# More generated stubs that this tool does not use (see above).
def sched__sched_wait_task(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, prio):
    pass

def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        ret):
    pass

def sched__sched_kthread_stop(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid):
    pass

def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
        common_pid, common_comm):
    # Catch-all for events with no dedicated handler; ignored.
    pass
|
gpl-2.0
|
John443/Recommending-System
|
src/item_pred.py
|
1
|
1843
|
import numpy as np
import utils
import parse
import similarity_functions as sf
import calculate as cal
import math
import copy
def constructMatrix(data, metadata):
    """Build an (items x users) rating matrix from rating triples.

    data: iterable of [user_id, item_id, rating] rows (1-based ids).
    metadata: dict with string-convertible 'users' and 'items' counts.
    Returns a numpy array where cell [item-1, user-1] holds the rating;
    unrated cells stay 0.
    """
    n_users = int(metadata['users'])
    n_items = int(metadata['items'])
    matrix = np.zeros((n_items, n_users))
    for record in data:
        user_idx = int(record[0]) - 1
        item_idx = int(record[1]) - 1
        matrix[item_idx][user_idx] = record[2]
    return matrix
def content_boosted(setno=0):
    """Run content-boosted collaborative filtering on fold *setno*.

    Writes several intermediate matrices under ../output/ and returns
    the mean absolute error of the predictions on the test split.
    """
    training, test, metadata = parse.load(setno)
    ratingMatrix = constructMatrix(training, metadata)
    simMat = sf.cosineMatrix(ratingMatrix)
    np.savetxt('../output/siml.txt', simMat)
    predict = utils.predictRating(simMat, ratingMatrix)
    np.savetxt('../output/pred.txt', predict)
    userRating = utils.constructRatingMatrix(training, metadata)
    v = np.copy(userRating)
    user = int(metadata['users'])
    item = int(metadata['items'])
    # Fill unrated (user, item) cells with the item-based predictions to
    # build the "virtual" full rating matrix.
    for i in range(user):
        for j in range(item):
            if v[i][j] == 0:
                v[i][j] = predict[j][i]
    np.savetxt('../output/virt.txt', v)
    np.savetxt('../output/ratingMatrix.txt', ratingMatrix)
    hw = cal.getHwau(ratingMatrix.transpose())
    sw = utils.calculateSW(ratingMatrix.transpose())
    # NOTE(review): simMat is recomputed here with identical inputs -
    # presumably redundant with the computation above; confirm before
    # removing.
    simMat = sf.cosineMatrix(ratingMatrix)
    np.savetxt('../output/similar.txt', simMat)
    print 'sim done!'
    prediction = utils.contentBoostPred(simMat, ratingMatrix, hw, sw, v)
    np.savetxt('../output/predict.txt', prediction)
    print 'prediction done!'
    # test rows are [user, item, rating]; ids are 1-based.
    predictionOnTest = prediction[test[:, 0].astype(int) - 1, test[:, 1].astype(int) - 1]
    error = predictionOnTest - test[:, 2]
    return np.abs(error).mean()
if __name__ == '__main__':
    # Evaluate folds 1..5, appending each fold's MAE to the result file.
    for i in xrange(1, 6):
        # NOTE(review): shadows the builtin "file" (Python 2).
        file = open('../output/result.txt', 'a+')
        mean = content_boosted(setno=i)
        print mean
        file.write('%s\n' % mean)
        file.close()
|
gpl-3.0
|
rooshilp/CMPUT410Lab6
|
virt_env/virt1/lib/python2.7/site-packages/django/db/migrations/state.py
|
34
|
14420
|
from __future__ import unicode_literals
from django.apps import AppConfig
from django.apps.registry import Apps, apps as global_apps
from django.db import models
from django.db.models.options import DEFAULT_NAMES, normalize_together
from django.db.models.fields.related import do_pending_lookups
from django.db.models.fields.proxy import OrderWrt
from django.conf import settings
from django.utils import six
from django.utils.encoding import force_text, smart_text
from django.utils.module_loading import import_string
class InvalidBasesError(ValueError):
    """Raised when a model's bases cannot (yet) be resolved during rendering."""
    pass
class ProjectState(object):
    """
    Represents the entire project's overall state.

    This is the item that is passed around - we do it here rather than at the
    app level so that cross-app FKs/etc. resolve properly.
    """

    def __init__(self, models=None, real_apps=None):
        # Mapping of (app_label, lowercased model name) -> ModelState.
        self.models = models or {}
        # Cached rendered Apps registry; built lazily by render().
        self.apps = None
        # Apps to include from main registry, usually unmigrated ones
        self.real_apps = real_apps or []

    def add_model_state(self, model_state):
        """Register a ModelState under its (app_label, name) key."""
        self.models[(model_state.app_label, model_state.name.lower())] = model_state

    def clone(self):
        "Returns an exact copy of this ProjectState"
        # ModelStates are cloned too, so mutating the copy is safe.
        return ProjectState(
            models=dict((k, v.clone()) for k, v in self.models.items()),
            real_apps=self.real_apps,
        )

    def render(self, include_real=None, ignore_swappable=False, skip_cache=False):
        "Turns the project state into actual models in a new Apps"
        # NOTE(review): include_real is accepted but never read in this
        # method body - confirm whether callers still pass it.
        if self.apps is None or skip_cache:
            # Any apps in self.real_apps should have all their models included
            # in the render. We don't use the original model instances as there
            # are some variables that refer to the Apps object.
            # FKs/M2Ms from real apps are also not included as they just
            # mess things up with partial states (due to lack of dependencies)
            real_models = []
            for app_label in self.real_apps:
                app = global_apps.get_app_config(app_label)
                for model in app.get_models():
                    real_models.append(ModelState.from_model(model, exclude_rels=True))
            # Populate the app registry with a stub for each application.
            app_labels = set(model_state.app_label for model_state in self.models.values())
            self.apps = Apps([AppConfigStub(label) for label in sorted(self.real_apps + list(app_labels))])
            # We keep trying to render the models in a loop, ignoring invalid
            # base errors, until the size of the unrendered models doesn't
            # decrease by at least one, meaning there's a base dependency loop/
            # missing base.
            unrendered_models = list(self.models.values()) + real_models
            while unrendered_models:
                new_unrendered_models = []
                for model in unrendered_models:
                    try:
                        model.render(self.apps)
                    except InvalidBasesError:
                        new_unrendered_models.append(model)
                if len(new_unrendered_models) == len(unrendered_models):
                    raise InvalidBasesError(
                        "Cannot resolve bases for %r\nThis can happen if you are inheriting models from an "
                        "app with migrations (e.g. contrib.auth)\n in an app with no migrations; see "
                        "https://docs.djangoproject.com/en/1.7/topics/migrations/#dependencies "
                        "for more" % new_unrendered_models
                    )
                unrendered_models = new_unrendered_models
            # make sure apps has no dangling references
            if self.apps._pending_lookups:
                # There's some lookups left. See if we can first resolve them
                # ourselves - sometimes fields are added after class_prepared is sent
                for lookup_model, operations in self.apps._pending_lookups.items():
                    try:
                        model = self.apps.get_model(lookup_model[0], lookup_model[1])
                    except LookupError:
                        app_label = "%s.%s" % (lookup_model[0], lookup_model[1])
                        if app_label == settings.AUTH_USER_MODEL and ignore_swappable:
                            continue
                        # Raise an error with a best-effort helpful message
                        # (only for the first issue). Error message should look like:
                        # "ValueError: Lookup failed for model referenced by
                        # field migrations.Book.author: migrations.Author"
                        msg = "Lookup failed for model referenced by field {field}: {model[0]}.{model[1]}"
                        raise ValueError(msg.format(field=operations[0][1], model=lookup_model))
                    else:
                        do_pending_lookups(model)
        try:
            return self.apps
        finally:
            # When skip_cache is set, discard the registry we just built so
            # the next call re-renders from scratch.
            if skip_cache:
                self.apps = None

    @classmethod
    def from_apps(cls, apps):
        "Takes in an Apps and returns a ProjectState matching it"
        app_models = {}
        for model in apps.get_models(include_swapped=True):
            model_state = ModelState.from_model(model)
            app_models[(model_state.app_label, model_state.name.lower())] = model_state
        return cls(app_models)

    def __eq__(self, other):
        # Equal iff both states describe the same model set and real apps.
        if set(self.models.keys()) != set(other.models.keys()):
            return False
        if set(self.real_apps) != set(other.real_apps):
            return False
        return all(model == other.models[key] for key, model in self.models.items())

    def __ne__(self, other):
        return not (self == other)
class AppConfigStub(AppConfig):
    """
    Stubs a Django AppConfig. Only provides a label, and a dict of models.
    """
    # Not used, but required by AppConfig.__init__
    path = ''

    def __init__(self, label):
        self.label = label
        # Pass None as the app module; the stub never imports real code.
        super(AppConfigStub, self).__init__(label, None)

    def import_models(self, all_models):
        # The registry hands us the already-prepared model dict directly.
        self.models = all_models
class ModelState(object):
    """
    Represents a Django Model. We don't use the actual Model class
    as it's not designed to have its options changed - instead, we
    mutate this one and then render it into a Model as required.

    Note that while you are allowed to mutate .fields, you are not allowed
    to mutate the Field instances inside there themselves - you must instead
    assign new ones, as these are not detached during a clone.
    """

    def __init__(self, app_label, name, fields, options=None, bases=None):
        self.app_label = app_label
        self.name = force_text(name)
        # Ordered list of (field_name, field_instance) 2-tuples.
        self.fields = fields
        self.options = options or {}
        self.bases = bases or (models.Model, )
        # Sanity-check that fields is NOT a dict. It must be ordered.
        if isinstance(self.fields, dict):
            raise ValueError("ModelState.fields cannot be a dict - it must be a list of 2-tuples.")
        # Sanity-check that fields are NOT already bound to a model.
        # NOTE: this loop deliberately reuses (shadows) the *name*
        # parameter; self.name was already assigned above.
        for name, field in fields:
            if hasattr(field, 'model'):
                raise ValueError(
                    'ModelState.fields cannot be bound to a model - "%s" is.' % name
                )

    @classmethod
    def from_model(cls, model, exclude_rels=False):
        """
        Feed me a model, get a ModelState representing it out.

        With exclude_rels=True, relational fields and field-listing
        options are dropped, producing a stub suitable for partial
        rendering.
        """
        # Deconstruct the fields
        fields = []
        for field in model._meta.local_fields:
            if getattr(field, "rel", None) and exclude_rels:
                continue
            if isinstance(field, OrderWrt):
                # order_with_respect_to machinery; re-added at render time.
                continue
            name, path, args, kwargs = field.deconstruct()
            field_class = import_string(path)
            try:
                fields.append((name, field_class(*args, **kwargs)))
            except TypeError as e:
                raise TypeError("Couldn't reconstruct field %s on %s.%s: %s" % (
                    name,
                    model._meta.app_label,
                    model._meta.object_name,
                    e,
                ))
        if not exclude_rels:
            for field in model._meta.local_many_to_many:
                name, path, args, kwargs = field.deconstruct()
                field_class = import_string(path)
                try:
                    fields.append((name, field_class(*args, **kwargs)))
                except TypeError as e:
                    raise TypeError("Couldn't reconstruct m2m field %s on %s: %s" % (
                        name,
                        model._meta.object_name,
                        e,
                    ))
        # Extract the options
        options = {}
        for name in DEFAULT_NAMES:
            # Ignore some special options
            if name in ["apps", "app_label"]:
                continue
            elif name in model._meta.original_attrs:
                if name == "unique_together":
                    ut = model._meta.original_attrs["unique_together"]
                    options[name] = set(normalize_together(ut))
                elif name == "index_together":
                    it = model._meta.original_attrs["index_together"]
                    options[name] = set(normalize_together(it))
                else:
                    options[name] = model._meta.original_attrs[name]
        # Force-convert all options to text_type (#23226)
        options = cls.force_text_recursive(options)
        # If we're ignoring relationships, remove all field-listing model
        # options (that option basically just means "make a stub model")
        if exclude_rels:
            for key in ["unique_together", "index_together", "order_with_respect_to"]:
                if key in options:
                    del options[key]

        def flatten_bases(model):
            bases = []
            for base in model.__bases__:
                if hasattr(base, "_meta") and base._meta.abstract:
                    bases.extend(flatten_bases(base))
                else:
                    bases.append(base)
            return bases

        # We can't rely on __mro__ directly because we only want to flatten
        # abstract models and not the whole tree. However by recursing on
        # __bases__ we may end up with duplicates and ordering issues, we
        # therefore discard any duplicates and reorder the bases according
        # to their index in the MRO.
        flattened_bases = sorted(set(flatten_bases(model)), key=lambda x: model.__mro__.index(x))

        # Make our record
        bases = tuple(
            (
                "%s.%s" % (base._meta.app_label, base._meta.model_name)
                if hasattr(base, "_meta") else
                base
            )
            for base in flattened_bases
        )
        # Ensure at least one base inherits from models.Model
        if not any((isinstance(base, six.string_types) or issubclass(base, models.Model)) for base in bases):
            bases = (models.Model,)
        return cls(
            model._meta.app_label,
            model._meta.object_name,
            fields,
            options,
            bases,
        )

    @classmethod
    def force_text_recursive(cls, value):
        # Recursively coerce all strings inside lists/tuples/sets/dicts
        # to text_type, preserving the container types.
        if isinstance(value, six.string_types):
            return smart_text(value)
        elif isinstance(value, list):
            return [cls.force_text_recursive(x) for x in value]
        elif isinstance(value, tuple):
            return tuple(cls.force_text_recursive(x) for x in value)
        elif isinstance(value, set):
            return set(cls.force_text_recursive(x) for x in value)
        elif isinstance(value, dict):
            return dict(
                (cls.force_text_recursive(k), cls.force_text_recursive(v))
                for k, v in value.items()
            )
        return value

    def construct_fields(self):
        "Deep-clone the fields using deconstruction"
        for name, field in self.fields:
            _, path, args, kwargs = field.deconstruct()
            field_class = import_string(path)
            yield name, field_class(*args, **kwargs)

    def clone(self):
        "Returns an exact copy of this ModelState"
        return self.__class__(
            app_label=self.app_label,
            name=self.name,
            fields=list(self.construct_fields()),
            options=dict(self.options),
            bases=self.bases,
        )

    def render(self, apps):
        "Creates a Model object from our current state into the given apps"
        # First, make a Meta object
        meta_contents = {'app_label': self.app_label, "apps": apps}
        meta_contents.update(self.options)
        meta = type(str("Meta"), tuple(), meta_contents)
        # Then, work out our bases
        try:
            bases = tuple(
                (apps.get_model(base) if isinstance(base, six.string_types) else base)
                for base in self.bases
            )
        except LookupError:
            # A string base could not be resolved yet; the caller retries.
            raise InvalidBasesError("Cannot resolve one or more bases from %r" % (self.bases,))
        # Turn fields into a dict for the body, add other bits
        body = dict(self.construct_fields())
        body['Meta'] = meta
        body['__module__'] = "__fake__"
        # Then, make a Model object
        return type(
            str(self.name),
            bases,
            body,
        )

    def get_field_by_name(self, name):
        """Return the field instance named *name*; raise ValueError if absent."""
        for fname, field in self.fields:
            if fname == name:
                return field
        raise ValueError("No field called %s on model %s" % (name, self.name))

    def __repr__(self):
        return "<ModelState: '%s.%s'>" % (self.app_label, self.name)

    def __eq__(self, other):
        # Fields compare by deconstruction (ignoring the name element,
        # which is compared separately via k1 == k2).
        return (
            (self.app_label == other.app_label) and
            (self.name == other.name) and
            (len(self.fields) == len(other.fields)) and
            all((k1 == k2 and (f1.deconstruct()[1:] == f2.deconstruct()[1:]))
                for (k1, f1), (k2, f2) in zip(self.fields, other.fields)) and
            (self.options == other.options) and
            (self.bases == other.bases)
        )

    def __ne__(self, other):
        return not (self == other)
|
apache-2.0
|
Plourde-Research-Lab/PyQLab
|
instruments/DCSources.py
|
2
|
1333
|
from atom.api import Str, Int, Float, Bool, Enum
from .Instrument import Instrument
class DCSource(Instrument):
    """Base class for DC current/voltage sources (atom trait container)."""
    output = Bool(False).tag(desc='Output enabled')
    mode = Enum('voltage', 'current').tag(desc='Output mode (current or voltage source)')
    value = Float(0.0).tag(desc='Output value (current or voltage)')
class YokoGS200(DCSource):
    """Yokogawa GS200 programmable DC source."""

    outputRange = Enum(1e-3, 10e-3, 100e-3, 200e-3, 1, 10, 30).tag(desc='Output range')

    def json_encode(self, matlabCompatible=False):
        """Serialize; the MATLAB driver expects the key "range" instead of
        "outputRange"."""
        encoded = super(YokoGS200, self).json_encode(matlabCompatible)
        if matlabCompatible:
            encoded['range'] = encoded.pop('outputRange')
        return encoded
class SIM928(Instrument):
    """SRS SIM928 isolated voltage source module (three channels)."""
    ch1Value = Float(0.0).tag(desc="Ch 1 Voltage (V)")
    ch2Value = Float(0.0).tag(desc="Ch 2 Voltage (V)")
    ch3Value = Float(0.0).tag(desc="Ch 3 Voltage (V)")
class TekAFG3022B(DCSource):
    """Tektronix AFG3022B function generator used as a pulsed DC source."""

    outputRange = Enum(1e-3, 10e-3, 100e-3, 200e-3, 1.0, 10.0, 20.0).tag(desc='Output range')
    channel = Int(2).tag(desc='Tek channel')
    width = Float(10.0).tag(desc='Pulse width (ns)')

    def json_encode(self, matlabCompatible=False):
        """Serialize; renames outputRange -> range for the MATLAB driver."""
        jsonDict = super(TekAFG3022B, self).json_encode(matlabCompatible)
        # NOTE(review): the next two pop/re-insert pairs are no-ops apart
        # from moving the keys to the end of the dict - confirm whether
        # they can be dropped.
        jsonDict['channel'] = jsonDict.pop('channel')
        jsonDict['width'] = jsonDict.pop('width')
        if matlabCompatible:
            jsonDict['range'] = jsonDict.pop('outputRange')
        return jsonDict
|
apache-2.0
|
peterfpeterson/mantid
|
Testing/SystemTests/tests/framework/EQSANSEffAPIv2.py
|
3
|
2186
|
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
#pylint: disable=no-init,attribute-defined-outside-init
import systemtesting
from mantid.simpleapi import *
from reduction_workflow.instruments.sans.sns_command_interface import *
from reduction_workflow.instruments.sans.hfir_command_interface import SensitivityCorrection, SetTransmission
from mantid.api import FileFinder
import os
class EQSANSEff(systemtesting.MantidSystemTest):
    """System test: EQSANS event reduction with sensitivity correction,
    comparing the reduced I(q) against a reference workspace."""

    def cleanup(self):
        # Remove the reduction log left behind by runTest, if present.
        absfile = FileFinder.getFullPath("EQSANS_1466_event_reduction.log")
        if os.path.exists(absfile):
            os.remove(absfile)
        return True

    def runTest(self):
        """
        System test for sensitivity correction
        """
        configI = ConfigService.Instance()
        configI["facilityName"]='SNS'
        EQSANS(False)
        AppendDataFile("EQSANS_1466_event.nxs")
        SolidAngle()
        UseConfig(False)
        UseConfigTOFTailsCutoff(False)
        UseConfigMask(False)
        SetBeamCenter(96.29, 126.15)
        # Unit transmission: isolate the sensitivity correction itself.
        SetTransmission(1.0, 0.0)
        TotalChargeNormalization(normalize_to_beam=False)
        SensitivityCorrection("EQSANS_4061_event.nxs", min_sensitivity=0.5, max_sensitivity=1.5, dark_current=None, use_sample_dc=False)
        Reduce1D()
        # Scale to match the normalization of the reference data.
        Scale(InputWorkspace="EQSANS_1466_event_Iq", Factor=277.781,
              Operation='Multiply', OutputWorkspace="EQSANS_1466_event_Iq")

    def validate(self):
        # Be more tolerant with the output, mainly because of the errors.
        # The following tolerance check the errors up to the third digit.
        mtd["EQSANS_1466_event_Iq"].dataE(0)[0]=8.13907
        self.tolerance = 0.1
        self.disableChecking.append('Instrument')
        self.disableChecking.append('Sample')
        self.disableChecking.append('SpectraMap')
        self.disableChecking.append('Axes')
        return "EQSANS_1466_event_Iq", 'EQSANSEff.nxs'
|
gpl-3.0
|
MountainWei/nova
|
nova/api/openstack/compute/schemas/server_metadata.py
|
109
|
1333
|
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from nova.api.validation import parameter_types
# JSON-Schema for POST /servers/{id}/metadata (create/merge metadata).
create = {
    'type': 'object',
    'properties': {
        'metadata': parameter_types.metadata
    },
    'required': ['metadata'],
    'additionalProperties': False,
}

# Single-entry variant of the metadata schema: PUT on an individual key
# must carry exactly one key/value pair.
metadata_update = copy.deepcopy(parameter_types.metadata)
metadata_update.update({
    'minProperties': 1,
    'maxProperties': 1
})

# JSON-Schema for PUT /servers/{id}/metadata/{key} (update one item).
update = {
    'type': 'object',
    'properties': {
        'meta': metadata_update
    },
    'required': ['meta'],
    'additionalProperties': False,
}

# JSON-Schema for PUT /servers/{id}/metadata (replace all metadata);
# intentionally identical in shape to *create*.
update_all = {
    'type': 'object',
    'properties': {
        'metadata': parameter_types.metadata
    },
    'required': ['metadata'],
    'additionalProperties': False,
}
|
apache-2.0
|
YutingZhang/lmdis-rep
|
net_modules/pixel_bias.py
|
1
|
2627
|
import tensorflow as tf
import collections
from prettytensor import layers
from prettytensor import parameters
from prettytensor import pretty_tensor_class as prettytensor
from prettytensor.pretty_tensor_class import PROVIDED
@prettytensor.Register(assign_defaults=('activation_fn', 'parameter_modifier', 'phase'))
class pixel_bias(prettytensor.VarStoreMethod):
    """PrettyTensor method that adds a learned bias to a rank-4 input."""

    def __call__(
        self, input_layer, activation_fn=None, bias=tf.zeros_initializer(), phase=prettytensor.Phase.train,
        parameter_modifier=parameters.identity, name=PROVIDED
    ):
        """
        Adds a learned bias parameter to the input and returns a tensor.

        The current PrettyTensor must have rank 4 with fully known
        non-batch dimensions.

        Args:
          input_layer: The Pretty Tensor object, supplied.
          activation_fn: Optional activation applied after the bias add; may
            be a sequence whose first element is the function and the rest
            extra arguments.
          bias: An initializer for the bias or a Tensor. No bias if set to None.
          phase: The phase of graph construction.  See `pt.Phase`.
          parameter_modifier: A function to modify parameters that is applied after
            creation and before use.
          name: The name for this operation is also used to create/find the
            parameter variables.
        Returns:
          A Pretty Tensor handle to the layer.
        Raises:
          ValueError: if the Pretty Tensor is not rank 4 or the input
            dimensions are not known.
        """
        if input_layer.get_shape().ndims != 4:
            raise ValueError(
                'pixel_bias requires a rank 4 Tensor with known second '
                'dimension: %s' % input_layer.get_shape())
        if input_layer.shape[1] is None or input_layer.shape[2] is None or input_layer.shape[3] is None:
            raise ValueError('input size must be known.')
        x = input_layer.tensor
        dtype = input_layer.dtype
        books = input_layer.bookkeeper
        # NOTE(review): the bias variable is shaped input_layer.shape[2:]
        # (the last two dims only), so it is shared across dim 1 as well
        # as the batch via the expand_dims broadcast below - confirm this
        # is intended given the "pixel" name.
        b = parameter_modifier(
            'bias',
            self.variable('bias', input_layer.shape[2:], bias, dt=dtype),
            phase)
        y = x + tf.expand_dims(b, axis=0)
        if activation_fn is not None:
            if not isinstance(activation_fn, collections.Sequence):
                activation_fn = (activation_fn,)
            y = layers.apply_activation(books,
                                        y,
                                        activation_fn[0],
                                        activation_args=activation_fn[1:])
        books.add_histogram_summary(y, '%s/activations' % y.op.name)
        return input_layer.with_tensor(y, parameters=self.vars)
# pylint: enable=invalid-name
|
apache-2.0
|
est77/appleseed
|
src/appleseed.studio/python/studio/plugins.py
|
6
|
2935
|
#
# This source file is part of appleseed.
# Visit https://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2017-2018 Esteban Tovagliari, The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import os
import imp
import traceback
def load_plugins(bundled_plugins_path):
    """Load bundled plugins, then any user plugins named by the environment.

    Bundled plugins are loaded from *bundled_plugins_path* when it exists;
    user plugins come from the APPLESEED_STUDIO_PLUGIN_PATH env var, if set.
    """
    if not os.path.isdir(bundled_plugins_path):
        print("Skipping loading Python plugins from {0} since that directory does not exist.".format(bundled_plugins_path))
    else:
        print("Loading Python plugins from {0}...".format(bundled_plugins_path))
        load_plugins_from_dir(bundled_plugins_path)

    user_plugins_path = os.environ.get('APPLESEED_STUDIO_PLUGIN_PATH')
    if user_plugins_path is not None:
        print("Loading Python plugins from {0}...".format(user_plugins_path))
        load_plugins_from_dir(user_plugins_path)
def load_plugins_from_dir(bundled_plugins_path):
    """Treat each subdirectory of *bundled_plugins_path* as a plugin and load it."""
    for entry in os.listdir(bundled_plugins_path):
        candidate = os.path.join(bundled_plugins_path, entry)
        if os.path.isdir(candidate):
            load_plugin(candidate)
def load_plugin(plugin_path):
    """Import the plugin module/package at *plugin_path* and call its register().

    Failures (import error, missing register, register raising) are reported
    to stdout and swallowed so one bad plugin cannot break the others.
    """
    path, name = os.path.split(plugin_path)
    name, ext = os.path.splitext(name)

    try:
        # imp.find_module returns an open file object for plain modules
        # (None for packages); the caller is responsible for closing it.
        module_file, filename, data = imp.find_module(name, [path])
        try:
            plugin_module = imp.load_module(name, module_file, filename, data)
        finally:
            # Bug fix: the file handle was previously leaked (and the local
            # shadowed the py2 builtin 'file').
            if module_file is not None:
                module_file.close()
    except ImportError as e:
        print("Plugin '{0}' could not be imported: {1}".format(name, e))
        return

    if not hasattr(plugin_module, 'register'):
        print("Plugin '{0}' has no register function.".format(name))
        return

    try:
        plugin_module.register()
    except Exception as e:
        print("Could not initialize plugin '{0}': {1}".format(name, e))
        traceback.print_exc()
        return

    print("Plugin '{0}' successfully imported.".format(name))
|
mit
|
naresh21/synergetics-edx-platform
|
lms/djangoapps/course_blocks/transformers/start_date.py
|
15
|
2896
|
"""
Start Date Transformer implementation.
"""
from openedx.core.lib.block_structure.transformer import BlockStructureTransformer, FilteringTransformerMixin
from lms.djangoapps.courseware.access_utils import check_start_date
from xmodule.course_metadata_utils import DEFAULT_START_DATE
from .utils import collect_merged_date_field
class StartDateTransformer(FilteringTransformerMixin, BlockStructureTransformer):
    """
    A transformer that enforces the 'start' and 'days_early_for_beta'
    fields on blocks by removing blocks from the block structure for
    which the user does not have access. The 'start' field on a
    block is percolated down to its descendants, so that all blocks
    enforce the 'start' field from their ancestors. The assumed
    'start' value for a block is then the maximum of its parent and its
    own.

    For a block with multiple parents, the assumed parent start date
    value is a computed minimum of the start dates of all its parents.
    So as long as one parent chain allows access, the block has access.

    Staff users are exempted from visibility rules.
    """
    VERSION = 1
    # Name of the per-block field holding the start date merged from ancestors.
    MERGED_START_DATE = 'merged_start_date'

    @classmethod
    def name(cls):
        """
        Unique identifier for the transformer's class;
        same identifier used in setup.py.
        """
        return "start_date"

    @classmethod
    def _get_merged_start_date(cls, block_structure, block_key):
        """
        Returns the merged value for the start date for the block with
        the given block_key in the given block_structure.
        """
        return block_structure.get_transformer_block_field(
            block_key, cls, cls.MERGED_START_DATE, False
        )

    @classmethod
    def collect(cls, block_structure):
        """
        Collects any information that's necessary to execute this
        transformer's transform method.
        """
        block_structure.request_xblock_fields('days_early_for_beta')
        # min across parents (any accessible chain grants access),
        # max down ancestor chains (children can't start before parents).
        collect_merged_date_field(
            block_structure,
            transformer=cls,
            xblock_field_name='start',
            merged_field_name=cls.MERGED_START_DATE,
            default_date=DEFAULT_START_DATE,
            func_merge_parents=min,
            func_merge_ancestors=max,
        )

    def transform_block_filters(self, usage_info, block_structure):
        # Users with staff access bypass the Start Date check.
        if usage_info.has_staff_access:
            return [block_structure.create_universal_filter()]

        # Idiom fix (PEP 8 E731): named def instead of a lambda assignment.
        def removal_condition(block_key):
            """Return True when the user may not yet access block_key."""
            return not check_start_date(
                usage_info.user,
                block_structure.get_xblock_field(block_key, 'days_early_for_beta'),
                self._get_merged_start_date(block_structure, block_key),
                usage_info.course_key,
            )

        return [block_structure.create_removal_filter(removal_condition)]
|
agpl-3.0
|
scotthartbti/android_external_chromium_org
|
mojo/public/bindings/generators/mojom_pack_tests.py
|
26
|
5268
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import mojom
import mojom_pack
import mojom_test
import sys
# Local aliases for the shared test helpers in mojom_test.
EXPECT_EQ = mojom_test.EXPECT_EQ
EXPECT_TRUE = mojom_test.EXPECT_TRUE
RunTest = mojom_test.RunTest
def TestOrdinalOrder():
    """Fields must be ordered by their explicit ordinal, not declaration order."""
    error_count = 0
    test_struct = mojom.Struct('test')
    test_struct.AddField('testfield1', mojom.INT32, 2)
    test_struct.AddField('testfield2', mojom.INT32, 1)
    packed = mojom_pack.PackedStruct(test_struct)
    error_count += EXPECT_EQ(2, len(packed.packed_fields))
    error_count += EXPECT_EQ('testfield2', packed.packed_fields[0].field.name)
    error_count += EXPECT_EQ('testfield1', packed.packed_fields[1].field.name)
    return error_count
def TestZeroFields():
    """A struct with no fields packs to zero packed fields."""
    empty_struct = mojom.Struct('test')
    packed = mojom_pack.PackedStruct(empty_struct)
    return 0 + EXPECT_EQ(0, len(packed.packed_fields))
def TestOneField():
    """A single int8 field packs into exactly one packed field."""
    one_field_struct = mojom.Struct('test')
    one_field_struct.AddField('testfield1', mojom.INT8)
    packed = mojom_pack.PackedStruct(one_field_struct)
    return 0 + EXPECT_EQ(1, len(packed.packed_fields))
def TestSequence(kinds, fields, offsets):
    """Packs a struct built from |kinds| and checks field order and offsets.

    Pass three tuples:
      |kinds| is a sequence of mojom.Kinds that specify the fields that are
        to be created.
      |fields| is the expected order of the resulting fields, with the
        integer "1" first.
      |offsets| is the expected order of offsets, with the integer "0" first.
    Returns the number of failed expectations.
    """
    errors = 0
    struct = mojom.Struct('test')
    index = 1
    for kind in kinds:
        struct.AddField("%d" % index, kind)
        index += 1
    ps = mojom_pack.PackedStruct(struct)
    num_fields = len(ps.packed_fields)
    errors += EXPECT_EQ(len(kinds), num_fields)
    for i in xrange(num_fields):
        # Bug fix: these EXPECT_EQ results were previously discarded, so a
        # wrong field name or offset never counted as an error.
        errors += EXPECT_EQ("%d" % fields[i], ps.packed_fields[i].field.name)
        errors += EXPECT_EQ(offsets[i], ps.packed_fields[i].offset)
    return errors
def TestPaddingPackedInOrder():
    # int8 + uint8 fit in the int32's alignment padding: offsets 0, 1, 4.
    return TestSequence(
        (mojom.INT8, mojom.UINT8, mojom.INT32),
        (1, 2, 3),
        (0, 1, 4))
def TestPaddingPackedOutOfOrder():
    # The trailing uint8 (field 3) is packed back into the padding byte after
    # the leading int8, ahead of the int32.
    return TestSequence(
        (mojom.INT8, mojom.INT32, mojom.UINT8),
        (1, 3, 2),
        (0, 1, 4))
def TestPaddingPackedOverflow():
    kinds = (mojom.INT8, mojom.INT32, mojom.INT16, mojom.INT8, mojom.INT8)
    # 2 bytes should be packed together first, followed by short, then by int.
    fields = (1, 4, 3, 2, 5)
    offsets = (0, 1, 2, 4, 8)
    return TestSequence(kinds, fields, offsets)
def TestAllTypes():
    """Packs one field of every kind and checks the resulting layout."""
    # Bug fix: removed unused locals ('struct' and 'array' were created and
    # never used).
    # NOTE(review): |fields| and |offsets| have 17 entries but only 16 kinds
    # are listed; TestSequence only checks the first len(kinds) entries, so
    # the trailing entries are silently ignored -- confirm intent.
    return TestSequence(
        (mojom.BOOL, mojom.INT8, mojom.STRING, mojom.UINT8,
         mojom.INT16, mojom.DOUBLE, mojom.UINT16,
         mojom.INT32, mojom.UINT32, mojom.INT64,
         mojom.FLOAT, mojom.STRING, mojom.HANDLE,
         mojom.UINT64, mojom.Struct('test'), mojom.Array()),
        (1, 2, 4, 5, 7, 3, 6, 8, 9, 10, 11, 13, 12, 14, 15, 16, 17),
        (0, 1, 2, 4, 6, 8, 16, 24, 28, 32, 40, 44, 48, 56, 64, 72, 80))
def TestPaddingPackedOutOfOrderByOrdinal():
    """Ordinals pick the ordering, but small fields still fill padding bytes."""
    error_count = 0
    test_struct = mojom.Struct('test')
    test_struct.AddField('testfield1', mojom.INT8)
    test_struct.AddField('testfield3', mojom.UINT8, 3)
    test_struct.AddField('testfield2', mojom.INT32, 2)
    packed = mojom_pack.PackedStruct(test_struct)
    error_count += EXPECT_EQ(3, len(packed.packed_fields))
    # The uint8 (ordinal 3) is packed in behind the first int8, altering order.
    error_count += EXPECT_EQ('testfield1', packed.packed_fields[0].field.name)
    error_count += EXPECT_EQ('testfield3', packed.packed_fields[1].field.name)
    error_count += EXPECT_EQ('testfield2', packed.packed_fields[2].field.name)
    # The second byte shares the first int32-sized slot with the first.
    error_count += EXPECT_EQ(0, packed.packed_fields[0].offset)
    error_count += EXPECT_EQ(1, packed.packed_fields[1].offset)
    error_count += EXPECT_EQ(4, packed.packed_fields[2].offset)
    return error_count
def TestBools():
    """Bools pack eight to a byte; the ninth spills over; the int comes last."""
    error_count = 0
    test_struct = mojom.Struct('test')
    test_struct.AddField('bit0', mojom.BOOL)
    test_struct.AddField('bit1', mojom.BOOL)
    test_struct.AddField('int', mojom.INT32)
    for n in range(2, 9):
        test_struct.AddField('bit%d' % n, mojom.BOOL)
    packed = mojom_pack.PackedStruct(test_struct)
    error_count += EXPECT_EQ(10, len(packed.packed_fields))
    # The first 8 bits all share offset 0, one bit position each.
    for n in range(8):
        packed_field = packed.packed_fields[n]
        error_count += EXPECT_EQ(0, packed_field.offset)
        error_count += EXPECT_EQ("bit%d" % n, packed_field.field.name)
        error_count += EXPECT_EQ(n, packed_field.bit)
    # The ninth bit starts a fresh byte.
    error_count += EXPECT_EQ("bit8", packed.packed_fields[8].field.name)
    error_count += EXPECT_EQ(1, packed.packed_fields[8].offset)
    error_count += EXPECT_EQ(0, packed.packed_fields[8].bit)
    # The int32 is placed after all the bools.
    error_count += EXPECT_EQ("int", packed.packed_fields[9].field.name)
    error_count += EXPECT_EQ(4, packed.packed_fields[9].offset)
    return error_count
def Main(args):
    """Runs every packing test and returns the total error count."""
    # NOTE(review): TestOrdinalOrder is defined above but not run here --
    # confirm whether that omission is intentional.
    tests = (
        TestZeroFields,
        TestOneField,
        TestPaddingPackedInOrder,
        TestPaddingPackedOutOfOrder,
        TestPaddingPackedOverflow,
        TestAllTypes,
        TestPaddingPackedOutOfOrderByOrdinal,
        TestBools,
    )
    return sum(RunTest(test) for test in tests)
# Script entry point: the process exit status is the number of failed
# expectations (0 means all tests passed).
if __name__ == '__main__':
    sys.exit(Main(sys.argv[1:]))
|
bsd-3-clause
|
gpmidi/fragforce.org
|
fforg/settings.py
|
1
|
16780
|
"""
Django settings for fforg project on Heroku. For more info, see:
https://github.com/heroku/heroku-django-template
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
from datetime import timedelta
import dj_database_url
import django_heroku
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: don't run with debug turned on in production!
# DEBUG defaults to on; any value other than (case-insensitive) 'true' disables it.
DEBUG = bool(os.environ.get('DEBUG', 'True').lower() == 'true')

# SECURITY WARNING: keep the secret key used in production secret!
# The 'INSECURE' sentinel is tolerated (with a warning) only under DEBUG;
# otherwise startup fails hard.
SECRET_KEY = os.environ.get('SECRET_KEY', 'INSECURE')
if SECRET_KEY == 'INSECURE':
    if DEBUG:
        import warnings
        warnings.warn('INSECURE SECRET_KEY!', RuntimeWarning)
    else:
        raise ValueError("SECRET_KEY env var must be defined when not in DEBUG=True")

# FIXME: Add LOGZ.IO Logging
# Used below as the logzio handler token; may be None when unset.
LOGZIO_API_KEY = os.environ.get('LOGZIO_API_KEY', None)

# Application definition
STREAM_DASH_BASE = os.environ.get("STREAM_DASH_BASE", "https://stream.fragforce.org")

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    # Disable Django's own staticfiles handling in favour of WhiteNoise, for
    # greater consistency between gunicorn and `./manage.py runserver`. See:
    # http://whitenoise.evans.io/en/stable/django.html#using-whitenoise-in-development
    'whitenoise.runserver_nostatic',
    'django.contrib.staticfiles',
    'memoize',
    # Project apps.
    'ffsite',
    'ffsfdc',
    'ffdonations',
    'ffstream',
]
# Middleware order matters; WhiteNoise sits directly after SecurityMiddleware.
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'whitenoise.middleware.WhiteNoiseMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'fforg.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                # Project context processors (org info, donation totals).
                'ffsite.ctx.common_org',
                'ffdonations.ctx.donations',
            ],
            'debug': DEBUG,
        },
    },
]

WSGI_APPLICATION = 'fforg.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
# Local sqlite defaults; both entries are overridden from env URLs below.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    },
    # 'hc': read-only Heroku Connect mirror (routed via fforg.router.HCRouter).
    'hc': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db-hc.sqlite3'),
    },
}
DATABASE_ROUTERS = ["fforg.router.HCRouter", ]

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Change 'default' database configuration with $DATABASE_URL.
DATABASES['default'].update(dj_database_url.config(conn_max_age=500, ssl_require=True))
DATABASES['hc'].update(dj_database_url.config(conn_max_age=500, ssl_require=True, env="HC_RO_URL"))
try:
    # Pin the Heroku Connect schema via postgres search_path.
    DATABASES['hc']['OPTIONS']['options'] = '-c search_path=%s' % os.environ.get('HC_RO_SCHEMA', 'org')
except KeyError as e:
    # NOTE(review): swallows the case where 'OPTIONS' is absent (e.g. the
    # sqlite fallback when HC_RO_URL is unset) -- presumably intentional.
    pass
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')

# Allow all host headers
ALLOWED_HOSTS = ['*']

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')
STATIC_URL = '/static/'

# Extra places for collectstatic to find static files.
STATICFILES_DIRS = [
    os.path.join(PROJECT_ROOT, 'static'),
]

# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
SECURE_SSL_REDIRECT = True

# Heroku auto set
HEROKU_APP_ID = os.environ.get('HEROKU_APP_ID', None)
HEROKU_APP_NAME = os.environ.get('HEROKU_APP_NAME', None)
HEROKU_RELEASE_CREATED_AT = os.environ.get('HEROKU_RELEASE_CREATED_AT', None)
HEROKU_RELEASE_VERSION = os.environ.get('HEROKU_RELEASE_VERSION', 'v1')
# e.g. 'v42' -> 42; used below to version (and so auto-flush) the cache.
HEROKU_RELEASE_VERSION_NUM = int(HEROKU_RELEASE_VERSION.lstrip('v'))
HEROKU_SLUG_COMMIT = os.environ.get('HEROKU_SLUG_COMMIT', None)
HEROKU_SLUG_DESCRIPTION = os.environ.get('HEROKU_SLUG_DESCRIPTION', None)

# Donation totals folded into fundraising progress (floats; units presumably
# currency -- confirm against ffdonations).
SINGAPORE_DONATIONS = float(os.environ.get('SINGAPORE_DONATIONS', '0.0'))
OTHER_DONATIONS = float(os.environ.get('OTHER_DONATIONS', '0.0'))
TARGET_DONATIONS = float(os.environ.get('TARGET_DONATIONS', '1.0'))

# Cache version prefix
VERSION = int(HEROKU_RELEASE_VERSION_NUM)

# Max rows for api to return
MAX_API_ROWS = int(os.environ.get('MAX_API_ROWS', 1024))
# Redis endpoint selection. Three layouts are supported:
#   1. REDIS_URL set: one server, logical DBs /1../4 split by purpose.
#   2. REDIS0_URL..REDIS4_URL set: one server per purpose, DB /0 on each.
#   3. Neither set: localhost fallbacks (same shape as case 2).
if os.environ.get('REDIS_URL', None):
    REDIS_URL_DEFAULT = 'redis://localhost'
    # Base URL - Needs DB ID added
    REDIS_URL_BASE = os.environ.get('REDIS_URL', REDIS_URL_DEFAULT)
    # Don't use DB 0 for anything
    REDIS_URL_DEFAULT = REDIS_URL_BASE + "/0"
    # Celery tasks
    REDIS_URL_TASKS = REDIS_URL_BASE + "/1"
    # Celery tombstones (aka results)
    REDIS_URL_TOMBS = REDIS_URL_BASE + "/2"
    # Misc timers
    REDIS_URL_TIMERS = REDIS_URL_BASE + "/3"
    # Django cache
    REDIS_URL_DJ_CACHE = REDIS_URL_BASE + "/4"
elif os.environ.get('REDIS0_URL', None):
    REDIS_URL_DEFAULT = 'redis://localhost'
    # Base URL - Needs DB ID added
    REDIS_URL_BASE = REDIS_URL_DEFAULT
    # Don't use DB 0 for anything
    REDIS_URL_DEFAULT = os.environ.get('REDIS0_URL', 'redis://localhost') + "/0"
    # Celery tasks
    REDIS_URL_TASKS = os.environ.get('REDIS1_URL', 'redis://localhost') + "/0"
    # Celery tombstones (aka results)
    REDIS_URL_TOMBS = os.environ.get('REDIS2_URL', 'redis://localhost') + "/0"
    # Misc timers
    REDIS_URL_TIMERS = os.environ.get('REDIS3_URL', 'redis://localhost') + "/0"
    # Django cache
    REDIS_URL_DJ_CACHE = os.environ.get('REDIS4_URL', 'redis://localhost') + "/0"
else:
    # NOTE(review): this branch is identical to the elif branch above (the
    # REDIS*_URL lookups all fall back to localhost) -- confirm intent.
    REDIS_URL_DEFAULT = 'redis://localhost'
    # Base URL - Needs DB ID added
    REDIS_URL_BASE = REDIS_URL_DEFAULT
    # Don't use DB 0 for anything
    REDIS_URL_DEFAULT = os.environ.get('REDIS0_URL', 'redis://localhost') + "/0"
    # Celery tasks
    REDIS_URL_TASKS = os.environ.get('REDIS1_URL', 'redis://localhost') + "/0"
    # Celery tombstones (aka results)
    REDIS_URL_TOMBS = os.environ.get('REDIS2_URL', 'redis://localhost') + "/0"
    # Misc timers
    REDIS_URL_TIMERS = os.environ.get('REDIS3_URL', 'redis://localhost') + "/0"
    # Django cache
    REDIS_URL_DJ_CACHE = os.environ.get('REDIS4_URL', 'redis://localhost') + "/0"
# Celery: JSON payloads only, late acks, redis broker and result backend.
CELERY_ACCEPT_CONTENT = ['json', ]
CELERY_TASK_TRACK_STARTED = True
CELERY_TASK_ACKS_LATE = True
CELERY_BROKER_URL = REDIS_URL_TASKS
CELERY_RESULT_BACKEND = REDIS_URL_TOMBS
CELERY_WORKER_HIJACK_ROOT_LOGGER = False

GOOGLE_ANALYTICS_ID = os.environ.get('GOOGLE_ANALYTICS_ID', None)

# Event-listing page size limits.
MAX_UPCOMING_EVENTS = int(os.environ.get('MAX_UPCOMING_EVENTS', 20))
MAX_PAST_EVENTS = int(os.environ.get('MAX_PAST_EVENTS', 20))
MAX_ALL_EVENTS = int(os.environ.get('MAX_ALL_EVENTS', 20))

# Tiltify API access (timeout in seconds).
TILTIFY_TOKEN = os.environ.get('TILTIFY_TOKEN', None)
TILTIFY_TIMEOUT = int(os.environ.get('TILTIFY_TIMEOUT', 60))
TILTIFY_APP_OWNER = os.environ.get('TILTIFY_APP_OWNER', HEROKU_APP_NAME)

# Various view cache timeouts (seconds)
VIEW_TEAMS_CACHE = int(os.environ.get('VIEW_TEAMS_CACHE', 20))
VIEW_PARTICIPANTS_CACHE = int(os.environ.get('VIEW_PARTICIPANTS_CACHE', 20))
VIEW_DONATIONS_CACHE = int(os.environ.get('VIEW_DONATIONS_CACHE', 20))
VIEW_DONATIONS_STATS_CACHE = int(os.environ.get('VIEW_DONATIONS_STATS_CACHE', 20))
VIEW_SITE_EVENT_CACHE = int(os.environ.get('VIEW_SITE_EVENT_CACHE', 60))
VIEW_SITE_SITE_CACHE = int(os.environ.get('VIEW_SITE_SITE_CACHE', 60))
VIEW_SITE_STATIC_CACHE = int(os.environ.get('VIEW_SITE_STATIC_CACHE', 300))

# Extra-Life (EL) / donation refresh cadence; all env values are minutes
# converted to timedeltas.
# Min time between team updates - Only cares about tracked teams!
EL_TEAM_UPDATE_FREQUENCY_MIN = timedelta(minutes=int(os.environ.get('EL_TEAM_UPDATE_FREQUENCY_MIN', 30)))
# Max time between updates for any given team - Only cares about tracked teams!
EL_TEAM_UPDATE_FREQUENCY_MAX = timedelta(minutes=int(os.environ.get('EL_TEAM_UPDATE_FREQUENCY_MAX', 120)))
# How often to check for updates
EL_TEAM_UPDATE_FREQUENCY_CHECK = timedelta(minutes=int(os.environ.get('EL_TEAM_UPDATE_FREQUENCY_CHECK', 5)))
# Min time between participants updates - Only cares about tracked participants!
EL_PTCP_UPDATE_FREQUENCY_MIN = timedelta(minutes=int(os.environ.get('EL_PTCP_UPDATE_FREQUENCY_MIN', 120)))
# Max time between updates for any given participants - Only cares about tracked participants!
EL_PTCP_UPDATE_FREQUENCY_MAX = timedelta(minutes=int(os.environ.get('EL_PTCP_UPDATE_FREQUENCY_MAX', 300)))
# How often to check for updates
EL_PTCP_UPDATE_FREQUENCY_CHECK = timedelta(minutes=int(os.environ.get('EL_PTCP_UPDATE_FREQUENCY_CHECK', 30)))
# Min time between donation list updates - Only cares about tracked teams/participants!
EL_DON_UPDATE_FREQUENCY_MIN = timedelta(minutes=int(os.environ.get('EL_DON_UPDATE_FREQUENCY_MIN', 60)))
# Max time between updates for any given donation list - Only cares about tracked teams/participants!
EL_DON_UPDATE_FREQUENCY_MAX = timedelta(minutes=int(os.environ.get('EL_DON_UPDATE_FREQUENCY_MAX', 300)))
# How often to check for updates
EL_DON_UPDATE_FREQUENCY_CHECK = timedelta(minutes=int(os.environ.get('EL_DON_UPDATE_FREQUENCY_CHECK', 15)))
# Min time between donation list updates for a team - Only cares about tracked teams
EL_DON_TEAM_UPDATE_FREQUENCY_MIN = timedelta(minutes=int(os.environ.get('EL_DON_TEAM_UPDATE_FREQUENCY_MIN', 5)))
# Max time between updates of donations for any given team - Only cares about tracked teams
EL_DON_TEAM_UPDATE_FREQUENCY_MAX = timedelta(minutes=int(os.environ.get('EL_DON_TEAM_UPDATE_FREQUENCY_MAX', 15)))
# Min time between donation list updates for a participants - Only cares about tracked participants
EL_DON_PTCP_UPDATE_FREQUENCY_MIN = timedelta(minutes=int(os.environ.get('EL_DON_PTCP_UPDATE_FREQUENCY_MIN', 5)))
# Max time between updates of donations for any given participants - Only cares about tracked participants
EL_DON_PTCP_UPDATE_FREQUENCY_MAX = timedelta(minutes=int(os.environ.get('EL_DON_PTCP_UPDATE_FREQUENCY_MAX', 15)))
# Min time between EL REST requests (seconds)
EL_REQUEST_MIN_TIME = timedelta(seconds=int(os.environ.get('EL_REQUEST_MIN_TIME_SECONDS', 15)))
# Min time between EL REST requests for any given URL
EL_REQUEST_MIN_TIME_URL = timedelta(seconds=int(os.environ.get('EL_REQUEST_MIN_TIME_URL_SECONDS', 120)))
# Min time between request for any given remote host
REQUEST_MIN_TIME_HOST = timedelta(seconds=int(os.environ.get('REQUEST_MIN_TIME_HOST_SECONDS', 5)))
# How often to check for updates
TIL_TEAMS_UPDATE_FREQUENCY_CHECK = timedelta(minutes=int(os.environ.get('TIL_TEAMS_UPDATE_FREQUENCY_CHECK', 10)))
# How long to wait in seconds after getting a parent before fetching any children
TF_UPDATE_WAIT = timedelta(seconds=int(os.environ.get('TF_UPDATE_WAIT', 120)))
# Comma separated list of tiltify teams (the slugs or IDs) to monitor
TILTIFY_TEAMS = os.environ.get('TILTIFY_TEAMS', 'fragforce').split(',')
# Cache Configuration
# NOTE(review): after every branch above, REDIS_URL_DEFAULT ends with "/0"
# while REDIS_URL_BASE does not, so this equality appears to never hold and
# the dummy-cache branch looks unreachable -- confirm the intended condition.
if REDIS_URL_BASE and REDIS_URL_BASE == REDIS_URL_DEFAULT:
    # Dev and release config
    CACHES = {
        'default': {
            'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
        },
    }
else:
    def make_key_hash(key, key_prefix, version):
        """Create a hashed cache key (sha512 of prefix:version:key)."""
        import hashlib
        m = hashlib.sha512()
        # Bug fix: hashlib.update() requires bytes on Python 3; passing the
        # joined str raised TypeError on every cache access.
        m.update(':'.join([key_prefix, str(version), key]).encode('utf-8'))
        return m.hexdigest()

    def make_key_nohash(key, key_prefix, version):
        """Create a plain 'prefix:version:key' cache key."""
        return ':'.join([key_prefix, str(version), key])

    if os.environ.get('DJANGO_CACHE_HASH', 'false').lower() == 'true':
        make_key = make_key_hash
    else:
        make_key = make_key_nohash

    # Real config
    CACHES = {
        'default': {
            'BACKEND': 'redis_cache.cache.RedisCache',
            'LOCATION': REDIS_URL_DJ_CACHE,
            'TIMEOUT': int(os.environ.get('REDIS_DJ_TIMEOUT', 300)),
            'OPTIONS': {
                'PARSER_CLASS': 'redis.connection.HiredisParser',
                'SOCKET_TIMEOUT': int(os.environ.get('REDIS_DJ_SOCKET_TIMEOUT', 5)),
                'SOCKET_CONNECT_TIMEOUT': int(os.environ.get('REDIS_DJ_SOCKET_CONNECT_TIMEOUT', 3)),
                'CONNECTION_POOL_CLASS': 'redis.BlockingConnectionPool',
                'CONNECTION_POOL_CLASS_KWARGS': {
                    'max_connections': int(os.environ.get('REDIS_DJ_POOL_MAX_CONN', 5)),
                    'timeout': int(os.environ.get('REDIS_DJ_POOL_TIMEOUT', 3)),
                },
                # 'SERIALIZER_CLASS': 'redis_cache.serializers.JSONSerializer',
                # 'SERIALIZER_CLASS_KWARGS': {},
                # Used to auto flush cache when new builds happen :-D
                'VERSION': HEROKU_RELEASE_VERSION_NUM,
                'KEY_PREFIX': '_'.join([str(HEROKU_APP_ID), str(HEROKU_APP_NAME)]),
                'KEY_FUNCTION': make_key,
            },
        },
    }

    if os.environ.get('DJANGO_COMPRESS_REDIS', 'false').lower() == 'true':
        CACHES['default']['OPTIONS']['COMPRESSOR_CLASS'] = 'redis_cache.compressors.ZLibCompressor'
        CACHES['default']['OPTIONS']['COMPRESSOR_CLASS_KWARGS'] = {
            # level = 0 - 9
            # 0 - no compression
            # 1 - fastest, biggest
            # 9 - slowest, smallest
            'level': int(os.environ.get('DJANGO_COMPRESS_REDIS_ZLIB_LEVEL', 1)),
        }
# Second to last
# Periodic Celery tasks; schedules are the env-driven timedeltas defined above.
CELERY_BEAT_SCHEDULE = {
    'update-all-teams': {
        'task': 'ffdonations.tasks.teams.update_teams_if_needed',
        'schedule': EL_TEAM_UPDATE_FREQUENCY_CHECK,
    },
    'update-all-participants': {
        'task': 'ffdonations.tasks.participants.update_participants_if_needed',
        'schedule': EL_PTCP_UPDATE_FREQUENCY_CHECK,
    },
    'update-all-donations': {
        'task': 'ffdonations.tasks.donations.update_donations_if_needed',
        'schedule': EL_DON_UPDATE_FREQUENCY_CHECK,
    },
    'til-update-all-teams': {
        'task': 'ffdonations.tasks.tiltify.teams.update_teams',
        'schedule': TIL_TEAMS_UPDATE_FREQUENCY_CHECK,
    },
}

LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'verbose': {
            'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
        },
        'logzioFormat': {
            'format': '{"source": "django"}'
        }
    },
    'handlers': {
        'console': {
            'class': 'logging.StreamHandler',
            'level': 'INFO',
            'formatter': 'verbose'
        },
        # NOTE(review): LOGZIO_API_KEY may be None when the env var is unset;
        # the handler is still installed -- confirm the handler tolerates that.
        'logzio': {
            'class': 'logzio.handler.LogzioHandler',
            'level': 'DEBUG',
            'formatter': 'logzioFormat',
            'token': LOGZIO_API_KEY,
            'logzio_type': "django",
            'logs_drain_timeout': 5,
            'url': 'https://listener.logz.io:8071',
            'debug': True
        },
    },
    'loggers': {
        'django': {
            'handlers': ['console', 'logzio'],
            'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO')
        },
        # NOTE(review): a logger literally named 'root' under 'loggers' is
        # unusual (the root logger is normally the top-level 'root' key, and
        # '' below already targets it) -- confirm intent.
        'root': {
            'handlers': ['console', 'logzio'],
            # 'level': 'INFO'
        },
        '': {
            'handlers': ['console', 'logzio'],
            # 'level': 'INFO'
        },
    }
}

# Activate Django-Heroku - Very last
django_heroku.settings(locals())
|
gpl-2.0
|
tinysun212/swift-windows
|
tools/SourceKit/bindings/python/sourcekitd/capi.py
|
60
|
17369
|
# capi.py - sourcekitd Python Bindings -*- python -*-
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
from ctypes import (
CFUNCTYPE,
POINTER,
Structure,
addressof,
c_bool,
c_char_p,
c_int,
c_int64,
c_size_t,
c_uint64,
c_void_p,
cdll,
py_object,
string_at,
)
# ctypes doesn't implicitly convert c_void_p to the appropriate wrapper
# object. This is a problem, because it means that from_parameter will see an
# integer and pass the wrong value on platforms where int != void*. Work around
# this by marshalling object arguments as void**.
c_object_p = POINTER(c_void_p)

# Registry of CFUNCTYPE callback factories, looked up by name (e.g.
# 'array_applier' / 'dictionary_applier' in Variant below); presumably
# populated during library configuration -- confirm where it is filled.
callbacks = {}
# Structures and Utility Classes
class CachedProperty(object):
    """Non-data descriptor that memoizes a property's value.

    The first access runs the wrapped function once; its result is then
    stored on the instance under the same name, shadowing this descriptor
    so subsequent accesses are plain attribute lookups.
    """

    def __init__(self, wrapped):
        self.wrapped = wrapped
        self.__doc__ = getattr(wrapped, '__doc__', None)

    def __get__(self, instance, instance_type=None):
        # Class-level access returns the descriptor itself.
        if instance is None:
            return self
        result = self.wrapped(instance)
        setattr(instance, self.wrapped.__name__, result)
        return result
class Object(object):
    """Python wrapper over a sourcekitd request object.

    Accepts another Object (retained), an integer/bool, a string, a UIdent,
    a dict (keys become UIDs), or a list/tuple, and builds the corresponding
    sourcekitd request value via conf.lib. Raises ValueError otherwise.
    """

    def __init__(self, obj):
        import sys
        if sys.version_info[0] >= 3:
            # Python 3 folded 'long' into 'int'.
            integer_types = (int, bool)
        else:
            integer_types = (int, long, bool)  # noqa: F821 -- py2 builtin
        if isinstance(obj, Object):
            self._obj = conf.lib.sourcekitd_request_retain(obj)
        elif isinstance(obj, integer_types):
            self._obj = conf.lib.sourcekitd_request_int64_create(obj)
        elif isinstance(obj, str):
            self._obj = conf.lib.sourcekitd_request_string_create(obj)
        elif isinstance(obj, UIdent):
            self._obj = conf.lib.sourcekitd_request_uid_create(obj)
        elif isinstance(obj, dict):
            self._obj = conf.lib.sourcekitd_request_dictionary_create(
                POINTER(c_void_p)(), POINTER(c_void_p)(), 0)
            self._as_parameter_ = self._obj
            # Bug fix: dict.iteritems() does not exist on Python 3; items()
            # behaves identically for this purpose on both major versions.
            for k, v in obj.items():
                conf.lib.sourcekitd_request_dictionary_set_value(
                    self,
                    UIdent(k), Object(v))
        elif isinstance(obj, (list, tuple)):
            self._obj = conf.lib.sourcekitd_request_array_create(
                POINTER(c_void_p)(), 0)
            self._as_parameter_ = self._obj
            for v in obj:
                conf.lib.sourcekitd_request_array_set_value(
                    self, -1, Object(v))
        else:
            raise ValueError("wrong init parameter (%s)" % type(obj))
        self._as_parameter_ = self._obj

    def from_param(self):
        """ctypes protocol: marshal as the underlying request object."""
        return self._as_parameter_

    def __del__(self):
        if self._obj:
            conf.lib.sourcekitd_request_release(self)

    def __repr__(self):
        ptr = conf.lib.sourcekitd_request_description_copy(self)
        s = string_at(ptr)
        conf.free(ptr)
        return s
class Response(object):
    """Wrapper over a sourcekitd response object; owns and disposes it."""

    def __init__(self, obj):
        if not isinstance(obj, c_object_p):
            raise ValueError("wrong init parameter (%s)" % type(obj))
        self._obj = self._as_parameter_ = obj

    def get_payload(self):
        """Return the response's value (via sourcekitd_response_get_value)."""
        return conf.lib.sourcekitd_response_get_value(self)

    def from_param(self):
        """ctypes protocol: marshal as the underlying response object."""
        return self._as_parameter_

    def __del__(self):
        if self._obj:
            conf.lib.sourcekitd_response_dispose(self)

    def __repr__(self):
        ptr = conf.lib.sourcekitd_response_description_copy(self)
        description = string_at(ptr)
        conf.free(ptr)
        return description
class UIdent(object):
    """Wrapper over a sourcekitd UID; identity-compared by pointer address."""

    def __init__(self, obj):
        # The accepted types are mutually exclusive, so order is irrelevant.
        if isinstance(obj, UIdent):
            self._obj = obj._obj
        elif isinstance(obj, c_object_p):
            self._obj = obj
        elif isinstance(obj, str):
            self._obj = conf.lib.sourcekitd_uid_get_from_cstr(obj)
        else:
            raise ValueError("wrong init parameter (%s)" % type(obj))
        self._as_parameter_ = self._obj

    def __str__(self):
        return conf.lib.sourcekitd_uid_get_string_ptr(self)

    def from_param(self):
        """ctypes protocol: marshal as the underlying UID."""
        return self._as_parameter_

    def __repr__(self):
        return "UIdent('%s')" % self.__str__()

    def _ptr(self):
        """Address of the underlying UID; the identity used for comparisons."""
        return addressof(self._obj.contents)

    def __eq__(self, other):
        return self._ptr() == UIdent(other)._ptr()

    def __ne__(self, other):
        return self._ptr() != UIdent(other)._ptr()

    def __hash__(self):
        return hash(self._ptr())
class ErrorKind(object):
    """Enumeration-like registry of sourcekitd error kinds."""

    # The unique kind objects, indexed by integer id (sparse, grown on demand).
    _kinds = []
    # Lazily built {instance: attribute-name}; reset whenever a kind is added.
    _name_map = None

    def __init__(self, value):
        if value >= len(ErrorKind._kinds):
            # Grow the registry so _kinds[value] exists.
            ErrorKind._kinds += [None] * (value - len(ErrorKind._kinds) + 1)
        if ErrorKind._kinds[value] is not None:
            raise ValueError('ErrorKind already loaded')
        self.value = value
        ErrorKind._kinds[value] = self
        ErrorKind._name_map = None

    def from_param(self):
        """ctypes protocol: marshal as the integer kind id."""
        return self.value

    @property
    def name(self):
        """Get the enumeration name of this error kind."""
        if self._name_map is None:
            mapping = {}
            for attr, member in ErrorKind.__dict__.items():
                if isinstance(member, ErrorKind):
                    mapping[member] = attr
            self._name_map = mapping
        return self._name_map[self]

    @staticmethod
    def from_id(id):
        if id >= len(ErrorKind._kinds) or ErrorKind._kinds[id] is None:
            raise ValueError('Unknown type kind {}'.format(id))
        return ErrorKind._kinds[id]

    def __repr__(self):
        return 'ErrorKind.%s' % (self.name,)
# Well-known error kinds; values mirror the native sourcekitd error enum.
ErrorKind.CONNECTION_INTERRUPTED = ErrorKind(1)
ErrorKind.REQUEST_INVALID = ErrorKind(2)
ErrorKind.REQUEST_FAILED = ErrorKind(3)
ErrorKind.REQUEST_CANCELLED = ErrorKind(4)
class Variant(Structure):
    """ctypes mirror of the native sourcekitd variant value (3 x 64 bits).

    Passed by value to the C API; the ``to_python_*`` helpers convert a
    variant (recursively) into plain Python objects.
    """

    _fields_ = [
        ("data", c_uint64 * 3)]

    def to_python_object(self):
        # Dispatch on the dynamic type reported by the library.
        var_ty = conf.lib.sourcekitd_variant_get_type(self)
        if var_ty == VariantType.NULL:
            return None
        elif var_ty == VariantType.DICTIONARY:
            return self.to_python_dictionary()
        elif var_ty == VariantType.ARRAY:
            return self.to_python_array()
        elif var_ty == VariantType.INT64:
            return conf.lib.sourcekitd_variant_int64_get_value(self)
        elif var_ty == VariantType.STRING:
            return conf.lib.sourcekitd_variant_string_get_ptr(self)
        elif var_ty == VariantType.UID:
            return UIdent(conf.lib.sourcekitd_variant_uid_get_value(self))
        else:
            # BOOL is the only remaining known variant type.
            assert(var_ty == VariantType.BOOL)
            return conf.lib.sourcekitd_variant_bool_get_value(self)

    def to_python_array(self):
        """Convert an ARRAY variant into a Python list (recursively)."""
        def applier(index, value, arr):
            arr.append(value.to_python_object())
            # continue the C-side iteration (returning 0 would stop it)
            return 1
        arr = []
        conf.lib.sourcekitd_variant_array_apply_f(
            self, callbacks['array_applier'](applier), arr)
        return arr

    def to_python_dictionary(self):
        """Convert a DICTIONARY variant into a Python dict (recursively)."""
        def applier(cobj, value, d):
            # Keys are native UIDs; stringify them for the Python dict.
            d[str(UIdent(cobj))] = value.to_python_object()
            # continue the C-side iteration
            return 1
        d = {}
        conf.lib.sourcekitd_variant_dictionary_apply_f(
            self, callbacks['dictionary_applier'](applier), d)
        return d
class VariantType(object):
    """Enumeration of sourcekitd variant types.

    Each instance registers itself in the class-level ``_kinds`` table by
    integer value; ``from_id`` maps a raw integer back to its singleton.
    """

    # The unique kind objects, indexed by id.
    _kinds = []
    # Lazily-built cache mapping instance -> attribute name (see ``name``).
    _name_map = None

    def __init__(self, value):
        if value >= len(VariantType._kinds):
            # Grow the table so _kinds[value] is addressable.
            VariantType._kinds += [None] * \
                (value - len(VariantType._kinds) + 1)
        if VariantType._kinds[value] is not None:
            raise ValueError('VariantType already loaded')
        self.value = value
        VariantType._kinds[value] = self
        # Invalidate the name cache; it is rebuilt on next ``name`` access.
        VariantType._name_map = None

    def from_param(self):
        # ctypes conversion hook: marshal as the raw integer value.
        return self.value

    @property
    def name(self):
        """Get the enumeration name of this variant type."""
        if VariantType._name_map is None:
            # Build the cache on the class (the original assigned to
            # self._name_map, creating a per-instance shadow that made
            # the cache ineffective across instances).
            name_map = {}
            for key, value in VariantType.__dict__.items():
                if isinstance(value, VariantType):
                    name_map[value] = key
            VariantType._name_map = name_map
        return VariantType._name_map[self]

    @staticmethod
    def from_id(id):
        """Return the registered VariantType for *id*; raise ValueError if unknown."""
        if id >= len(VariantType._kinds) or VariantType._kinds[id] is None:
            raise ValueError('Unknown type kind {}'.format(id))
        return VariantType._kinds[id]

    def __repr__(self):
        return 'VariantType.%s' % (self.name,)
# Well-known variant types; values mirror the native sourcekitd enum.
VariantType.NULL = VariantType(0)
VariantType.DICTIONARY = VariantType(1)
VariantType.ARRAY = VariantType(2)
VariantType.INT64 = VariantType(3)
VariantType.STRING = VariantType(4)
VariantType.UID = VariantType(5)
VariantType.BOOL = VariantType(6)
# Now comes the plumbing to hook up the C library.
# Register callback types in common container.
# array applier: (index, value, context) -> int (non-zero to continue)
callbacks['array_applier'] = CFUNCTYPE(c_int, c_size_t, Variant, py_object)
# dictionary applier: (uid key, value, context) -> int (non-zero to continue)
callbacks['dictionary_applier'] = CFUNCTYPE(
    c_int, c_object_p, Variant, py_object)
# Functions in (mostly) alphabetical order.
# Each entry is (name, argtypes[, restype[, errcheck]]); register_function
# applies these attributes to the matching symbol of the loaded library.
functionList = [
    ("sourcekitd_cancel_request",
     [c_void_p]),
    ("sourcekitd_initialize",
     None),
    ("sourcekitd_request_array_create",
     [POINTER(c_object_p), c_size_t],
     c_object_p),
    ("sourcekitd_request_array_set_int64",
     [Object, c_size_t, c_int64]),
    ("sourcekitd_request_array_set_string",
     [Object, c_size_t, c_char_p]),
    ("sourcekitd_request_array_set_stringbuf",
     [Object, c_size_t, c_char_p, c_size_t]),
    ("sourcekitd_request_array_set_uid",
     [Object, c_size_t, UIdent]),
    ("sourcekitd_request_array_set_value",
     [Object, c_size_t, Object]),
    ("sourcekitd_request_create_from_yaml",
     [c_char_p, POINTER(c_char_p)],
     c_object_p),
    ("sourcekitd_request_description_copy",
     [Object],
     c_void_p),
    ("sourcekitd_request_description_dump",
     [Object]),
    ("sourcekitd_request_dictionary_create",
     [POINTER(c_object_p), POINTER(c_object_p), c_size_t],
     c_object_p),
    ("sourcekitd_request_dictionary_set_int64",
     [Object, UIdent, c_int64]),
    ("sourcekitd_request_dictionary_set_string",
     [Object, UIdent, c_char_p]),
    ("sourcekitd_request_dictionary_set_stringbuf",
     [Object, UIdent, c_char_p, c_size_t]),
    ("sourcekitd_request_dictionary_set_uid",
     [Object, UIdent, UIdent]),
    ("sourcekitd_request_dictionary_set_value",
     [Object, UIdent, Object]),
    ("sourcekitd_request_int64_create",
     [c_int64],
     c_object_p),
    ("sourcekitd_request_retain",
     [Object],
     c_object_p),
    ("sourcekitd_request_release",
     [Object]),
    ("sourcekitd_request_string_create",
     [c_char_p],
     c_object_p),
    ("sourcekitd_request_uid_create",
     [UIdent],
     c_object_p),
    ("sourcekitd_response_description_copy",
     [Response],
     c_char_p),
    ("sourcekitd_response_description_dump",
     [Response]),
    ("sourcekitd_response_description_dump_filedesc",
     [Response, c_int]),
    ("sourcekitd_response_dispose",
     [Response]),
    ("sourcekitd_response_error_get_description",
     [Response],
     c_char_p),
    ("sourcekitd_response_error_get_kind",
     [Response],
     ErrorKind.from_id),
    ("sourcekitd_response_get_value",
     [Response],
     Variant),
    ("sourcekitd_response_is_error",
     [Response],
     c_bool),
    ("sourcekitd_send_request_sync",
     [Object],
     c_object_p),
    ("sourcekitd_shutdown",
     None),
    ("sourcekitd_uid_get_from_buf",
     [c_char_p, c_size_t],
     c_object_p),
    ("sourcekitd_uid_get_from_cstr",
     [c_char_p],
     c_object_p),
    ("sourcekitd_uid_get_length",
     [UIdent],
     c_size_t),
    ("sourcekitd_uid_get_string_ptr",
     [UIdent],
     c_char_p),
    ("sourcekitd_variant_array_apply_f",
     [Variant, callbacks['array_applier'], py_object],
     c_bool),
    ("sourcekitd_variant_array_get_bool",
     [Variant, c_size_t],
     c_bool),
    ("sourcekitd_variant_array_get_count",
     [Variant],
     c_size_t),
    ("sourcekitd_variant_array_get_int64",
     [Variant, c_size_t],
     c_int64),
    ("sourcekitd_variant_array_get_string",
     [Variant, c_size_t],
     c_char_p),
    ("sourcekitd_variant_array_get_uid",
     [Variant, c_size_t],
     c_object_p),
    ("sourcekitd_variant_array_get_value",
     [Variant, c_size_t],
     Variant),
    ("sourcekitd_variant_bool_get_value",
     [Variant],
     c_bool),
    ("sourcekitd_variant_dictionary_apply_f",
     [Variant, callbacks['dictionary_applier'], py_object],
     c_bool),
    ("sourcekitd_variant_dictionary_get_bool",
     [Variant, UIdent],
     c_bool),
    ("sourcekitd_variant_dictionary_get_int64",
     [Variant, UIdent],
     c_int64),
    ("sourcekitd_variant_dictionary_get_string",
     [Variant, UIdent],
     c_char_p),
    ("sourcekitd_variant_dictionary_get_value",
     [Variant, UIdent],
     Variant),
    ("sourcekitd_variant_dictionary_get_uid",
     [Variant, UIdent],
     c_object_p),
    ("sourcekitd_variant_get_type",
     [Variant],
     VariantType.from_id),
    ("sourcekitd_variant_string_get_length",
     [Variant],
     c_size_t),
    ("sourcekitd_variant_string_get_ptr",
     [Variant],
     c_char_p),
    ("sourcekitd_variant_int64_get_value",
     [Variant],
     c_int64),
    ("sourcekitd_variant_uid_get_value",
     [Variant],
     c_object_p),
]
class LibsourcekitdError(Exception):
    """Raised when the sourcekitd shared library cannot be loaded or used."""

    def __init__(self, message):
        # Call the base constructor so args, repr() and pickling behave
        # like a normal exception (the original skipped this, leaving
        # e.args empty).
        Exception.__init__(self, message)
        self.m = message  # kept for backward compatibility

    def __str__(self):
        return self.m
def register_function(lib, item, ignore_errors):
    """Attach ctypes metadata from *item* to the matching symbol of *lib*.

    *item* is ``(name, argtypes[, restype[, errcheck]])``.  A symbol may be
    absent when these bindings run against an older or incompatible
    sourcekitd; that is skipped or reported depending on *ignore_errors*.
    """
    name = item[0]
    try:
        func = getattr(lib, name)
    except AttributeError as e:
        msg = str(e) + ". Please ensure that your Python bindings are "\
            "compatible with your sourcekitd version."
        if ignore_errors:
            return
        raise LibsourcekitdError(msg)
    # Apply whichever optional fields the entry provides, in order.
    for attr, value in zip(('argtypes', 'restype', 'errcheck'), item[1:]):
        setattr(func, attr, value)
def register_functions(lib, ignore_errors):
    """Register function prototypes with a sourcekitd library instance.

    This must be called as part of library instantiation so Python knows how
    to call out to the shared library.
    """
    # Use an explicit loop instead of map(): map() is lazy in Python 3,
    # so using it purely for side effects would silently register nothing.
    for item in functionList:
        register_function(lib, item, ignore_errors)
class Config(object):
    """Locates and loads the sourcekitd shared library.

    ``set_library_path``/``set_library_file`` must be called before the
    library is first accessed; once loaded, the handle is cached by the
    CachedProperty descriptors.
    """

    library_path = None   # directory to search for the library
    library_file = None   # exact library path; overrides library_path
    loaded = False        # set once the library has been loaded

    @staticmethod
    def set_library_path(path):
        """Set the path in which to search for sourcekitd"""
        if Config.loaded:
            # Fixed duplicated word ("before before") in the message.
            raise Exception("library path must be set before using "
                            "any other functionalities in sourcekitd.")
        Config.library_path = path

    @staticmethod
    def set_library_file(filename):
        """Set the exact location of sourcekitd"""
        if Config.loaded:
            # Fixed duplicated word ("before before") in the message.
            raise Exception("library file must be set before using "
                            "any other functionalities in sourcekitd.")
        Config.library_file = filename

    @CachedProperty
    def lib(self):
        # Load once, register all prototypes, then mark the config frozen.
        lib = self.get_sourcekitd_library()
        register_functions(lib, False)
        Config.loaded = True
        return lib

    @CachedProperty
    def free(self):
        # libc free(), used to release strings malloc'd by sourcekitd.
        free = cdll.LoadLibrary('libc.dylib').free
        free.argtypes = [c_void_p]
        return free

    def get_filename(self):
        """Return the platform-appropriate library file name/path."""
        if Config.library_file:
            return Config.library_file
        import platform
        name = platform.system()
        if name == 'Darwin':
            # The XPC service cannot run via the bindings due to permissions
            # issue.
            # file = 'sourcekitd.framework/sourcekitd'
            file = 'libsourcekitdInProc.dylib'
        elif name == 'Windows':
            file = 'sourcekitd.dll'
        else:
            file = 'sourcekitd.so'
        if Config.library_path:
            file = Config.library_path + '/' + file
        return file

    def get_sourcekitd_library(self):
        """Load the library, wrapping load failures in LibsourcekitdError."""
        try:
            library = cdll.LoadLibrary(self.get_filename())
        except OSError as e:
            msg = str(e) + ". To provide a path to sourcekitd use " \
                "Config.set_library_path() or " \
                "Config.set_library_file()."
            raise LibsourcekitdError(msg)
        return library
# Module-level singleton holding the loaded library handle.
conf = Config()
# Importing this module initializes sourcekitd immediately.
conf.lib.sourcekitd_initialize()
__all__ = [
    'Config',
    'Object',
    'Response',
    'UIdent',
    'ErrorKind',
    'Variant',
    'VariantType'
]
|
apache-2.0
|
PyBossa/pybossa
|
pybossa/pro_features.py
|
1
|
1648
|
# -*- coding: utf8 -*-
# This file is part of PYBOSSA.
#
# Copyright (C) 2015 Scifabric LTD.
#
# PYBOSSA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PYBOSSA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PYBOSSA. If not, see <http://www.gnu.org/licenses/>.
class ProFeatureHandler(object):
    """Decides whether pro-only features are available to a given user.

    ``config`` maps feature names ('auditlog', 'webhooks', 'autoimporter',
    'better_stats') to a truthy value when the feature is restricted to
    pro/admin users; a falsy or missing entry makes the feature available
    to everyone.
    """

    def __init__(self, config):
        self.config = config

    def _available_to(self, feature, user):
        # Shared rule for the simple feature flags: unrestricted features
        # are enabled for everybody; restricted ones only for authenticated
        # admins or pro users.  (The original repeated this in three
        # methods verbatim.)
        if not self.config.get(feature):
            return True
        return user.is_authenticated() and (user.admin or user.pro)

    def auditlog_enabled_for(self, user):
        return self._available_to('auditlog', user)

    def webhooks_enabled_for(self, user):
        return self._available_to('webhooks', user)

    def autoimporter_enabled_for(self, user):
        return self._available_to('autoimporter', user)

    def better_stats_enabled_for(self, user, owner):
        # Unlike the other flags this also considers the project owner:
        # enabled when the owner is pro, or for authenticated admins.
        # Parentheses make the original operator precedence explicit.
        if not self.config.get('better_stats'):
            return True
        return owner.pro or (user.is_authenticated() and user.admin)

    def only_for_pro(self, feature):
        """Return the raw config flag: truthy when *feature* is pro-only."""
        return self.config.get(feature)
|
agpl-3.0
|
sjsj0101/backtestengine
|
backtest/optimizer/compare.py
|
1
|
21663
|
# encoding: utf-8
"""
@version: python3.6
@author: ‘sj‘
@contact: [email protected]
@file: optimizer.py
@time: 10/20/17 10:06 PM
"""
from backtest.eventengine.eventEngine import *
from backtest.eventengine.eventType import *
from backtest.core.context import *
from backtest.data.getdata import *
from backtest.data.datatype import *
from backtest.data.datatools import *
from backtest.handlers.position import *
from backtest.handlers.portfolio import *
from backtest.tools.tools import *
from backtest.tools.ta import *
class Optimizer(object):
    """Strategy-parameter optimizer stub.

    Currently only records the name of the target (objective) function;
    the optimization machinery itself is not implemented here.
    """

    def __init__(self):
        # Objective-function identifier; empty until configured.
        self.target_func = ''
class StrategyCompareDayTest(object):
    """Event-driven, day-by-day backtest harness for strategy comparison.

    Subclasses implement ``initialize`` and ``handle_data``; ``run``
    iterates the trading calendar, feeds tick/bar data through the event
    engine, settles each day independently (the portfolio is reset every
    day) and finally writes a per-day comparison summary to an .xls file.
    """

    def __init__(self):
        self._engine = EventEngine()
        self._engine.register(EVENT_ON_FEED, self._handle_data)  # handle feed (tick/bar) events
        # self._engine.register(EVENT_ON_TICK, self._check_unfilled_orders)  # check pending orders
        self._engine.register(EVENT_ORDER, self._portfolio)  # execute order events
        # self._engine.register(EVENT_FILL, self._fill)  # update account positions...
        self._engine.register(EVENT_AFTER_FILL, self._order_change)
        self._engine.register(EVENT_NEXT_BAR, self._next_bar)
        self._engine.register(EVENT_NEXT_TICK, self._next_tick)
        self._engine.register(EVENT_NEXT_DAY, self._next_day)
        self._engine.register(EVENT_DAY_END, self._handle_dayend)
        self._engine.register(EVENT_OUTPUT, self._handle_output)
        # The context object stores all state the backtest needs.
        self.context = BacktestContext()
        self.context.run_info = RunInfo()

    def initialize(self):
        # Must be overridden: configure account/universe/run_info.
        print('you must initilize account')
        raise NotImplementedError

    def handle_data(self, data):
        # Must be overridden with the strategy logic; called on every feed.
        print('you must write a strategy')
        raise NotImplementedError

    def order_change(self, data):
        # Optional hook invoked after an order has been filled.
        pass

    def run(self):
        """Run the backtest over the configured date range."""
        self._engine.start()
        self.context.timestart = datetime.datetime.now()
        self.initialize()
        date_start_int = datestrtoint(self.context.run_info.start_date)
        date_end_int = datestrtoint(self.context.run_info.end_date)
        datelist = GetTradeDates().get_date_list(date_start_int, date_end_int)  # generate datelist
        # print(datelist)
        trade_days = len(datelist)
        print('交易日共 %d 天' % trade_days)
        self.context.datelist = iter(datelist)  # generate datelist iterator
        self.context.portfolio = Portfolio(self.context.init_cash)  # initiate portfolio from init cash
        self.context.portfolio.stats.backtestid = self.context.run_info.strategy_name  # save backtest name to context
        self._next_day()  # start backtest

    def _next_day(self, event={}):
        """Advance to the next trading day and load its data feed.

        NOTE(review): the mutable default ``event={}`` is shared across
        calls; harmless here because ``event`` is never read or mutated.
        """
        try:  # run datelist iterator
            date = self.context.datelist.__next__()
            self.context.date = date
            print('日期:%d' % date)
            self.context.portfolio.stats.dates.append(date)
            self.context.current_contract = self.context.universe
            if self.context.run_info.main_contract:
                # Optionally resolve the dominant (main) contract per symbol.
                self.context.current_contract = self.__get_main_contract(date=date, symbols=self.context.universe, ip=self.context.run_info.ip)
            if len(self.context.universe) == 1:  # handle single instrument
                self.context.instmt_info = InstmtInfoMongo(
                    symbol=self.context.current_contract[0]).get_instmt_info()  # get future info
                self.context.settlement_price = TradeDataMongo(self.context.current_contract[0], date, column=columns,
                                                               ip=self.context.run_info.ip).get_settlement_price()  # get settlement price for daily summary
                if self.context.run_info.feed_frequency == 'tick':  # handle tick data
                    data_ticks = self.__get_tick(date, self.context.current_contract[0], ip=self.context.run_info.ip)  # get tick data
                    self.context.data_day = data_ticks
                    self._next_tick()
                elif self.context.run_info.feed_frequency in ['30s', '1m', '3m', '5m', '15m', '30m', '60m', '1d']:  # handle bar data
                    data_bars = self.__get_bar(date, self.context.current_contract[0], freq=self.context.run_info.feed_frequency, ip=self.context.run_info.ip)
                    self.context.data_day = data_bars
                    self._next_bar()
                else:
                    print("you must specify a data feed type")
            elif len(self.context.universe) > 1:  # handle multi instruments
                pass
        except StopIteration:  # when datelist interation ends, start output
            event = Event(EVENT_OUTPUT)
            self._engine.sendEvent(event)

    def _next_bar(self, event={}):  # handles bar feeds
        try:  # start bar interation within day
            # NOTE(review): uses .next() here but __next__() in _next_day;
            # the two paths target different Python versions -- confirm.
            row = self.context.data_day.next()
            bar_obj = create_bar_obj(row)  # create bar object
            # print(self.context.date, bar_obj.end_time, bar_obj.close)
            self.context.current_bar = bar_obj  # save current bar to context
            event = Event(EVENT_ON_FEED)
            event.dict = bar_obj
            self.context.portfolio.update_portfolio(event.dict.close, time=str(self.context.date) + ' ' + self.context.current_bar.end_time)  # update portfolio
            self._engine.sendEvent(event)
        except StopIteration:  # when bar interation ends, start day end process
            event = Event(EVENT_DAY_END)
            self._engine.sendEvent(event)
            # sleep(0.1)

    def _next_tick(self, event={}):  # handles tick feeds
        try:  # start bar interation within day
            row = self.context.data_day.next()
            self.context.current_tick = row  # save current bar to context
            event = Event(EVENT_ON_FEED)
            event.dict = row
            self.context.portfolio.update_portfolio(event.dict['LastPrice'])  # update portfolio
            self._engine.sendEvent(event)
        except StopIteration:  # when bar interation ends, start day end process
            event = Event(EVENT_DAY_END)
            self._engine.sendEvent(event)

    def _handle_data(self, event):
        # Forward the feed to the user strategy; if no order was placed,
        # advance the clock ourselves (a fill advances it otherwise).
        self.handle_data(event.dict)
        if not self.context.order_flag:  # no trade generated: request the next datum
            # NOTE(review): both branches emit EVENT_NEXT_BAR; the tick
            # branch presumably should emit EVENT_NEXT_TICK -- confirm.
            if self.context.run_info.feed_frequency == 'tick':
                event = Event(EVENT_NEXT_BAR)
                self._engine.sendEvent(event)
            else:
                event = Event(EVENT_NEXT_BAR)
                self._engine.sendEvent(event)
        else:  # if order_flag, order itself will send move on signal
            pass

    def order(self, instrument_id, direction, offset, vol, limit_price=0, stop_price=0, contingent_condition='immediately'):
        '''
        Place an order.

        :param instrument_id: contract id, e.g. 'rb1801'
        :param direction: DirectionType. Buy or Sell
        :param offset: OffsetFlagType. Open.__char__() / Close.__char__() /
            CloseToday.__char__() / CloseYesterday.__char__()
        :param vol: volume (number of lots)
        :param limit_price: 0 means a market order; non-zero means a limit order
        :param stop_price: 0 means the contingent condition triggers immediately
        :param contingent_condition: trigger condition; 'immediately' by
            default, a stop condition may be configured
        :return:
        '''
        self.context.order_flag = True  # flip the order flag
        price_type = 'limit'
        if limit_price == 0:
            price_type = 'any'
        event = Event(EVENT_ORDER)
        event.dict = {
            # 'tick': self.context.current_tick,
            'symbol': instrument_id,
            'vol': vol,
            'limit_price': limit_price,
            'pricetype': price_type,
            'direction': direction,
            'offset': offset,
            'stop_price': stop_price,
            'contingent_condition': contingent_condition
            # 'cancel_flag': False
        }
        # print(self.context.current_data['endTime'])
        # print(event.dict)
        print("beforesend %s:" % datetime.datetime.now(), 2)
        self._engine.sendEvent(event)

    def _portfolio(self, event):
        """Apply a filled order to the portfolio with the proper fees."""
        order = event.dict
        # Commission may be quoted per-lot ('vol') or per notional value.
        if self.context.instmt_info['commission_type'] == 'vol':
            comm_o = self.context.instmt_info['opening_fee_by_num']
            comm_t = self.context.instmt_info['closing_today_fee_by_num']
            comm_y = self.context.instmt_info['closing_fee_by_num']
        else:
            comm_o = self.context.instmt_info['opening_fee_by_value']
            comm_t = self.context.instmt_info['closing_today_fee_by_value']
            comm_y = self.context.instmt_info['closing_fee_by_value']
        margin = self.context.instmt_info['broker_margin'] / 100
        contract_size = self.context.instmt_info['contract_size']
        exch_code = self.context.instmt_info['exch_code']
        time = self.context.current_bar.end_time
        date = self.context.date
        print('receive order %s:' % datetime.datetime.now(), 3)
        # print('order direction:%s offset:%s vol:%d price:%d date:%d time:%s' %
        #       (order['direction'],order['offset'],order['vol'],order['limit_price'],date,time))
        if order['direction'] == BUY and order['offset'] == OPEN:
            self.context.portfolio.modify_position(symbol=order['symbol'], direction='long', offset='open',
                                                   vol=order['vol'] * contract_size, price=order['limit_price'],
                                                   marginratio=margin, comm_o=comm_o, comm_t=comm_t, comm_y=comm_y,
                                                   time=time, date=date, exch_code=exch_code, info=self.context.instmt_info)
        elif order['direction'] == SELL and order['offset'] == OPEN:
            self.context.portfolio.modify_position(symbol=order['symbol'], direction='short', offset='open',
                                                   vol=order['vol'] * contract_size, price=order['limit_price'],
                                                   marginratio=margin,
                                                   comm_o=comm_o, comm_t=comm_t, comm_y=comm_y, time=time, date=date, exch_code=exch_code, info=self.context.instmt_info)
        elif order['direction'] == BUY and order['offset'] == CLOSE:
            # Buying to close reduces a *short* position.
            self.context.portfolio.modify_position(symbol=order['symbol'], direction='short', offset='close',
                                                   vol=order['vol'] * contract_size, price=order['limit_price'],
                                                   marginratio=margin,
                                                   comm_o=comm_o, comm_t=comm_t, comm_y=comm_y, time=time, date=date, exch_code=exch_code)
        elif order['direction'] == BUY and order['offset'] == CLOSE_T:
            self.context.portfolio.modify_position(symbol=order['symbol'], direction='short', offset='close_t',
                                                   vol=order['vol'] * contract_size, price=order['limit_price'],
                                                   marginratio=margin,
                                                   comm_o=comm_o, comm_t=comm_t, comm_y=comm_y, time=time, date=date, exch_code=exch_code)
        elif order['direction'] == BUY and order['offset'] == CLOSE_Y:
            self.context.portfolio.modify_position(symbol=order['symbol'], direction='short', offset='close_y',
                                                   vol=order['vol'] * contract_size, price=order['limit_price'],
                                                   marginratio=margin,
                                                   comm_o=comm_o, comm_t=comm_t, comm_y=comm_y, time=time, date=date, exch_code=exch_code)
        elif order['direction'] == SELL and order['offset'] == CLOSE:
            # Selling to close reduces a *long* position.
            self.context.portfolio.modify_position(symbol=order['symbol'], direction='long', offset='close',
                                                   vol=order['vol'] * contract_size, price=order['limit_price'],
                                                   marginratio=margin,
                                                   comm_o=comm_o, comm_t=comm_t, comm_y=comm_y, time=time, date=date, exch_code=exch_code)
        elif order['direction'] == SELL and order['offset'] == CLOSE_T:
            self.context.portfolio.modify_position(symbol=order['symbol'], direction='long', offset='close_t',
                                                   vol=order['vol'] * contract_size, price=order['limit_price'],
                                                   marginratio=margin,
                                                   comm_o=comm_o, comm_t=comm_t, comm_y=comm_y, time=time, date=date, exch_code=exch_code)
        elif order['direction'] == SELL and order['offset'] == CLOSE_Y:
            self.context.portfolio.modify_position(symbol=order['symbol'], direction='long', offset='close_y',
                                                   vol=order['vol'] * contract_size, price=order['limit_price'],
                                                   marginratio=margin,
                                                   comm_o=comm_o, comm_t=comm_t, comm_y=comm_y, time=time, date=date, exch_code=exch_code)
        # else:
        #     # cancel the order
        #     pass
        print(
            'after trade: curtime:%s, curprice:%.2f,total:%d, marginreq:%d, avalcash:%d,upnl:%d,daypnl:%d,daycomm:%d,prebalance:%d' % (
                self.context.current_bar.end_time,
                self.context.current_bar.close,
                self.context.portfolio.total_value, self.context.portfolio.marginreq,
                self.context.portfolio.avail_cash, self.context.portfolio.upnl,
                self.context.portfolio.dailypnl,
                self.context.portfolio.dailycomm,
                self.context.portfolio.pre_balance))
        event = Event(EVENT_AFTER_FILL)
        event.dict = order
        self._engine.sendEvent(event)

    def _order_change(self, event):
        # Notify the strategy of the fill, clear the flag, move the clock on.
        self.order_change(event.dict)
        self.context.order_flag = False
        event = Event(EVENT_NEXT_BAR)
        self._engine.sendEvent(event)

    def _handle_dayend(self, event={}):
        """Daily settlement: record stats, then reset for the next day."""
        # NOTE(review): settles at the last bar's close rather than
        # context.settlement_price fetched in _next_day -- confirm intended.
        self.context.portfolio.dayend_summary(date=self.context.date,
                                              settlement_price=self.context.current_bar.close)
        print(self.context.date)
        self.context.portfolio.stats.nv = np.array(self.context.portfolio.netvalue)
        self.context.comparision.datelst.append(self.context.date)
        self.context.comparision.dayend_value.append(self.context.portfolio.total_value)
        self.context.comparision.simplereturn.append(self.context.portfolio.stats.returns('simple', 'def'))
        self.context.comparision.logreturn.append(self.context.portfolio.stats.returns('log', 'def'))
        self.context.comparision.volit.append(self.context.portfolio.stats.volatility())
        self.context.comparision.maxdd.append(self.context.portfolio.stats.maxdd())
        self.context.comparision.sharpe.append(self.context.portfolio.stats.sharpe())
        self.context.comparision.totalcommission.append(self.context.portfolio.totalcomm)
        self.context.comparision.tradecount.append(self.context.portfolio.tradecount)
        self.context.comparision.net_profit.append(self.context.portfolio.total_value - self.context.init_cash)
        self.context.comparision.trans += self.context.portfolio.stats.transactions
        # Re-run the user's initialize() and start each day from a fresh
        # portfolio, so days can be compared independently of each other.
        self.initialize()
        # self.context.direction = ''
        # self.context.open_vol = 0  # current number of open lots
        # self.context.open_flag = False  # False: no open position yet; True: position opened
        # self.context.can_open_flag = True  # True: may open more; False: fully positioned
        # self.context.close_count = 0  # close-trade counter
        # self.context.boll = Boll()
        # self.context.open_price = 0
        self.context.portfolio = Portfolio(init_cash=self.context.init_cash)
        event = Event(EVENT_NEXT_DAY)
        self._engine.sendEvent(event)

    def _handle_output(self, event={}):
        """Write day-end statistics and transactions to an .xls workbook."""
        outputwb = xlwt.Workbook()
        dayend = outputwb.add_sheet('dayend')
        transdetail = outputwb.add_sheet('transactions')
        profit = np.array(self.context.comparision.net_profit)
        avg_profit = np.mean(profit)
        std_profit = np.std(profit)
        n = len(profit)
        count_gain = 0
        for i in self.context.comparision.net_profit:
            if i > 0:
                count_gain += 1
        count_loss = n - count_gain
        # Header row of the day-end sheet.
        dayend.write(0, 0, 'date')
        dayend.write(0, 1, 'value')
        dayend.write(0, 2, 'simple_return')
        dayend.write(0, 3, 'log_return')
        dayend.write(0, 4, 'volitility')
        dayend.write(0, 5, 'maxdd')
        dayend.write(0, 6, 'sharpe')
        dayend.write(0, 7, 'total_commission')
        dayend.write(0, 8, 'trade_count')
        dayend.write(0, 9, 'net_profit')
        # Aggregate summary block (columns 11/12).
        dayend.write(1, 11, 'average_profit')
        dayend.write(2, 11, 'std_profit')
        dayend.write(3, 11, 'days_total')
        dayend.write(4, 11, 'days_gain')
        dayend.write(5, 11, 'days_loss')
        dayend.write(1, 12, avg_profit)
        dayend.write(2, 12, std_profit)
        dayend.write(3, 12, n)
        dayend.write(4, 12, count_gain)
        dayend.write(5, 12, count_loss)
        # One row per trading day.
        for i in range(0, len(self.context.comparision.datelst)):
            dayend.write(i + 1, 0, self.context.comparision.datelst[i])
            dayend.write(i + 1, 1, self.context.comparision.dayend_value[i])
            dayend.write(i + 1, 2, self.context.comparision.simplereturn[i])
            dayend.write(i + 1, 3, self.context.comparision.logreturn[i])
            dayend.write(i + 1, 4, self.context.comparision.volit[i])
            dayend.write(i + 1, 5, self.context.comparision.maxdd[i])
            dayend.write(i + 1, 6, self.context.comparision.sharpe[i])
            dayend.write(i + 1, 7, self.context.comparision.totalcommission[i])
            dayend.write(i + 1, 8, self.context.comparision.tradecount[i])
            dayend.write(i + 1, 9, self.context.comparision.net_profit[i])
        # Header row of the transactions sheet.
        transdetail.write(0, 0, 'date')
        transdetail.write(0, 1, 'time')
        transdetail.write(0, 2, 'symbol')
        transdetail.write(0, 3, 'direction')
        transdetail.write(0, 4, 'offset')
        transdetail.write(0, 5, 'price')
        transdetail.write(0, 6, 'volume')
        transdetail.write(0, 7, 'commission')
        transdetail.write(0, 8, 'realized gain/loss')
        # One row per transaction.
        for i in range(0, len(self.context.comparision.trans)):
            trans = self.context.comparision.trans[i]
            transdetail.write(i + 1, 0, trans.date)
            transdetail.write(i + 1, 1, trans.time)
            transdetail.write(i + 1, 2, trans.symbol)
            transdetail.write(i + 1, 3, trans.direction)
            transdetail.write(i + 1, 4, trans.offset)
            transdetail.write(i + 1, 5, trans.price)
            transdetail.write(i + 1, 6, trans.vol)
            transdetail.write(i + 1, 7, trans.commission)
            transdetail.write(i + 1, 8, trans.pnl)
        outputwb.save(self.context.run_info.strategy_name + '-' + str(self.context.comparision.datelst[0]) + '-'
                      + str(self.context.comparision.datelst[-1]) + '-backtest-comp' + '.xls')
        timeend = datetime.datetime.now()
        timespend = timeend - self.context.timestart
        print('回测共耗时%s' % timespend)
        self._engine.stop()

    def _fill(self, event):  # invoked once a risk-control module is added
        pass

    def cancel_order(self, order):
        # NOTE(review): the incoming *order* argument is immediately
        # replaced by a fresh Order() -- confirm this is intended.
        order = Order()
        order.cancel_flag = True
        # dispatch the order-execution event
        event = Event(EVENT_FILL)
        event.dict = order
        self._engine.sendEvent(event)
        # update positions...
        # generate slippage

    def stop(self):
        self._engine.stop()

    # fetch one day of tick data for a symbol
    def __get_tick(self, date, symbol, ip):
        tick = {}
        if self.context.datasource == 'mongo':
            tick = TradeDataMongo(symbol=symbol, date=date, column=miniclms, ip=ip).get_tick_data()  # one day of ticks from the database
        elif self.context.datasource == 'csv':
            tick = GetDataCSV(symbol + '-' + date + '.csv').get_tick()
        return tick

    def __get_ticks_dict(self, date, symbols, ip):
        # One tick feed per symbol, keyed by symbol.
        ticks = {}
        for symbol in symbols:
            ticks[symbol] = self.__get_tick(date, symbol, ip)
        return ticks

    def __get_bar(self, date, symbol, freq, ip):
        # Fetch one day of bar data at frequency *freq* for a symbol.
        bar = {}
        if self.context.datasource == 'mongo':
            bar = TradeDataMongo(symbol=symbol, date=date, column=miniclms, ip=ip).get_bar_data(freq=freq)  # one day of bars from the database
        elif self.context.datasource == 'csv':
            bar = GetDataCSV(symbol + '-' + date + '.csv').get_tick()
        return bar

    def __get_bars_dict(self, date, symbols, ip):
        # One bar feed per symbol, keyed by symbol.
        bars = {}
        for symbol in symbols:
            bars[symbol] = self.__get_bar(date, symbol, ip)
        return bars

    def __get_main_contract(self, date, symbols, ip):
        # Resolve the dominant (main) contract of each symbol for *date*.
        main_contract = []
        if self.context.datasource == 'mongo':
            for symbol in symbols:
                main_contract.append(TradeDataMongo(symbol=symbol, date=date, column=miniclms, ip=ip).get_main_contract())  # from the database
        elif self.context.datasource == 'csv':
            pass
        return main_contract
|
apache-2.0
|
sajeeshcs/nested_projects_keystone
|
keystone/tests/test_keystoneclient.py
|
1
|
45836
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import os
import uuid
import mock
from oslo.utils import timeutils
import webob
from keystone import config
from keystone.openstack.common import jsonutils
from keystone import tests
from keystone.tests import default_fixtures
from keystone.tests.ksfixtures import appserver
from keystone.tests.ksfixtures import database
CONF = config.CONF
# Default domain id resolved from the identity configuration.
DEFAULT_DOMAIN_ID = CONF.identity.default_domain_id
# Git locations used to check out python-keystoneclient for compat tests;
# OPENSTACK_REPO may be overridden through the environment.
OPENSTACK_REPO = os.environ.get('OPENSTACK_REPO',
                                'https://git.openstack.org/openstack')
KEYSTONECLIENT_REPO = '%s/python-keystoneclient.git' % OPENSTACK_REPO
class CompatTestCase(tests.NoModule, tests.TestCase):
    """Harness that boots public/admin API servers plus a checked-out
    python-keystoneclient so client-compatibility tests can run against
    a live keystone."""

    def setUp(self):
        super(CompatTestCase, self).setUp()
        # FIXME(morganfainberg): Since we are running tests through the
        # controllers and some internal api drivers are SQL-only, the correct
        # approach is to ensure we have the correct backing store. The
        # credential api makes some very SQL specific assumptions that should
        # be addressed allowing for non-SQL based testing to occur.
        self.useFixture(database.Database())
        self.load_backends()
        self.load_fixtures(default_fixtures)
        # TODO(termie): add an admin user to the fixtures and use that user
        # override the fixtures, for now
        self.assignment_api.add_role_to_user_and_project(
            self.user_foo['id'],
            self.tenant_bar['id'],
            self.role_admin['id'])
        conf = self._paste_config('keystone')
        fixture = self.useFixture(appserver.AppServer(conf, appserver.MAIN))
        self.public_server = fixture.server
        fixture = self.useFixture(appserver.AppServer(conf, appserver.ADMIN))
        self.admin_server = fixture.server
        self.addCleanup(self.cleanup_instance('public_server', 'admin_server'))
        # checkout_info is presumably supplied by the concrete subclass:
        # either a path to an existing checkout, or arguments for
        # checkout_vendor to fetch one -- confirm against subclasses.
        if isinstance(self.checkout_info, str):
            revdir = self.checkout_info
        else:
            revdir = tests.checkout_vendor(*self.checkout_info)
        self.add_path(revdir)
        self.clear_module('keystoneclient')

    def _public_url(self):
        # The server binds an ephemeral port; recover it from socket_info.
        public_port = self.public_server.socket_info['socket'][1]
        return "http://localhost:%s/v2.0" % public_port

    def _admin_url(self):
        admin_port = self.admin_server.socket_info['socket'][1]
        return "http://localhost:%s/v2.0" % admin_port

    def _client(self, admin=False, **kwargs):
        """Return an authenticated client against the admin or public API."""
        from keystoneclient.v2_0 import client as ks_client
        url = self._admin_url() if admin else self._public_url()
        kc = ks_client.Client(endpoint=url,
                              auth_url=self._public_url(),
                              **kwargs)
        kc.authenticate()
        # have to manually overwrite the management url after authentication
        kc.management_url = url
        return kc

    def get_client(self, user_ref=None, tenant_ref=None, admin=False):
        """Return a client for *user_ref* scoped to *tenant_ref*.

        Defaults to the 'foo' fixture user and that user's first fixture
        tenant when no tenant is given.
        """
        if user_ref is None:
            user_ref = self.user_foo
        if tenant_ref is None:
            # NOTE(review): tenant_id stays unbound if no fixture matches
            # user_ref; this relies on user_ref always being a fixture user.
            for user in default_fixtures.USERS:
                # The fixture ID is no longer used as the ID in the database
                # The fixture ID, however, is still used as part of the
                # attribute name when storing the created object on the test
                # case. This means that we need to use the fixture ID below to
                # find the actual object so that we can get the ID as stored
                # in the database to compare against.
                if (getattr(self, 'user_%s' % user['id'])['id'] ==
                        user_ref['id']):
                    tenant_id = user['tenants'][0]
        else:
            tenant_id = tenant_ref['id']
        return self._client(username=user_ref['name'],
                            password=user_ref['password'],
                            tenant_id=tenant_id,
                            admin=admin)
class KeystoneClientTests(object):
"""Tests for all versions of keystoneclient."""
    def test_authenticate_tenant_name_and_tenants(self):
        """Default (tenant-name) auth can list the user's tenants."""
        client = self.get_client()
        tenants = client.tenants.list()
        self.assertEqual(self.tenant_bar['id'], tenants[0].id)
def test_authenticate_tenant_id_and_tenants(self):
client = self._client(username=self.user_foo['name'],
password=self.user_foo['password'],
tenant_id='bar')
tenants = client.tenants.list()
self.assertEqual(self.tenant_bar['id'], tenants[0].id)
def test_authenticate_invalid_tenant_id(self):
from keystoneclient import exceptions as client_exceptions
self.assertRaises(client_exceptions.Unauthorized,
self._client,
username=self.user_foo['name'],
password=self.user_foo['password'],
tenant_id='baz')
def test_authenticate_token_no_tenant(self):
client = self.get_client()
token = client.auth_token
token_client = self._client(token=token)
tenants = token_client.tenants.list()
self.assertEqual(self.tenant_bar['id'], tenants[0].id)
def test_authenticate_token_tenant_id(self):
client = self.get_client()
token = client.auth_token
token_client = self._client(token=token, tenant_id='bar')
tenants = token_client.tenants.list()
self.assertEqual(self.tenant_bar['id'], tenants[0].id)
def test_authenticate_token_invalid_tenant_id(self):
from keystoneclient import exceptions as client_exceptions
client = self.get_client()
token = client.auth_token
self.assertRaises(client_exceptions.Unauthorized,
self._client, token=token,
tenant_id=uuid.uuid4().hex)
def test_authenticate_token_invalid_tenant_name(self):
from keystoneclient import exceptions as client_exceptions
client = self.get_client()
token = client.auth_token
self.assertRaises(client_exceptions.Unauthorized,
self._client, token=token,
tenant_name=uuid.uuid4().hex)
def test_authenticate_token_tenant_name(self):
client = self.get_client()
token = client.auth_token
token_client = self._client(token=token, tenant_name='BAR')
tenants = token_client.tenants.list()
self.assertEqual(self.tenant_bar['id'], tenants[0].id)
self.assertEqual(self.tenant_bar['id'], tenants[0].id)
def test_authenticate_and_delete_token(self):
from keystoneclient import exceptions as client_exceptions
client = self.get_client(admin=True)
token = client.auth_token
token_client = self._client(token=token)
tenants = token_client.tenants.list()
self.assertEqual(self.tenant_bar['id'], tenants[0].id)
client.tokens.delete(token_client.auth_token)
self.assertRaises(client_exceptions.Unauthorized,
token_client.tenants.list)
def test_authenticate_no_password(self):
from keystoneclient import exceptions as client_exceptions
user_ref = self.user_foo.copy()
user_ref['password'] = None
self.assertRaises(client_exceptions.AuthorizationFailure,
self.get_client,
user_ref)
def test_authenticate_no_username(self):
from keystoneclient import exceptions as client_exceptions
user_ref = self.user_foo.copy()
user_ref['name'] = None
self.assertRaises(client_exceptions.AuthorizationFailure,
self.get_client,
user_ref)
def test_authenticate_disabled_tenant(self):
from keystoneclient import exceptions as client_exceptions
admin_client = self.get_client(admin=True)
tenant = {
'name': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
'enabled': False,
}
tenant_ref = admin_client.tenants.create(
tenant_name=tenant['name'],
description=tenant['description'],
enabled=tenant['enabled'])
tenant['id'] = tenant_ref.id
user = {
'name': uuid.uuid4().hex,
'password': uuid.uuid4().hex,
'email': uuid.uuid4().hex,
'tenant_id': tenant['id'],
}
user_ref = admin_client.users.create(
name=user['name'],
password=user['password'],
email=user['email'],
tenant_id=user['tenant_id'])
user['id'] = user_ref.id
# password authentication
self.assertRaises(
client_exceptions.Unauthorized,
self._client,
username=user['name'],
password=user['password'],
tenant_id=tenant['id'])
# token authentication
client = self._client(
username=user['name'],
password=user['password'])
self.assertRaises(
client_exceptions.Unauthorized,
self._client,
token=client.auth_token,
tenant_id=tenant['id'])
# FIXME(ja): this test should require the "keystone:admin" roled
# (probably the role set via --keystone_admin_role flag)
# FIXME(ja): add a test that admin endpoint is only sent to admin user
# FIXME(ja): add a test that admin endpoint returns unauthorized if not
# admin
def test_tenant_create_update_and_delete(self):
from keystoneclient import exceptions as client_exceptions
tenant_name = 'original_tenant'
tenant_description = 'My original tenant!'
tenant_enabled = True
client = self.get_client(admin=True)
# create, get, and list a tenant
tenant = client.tenants.create(tenant_name=tenant_name,
description=tenant_description,
enabled=tenant_enabled)
self.assertEqual(tenant_name, tenant.name)
self.assertEqual(tenant_description, tenant.description)
self.assertEqual(tenant_enabled, tenant.enabled)
tenant = client.tenants.get(tenant_id=tenant.id)
self.assertEqual(tenant_name, tenant.name)
self.assertEqual(tenant_description, tenant.description)
self.assertEqual(tenant_enabled, tenant.enabled)
tenant = [t for t in client.tenants.list() if t.id == tenant.id].pop()
self.assertEqual(tenant_name, tenant.name)
self.assertEqual(tenant_description, tenant.description)
self.assertEqual(tenant_enabled, tenant.enabled)
# update, get, and list a tenant
tenant_name = 'updated_tenant'
tenant_description = 'Updated tenant!'
tenant_enabled = False
tenant = client.tenants.update(tenant_id=tenant.id,
tenant_name=tenant_name,
enabled=tenant_enabled,
description=tenant_description)
self.assertEqual(tenant_name, tenant.name)
self.assertEqual(tenant_description, tenant.description)
self.assertEqual(tenant_enabled, tenant.enabled)
tenant = client.tenants.get(tenant_id=tenant.id)
self.assertEqual(tenant_name, tenant.name)
self.assertEqual(tenant_description, tenant.description)
self.assertEqual(tenant_enabled, tenant.enabled)
tenant = [t for t in client.tenants.list() if t.id == tenant.id].pop()
self.assertEqual(tenant_name, tenant.name)
self.assertEqual(tenant_description, tenant.description)
self.assertEqual(tenant_enabled, tenant.enabled)
# delete, get, and list a tenant
client.tenants.delete(tenant=tenant.id)
self.assertRaises(client_exceptions.NotFound, client.tenants.get,
tenant.id)
self.assertFalse([t for t in client.tenants.list()
if t.id == tenant.id])
def test_tenant_create_update_and_delete_unicode(self):
from keystoneclient import exceptions as client_exceptions
tenant_name = u'original \u540d\u5b57'
tenant_description = 'My original tenant!'
tenant_enabled = True
client = self.get_client(admin=True)
# create, get, and list a tenant
tenant = client.tenants.create(tenant_name,
description=tenant_description,
enabled=tenant_enabled)
self.assertEqual(tenant_name, tenant.name)
self.assertEqual(tenant_description, tenant.description)
self.assertIs(tenant.enabled, tenant_enabled)
tenant = client.tenants.get(tenant.id)
self.assertEqual(tenant_name, tenant.name)
self.assertEqual(tenant_description, tenant.description)
self.assertIs(tenant.enabled, tenant_enabled)
# multiple tenants exist due to fixtures, so find the one we're testing
tenant = [t for t in client.tenants.list() if t.id == tenant.id].pop()
self.assertEqual(tenant_name, tenant.name)
self.assertEqual(tenant_description, tenant.description)
self.assertIs(tenant.enabled, tenant_enabled)
# update, get, and list a tenant
tenant_name = u'updated \u540d\u5b57'
tenant_description = 'Updated tenant!'
tenant_enabled = False
tenant = client.tenants.update(tenant.id,
tenant_name=tenant_name,
enabled=tenant_enabled,
description=tenant_description)
self.assertEqual(tenant_name, tenant.name)
self.assertEqual(tenant_description, tenant.description)
self.assertIs(tenant.enabled, tenant_enabled)
tenant = client.tenants.get(tenant.id)
self.assertEqual(tenant_name, tenant.name)
self.assertEqual(tenant_description, tenant.description)
self.assertIs(tenant.enabled, tenant_enabled)
tenant = [t for t in client.tenants.list() if t.id == tenant.id].pop()
self.assertEqual(tenant_name, tenant.name)
self.assertEqual(tenant_description, tenant.description)
self.assertIs(tenant.enabled, tenant_enabled)
# delete, get, and list a tenant
client.tenants.delete(tenant.id)
self.assertRaises(client_exceptions.NotFound, client.tenants.get,
tenant.id)
self.assertFalse([t for t in client.tenants.list()
if t.id == tenant.id])
def test_tenant_create_no_name(self):
from keystoneclient import exceptions as client_exceptions
client = self.get_client(admin=True)
self.assertRaises(client_exceptions.BadRequest,
client.tenants.create,
tenant_name="")
def test_tenant_delete_404(self):
from keystoneclient import exceptions as client_exceptions
client = self.get_client(admin=True)
self.assertRaises(client_exceptions.NotFound,
client.tenants.delete,
tenant=uuid.uuid4().hex)
def test_tenant_get_404(self):
from keystoneclient import exceptions as client_exceptions
client = self.get_client(admin=True)
self.assertRaises(client_exceptions.NotFound,
client.tenants.get,
tenant_id=uuid.uuid4().hex)
def test_tenant_update_404(self):
from keystoneclient import exceptions as client_exceptions
client = self.get_client(admin=True)
self.assertRaises(client_exceptions.NotFound,
client.tenants.update,
tenant_id=uuid.uuid4().hex)
def test_tenant_list(self):
client = self.get_client()
tenants = client.tenants.list()
self.assertEqual(1, len(tenants))
# Admin endpoint should return *all* tenants
client = self.get_client(admin=True)
tenants = client.tenants.list()
self.assertEqual(len(default_fixtures.TENANTS), len(tenants))
def test_invalid_password(self):
from keystoneclient import exceptions as client_exceptions
good_client = self._client(username=self.user_foo['name'],
password=self.user_foo['password'])
good_client.tenants.list()
self.assertRaises(client_exceptions.Unauthorized,
self._client,
username=self.user_foo['name'],
password=uuid.uuid4().hex)
def test_invalid_user_and_password(self):
from keystoneclient import exceptions as client_exceptions
self.assertRaises(client_exceptions.Unauthorized,
self._client,
username=uuid.uuid4().hex,
password=uuid.uuid4().hex)
def test_change_password_invalidates_token(self):
from keystoneclient import exceptions as client_exceptions
client = self.get_client(admin=True)
username = uuid.uuid4().hex
passwd = uuid.uuid4().hex
user = client.users.create(name=username, password=passwd,
email=uuid.uuid4().hex)
token_id = client.tokens.authenticate(username=username,
password=passwd).id
# authenticate with a token should work before a password change
client.tokens.authenticate(token=token_id)
client.users.update_password(user=user.id, password=uuid.uuid4().hex)
# authenticate with a token should not work after a password change
self.assertRaises(client_exceptions.Unauthorized,
client.tokens.authenticate,
token=token_id)
def test_disable_tenant_invalidates_token(self):
from keystoneclient import exceptions as client_exceptions
admin_client = self.get_client(admin=True)
foo_client = self.get_client(self.user_foo)
tenant_bar = admin_client.tenants.get(self.tenant_bar['id'])
# Disable the tenant.
tenant_bar.update(enabled=False)
# Test that the token has been removed.
self.assertRaises(client_exceptions.Unauthorized,
foo_client.tokens.authenticate,
token=foo_client.auth_token)
# Test that the user access has been disabled.
self.assertRaises(client_exceptions.Unauthorized,
self.get_client,
self.user_foo)
def test_delete_tenant_invalidates_token(self):
from keystoneclient import exceptions as client_exceptions
admin_client = self.get_client(admin=True)
foo_client = self.get_client(self.user_foo)
tenant_bar = admin_client.tenants.get(self.tenant_bar['id'])
# Delete the tenant.
tenant_bar.delete()
# Test that the token has been removed.
self.assertRaises(client_exceptions.Unauthorized,
foo_client.tokens.authenticate,
token=foo_client.auth_token)
# Test that the user access has been disabled.
self.assertRaises(client_exceptions.Unauthorized,
self.get_client,
self.user_foo)
def test_disable_user_invalidates_token(self):
from keystoneclient import exceptions as client_exceptions
admin_client = self.get_client(admin=True)
foo_client = self.get_client(self.user_foo)
admin_client.users.update_enabled(user=self.user_foo['id'],
enabled=False)
self.assertRaises(client_exceptions.Unauthorized,
foo_client.tokens.authenticate,
token=foo_client.auth_token)
self.assertRaises(client_exceptions.Unauthorized,
self.get_client,
self.user_foo)
def test_delete_user_invalidates_token(self):
from keystoneclient import exceptions as client_exceptions
admin_client = self.get_client(admin=True)
client = self.get_client(admin=False)
username = uuid.uuid4().hex
password = uuid.uuid4().hex
user_id = admin_client.users.create(
name=username, password=password, email=uuid.uuid4().hex).id
token_id = client.tokens.authenticate(
username=username, password=password).id
# token should be usable before the user is deleted
client.tokens.authenticate(token=token_id)
admin_client.users.delete(user=user_id)
# authenticate with a token should not work after the user is deleted
self.assertRaises(client_exceptions.Unauthorized,
client.tokens.authenticate,
token=token_id)
@mock.patch.object(timeutils, 'utcnow')
def test_token_expiry_maintained(self, mock_utcnow):
now = datetime.datetime.utcnow()
mock_utcnow.return_value = now
foo_client = self.get_client(self.user_foo)
orig_token = foo_client.service_catalog.catalog['token']
mock_utcnow.return_value = now + datetime.timedelta(seconds=1)
reauthenticated_token = foo_client.tokens.authenticate(
token=foo_client.auth_token)
self.assertCloseEnoughForGovernmentWork(
timeutils.parse_isotime(orig_token['expires']),
timeutils.parse_isotime(reauthenticated_token.expires))
def test_user_create_update_delete(self):
from keystoneclient import exceptions as client_exceptions
test_username = 'new_user'
client = self.get_client(admin=True)
user = client.users.create(name=test_username,
password='password',
email='[email protected]')
self.assertEqual(test_username, user.name)
user = client.users.get(user=user.id)
self.assertEqual(test_username, user.name)
user = client.users.update(user=user,
name=test_username,
email='[email protected]')
self.assertEqual('[email protected]', user.email)
# NOTE(termie): update_enabled doesn't return anything, probably a bug
client.users.update_enabled(user=user, enabled=False)
user = client.users.get(user.id)
self.assertFalse(user.enabled)
self.assertRaises(client_exceptions.Unauthorized,
self._client,
username=test_username,
password='password')
client.users.update_enabled(user, True)
user = client.users.update_password(user=user, password='password2')
self._client(username=test_username,
password='password2')
user = client.users.update_tenant(user=user, tenant='bar')
# TODO(ja): once keystonelight supports default tenant
# when you login without specifying tenant, the
# token should be scoped to tenant 'bar'
client.users.delete(user.id)
self.assertRaises(client_exceptions.NotFound, client.users.get,
user.id)
# Test creating a user with a tenant (auto-add to tenant)
user2 = client.users.create(name=test_username,
password='password',
email='[email protected]',
tenant_id='bar')
self.assertEqual(test_username, user2.name)
def test_update_default_tenant_to_existing_value(self):
client = self.get_client(admin=True)
user = client.users.create(
name=uuid.uuid4().hex,
password=uuid.uuid4().hex,
email=uuid.uuid4().hex,
tenant_id=self.tenant_bar['id'])
# attempting to update the tenant with the existing value should work
user = client.users.update_tenant(
user=user, tenant=self.tenant_bar['id'])
def test_user_create_no_string_password(self):
from keystoneclient import exceptions as client_exceptions
client = self.get_client(admin=True)
self.assertRaises(client_exceptions.BadRequest,
client.users.create,
name='test_user',
password=12345,
email=uuid.uuid4().hex)
def test_user_create_no_name(self):
from keystoneclient import exceptions as client_exceptions
client = self.get_client(admin=True)
self.assertRaises(client_exceptions.BadRequest,
client.users.create,
name="",
password=uuid.uuid4().hex,
email=uuid.uuid4().hex)
def test_user_create_404(self):
from keystoneclient import exceptions as client_exceptions
client = self.get_client(admin=True)
self.assertRaises(client_exceptions.NotFound,
client.users.create,
name=uuid.uuid4().hex,
password=uuid.uuid4().hex,
email=uuid.uuid4().hex,
tenant_id=uuid.uuid4().hex)
def test_user_get_404(self):
from keystoneclient import exceptions as client_exceptions
client = self.get_client(admin=True)
self.assertRaises(client_exceptions.NotFound,
client.users.get,
user=uuid.uuid4().hex)
def test_user_list_404(self):
from keystoneclient import exceptions as client_exceptions
client = self.get_client(admin=True)
self.assertRaises(client_exceptions.NotFound,
client.users.list,
tenant_id=uuid.uuid4().hex)
def test_user_update_404(self):
from keystoneclient import exceptions as client_exceptions
client = self.get_client(admin=True)
self.assertRaises(client_exceptions.NotFound,
client.users.update,
user=uuid.uuid4().hex)
def test_user_update_tenant(self):
client = self.get_client(admin=True)
tenant_id = uuid.uuid4().hex
user = client.users.update(user=self.user_foo['id'],
tenant_id=tenant_id)
self.assertEqual(tenant_id, user.tenant_id)
def test_user_update_password_404(self):
from keystoneclient import exceptions as client_exceptions
client = self.get_client(admin=True)
self.assertRaises(client_exceptions.NotFound,
client.users.update_password,
user=uuid.uuid4().hex,
password=uuid.uuid4().hex)
def test_user_delete_404(self):
from keystoneclient import exceptions as client_exceptions
client = self.get_client(admin=True)
self.assertRaises(client_exceptions.NotFound,
client.users.delete,
user=uuid.uuid4().hex)
def test_user_list(self):
client = self.get_client(admin=True)
users = client.users.list()
self.assertTrue(len(users) > 0)
user = users[0]
self.assertRaises(AttributeError, lambda: user.password)
def test_user_get(self):
client = self.get_client(admin=True)
user = client.users.get(user=self.user_foo['id'])
self.assertRaises(AttributeError, lambda: user.password)
def test_role_get(self):
client = self.get_client(admin=True)
role = client.roles.get(role=self.role_admin['id'])
self.assertEqual(self.role_admin['id'], role.id)
def test_role_crud(self):
from keystoneclient import exceptions as client_exceptions
test_role = 'new_role'
client = self.get_client(admin=True)
role = client.roles.create(name=test_role)
self.assertEqual(test_role, role.name)
role = client.roles.get(role=role.id)
self.assertEqual(test_role, role.name)
client.roles.delete(role=role.id)
self.assertRaises(client_exceptions.NotFound,
client.roles.delete,
role=role.id)
self.assertRaises(client_exceptions.NotFound,
client.roles.get,
role=role.id)
def test_role_create_no_name(self):
from keystoneclient import exceptions as client_exceptions
client = self.get_client(admin=True)
self.assertRaises(client_exceptions.BadRequest,
client.roles.create,
name="")
def test_role_get_404(self):
from keystoneclient import exceptions as client_exceptions
client = self.get_client(admin=True)
self.assertRaises(client_exceptions.NotFound,
client.roles.get,
role=uuid.uuid4().hex)
def test_role_delete_404(self):
from keystoneclient import exceptions as client_exceptions
client = self.get_client(admin=True)
self.assertRaises(client_exceptions.NotFound,
client.roles.delete,
role=uuid.uuid4().hex)
def test_role_list_404(self):
from keystoneclient import exceptions as client_exceptions
client = self.get_client(admin=True)
self.assertRaises(client_exceptions.NotFound,
client.roles.roles_for_user,
user=uuid.uuid4().hex,
tenant=uuid.uuid4().hex)
self.assertRaises(client_exceptions.NotFound,
client.roles.roles_for_user,
user=self.user_foo['id'],
tenant=uuid.uuid4().hex)
self.assertRaises(client_exceptions.NotFound,
client.roles.roles_for_user,
user=uuid.uuid4().hex,
tenant=self.tenant_bar['id'])
def test_role_list(self):
client = self.get_client(admin=True)
roles = client.roles.list()
# TODO(devcamcar): This assert should be more specific.
self.assertTrue(len(roles) > 0)
def test_service_crud(self):
from keystoneclient import exceptions as client_exceptions
client = self.get_client(admin=True)
service_name = uuid.uuid4().hex
service_type = uuid.uuid4().hex
service_desc = uuid.uuid4().hex
# create & read
service = client.services.create(name=service_name,
service_type=service_type,
description=service_desc)
self.assertEqual(service_name, service.name)
self.assertEqual(service_type, service.type)
self.assertEqual(service_desc, service.description)
service = client.services.get(id=service.id)
self.assertEqual(service_name, service.name)
self.assertEqual(service_type, service.type)
self.assertEqual(service_desc, service.description)
service = [x for x in client.services.list() if x.id == service.id][0]
self.assertEqual(service_name, service.name)
self.assertEqual(service_type, service.type)
self.assertEqual(service_desc, service.description)
# update is not supported in API v2...
# delete & read
client.services.delete(id=service.id)
self.assertRaises(client_exceptions.NotFound,
client.services.get,
id=service.id)
services = [x for x in client.services.list() if x.id == service.id]
self.assertEqual(0, len(services))
def test_service_delete_404(self):
from keystoneclient import exceptions as client_exceptions
client = self.get_client(admin=True)
self.assertRaises(client_exceptions.NotFound,
client.services.delete,
id=uuid.uuid4().hex)
def test_service_get_404(self):
from keystoneclient import exceptions as client_exceptions
client = self.get_client(admin=True)
self.assertRaises(client_exceptions.NotFound,
client.services.get,
id=uuid.uuid4().hex)
def test_endpoint_delete_404(self):
from keystoneclient import exceptions as client_exceptions
client = self.get_client(admin=True)
self.assertRaises(client_exceptions.NotFound,
client.endpoints.delete,
id=uuid.uuid4().hex)
def test_admin_requires_adminness(self):
from keystoneclient import exceptions as client_exceptions
# FIXME(ja): this should be Unauthorized
exception = client_exceptions.ClientException
two = self.get_client(self.user_two, admin=True) # non-admin user
# USER CRUD
self.assertRaises(exception,
two.users.list)
self.assertRaises(exception,
two.users.get,
user=self.user_two['id'])
self.assertRaises(exception,
two.users.create,
name='oops',
password='password',
email='[email protected]')
self.assertRaises(exception,
two.users.delete,
user=self.user_foo['id'])
# TENANT CRUD
self.assertRaises(exception,
two.tenants.list)
self.assertRaises(exception,
two.tenants.get,
tenant_id=self.tenant_bar['id'])
self.assertRaises(exception,
two.tenants.create,
tenant_name='oops',
description="shouldn't work!",
enabled=True)
self.assertRaises(exception,
two.tenants.delete,
tenant=self.tenant_baz['id'])
# ROLE CRUD
self.assertRaises(exception,
two.roles.get,
role=self.role_admin['id'])
self.assertRaises(exception,
two.roles.list)
self.assertRaises(exception,
two.roles.create,
name='oops')
self.assertRaises(exception,
two.roles.delete,
role=self.role_admin['id'])
# TODO(ja): MEMBERSHIP CRUD
# TODO(ja): determine what else todo
def test_tenant_add_and_remove_user(self):
client = self.get_client(admin=True)
client.roles.add_user_role(tenant=self.tenant_bar['id'],
user=self.user_two['id'],
role=self.role_other['id'])
user_refs = client.tenants.list_users(tenant=self.tenant_bar['id'])
self.assertIn(self.user_two['id'], [x.id for x in user_refs])
client.roles.remove_user_role(tenant=self.tenant_bar['id'],
user=self.user_two['id'],
role=self.role_other['id'])
roles = client.roles.roles_for_user(user=self.user_foo['id'],
tenant=self.tenant_bar['id'])
self.assertNotIn(self.role_other['id'], roles)
user_refs = client.tenants.list_users(tenant=self.tenant_bar['id'])
self.assertNotIn(self.user_two['id'], [x.id for x in user_refs])
def test_user_role_add_404(self):
from keystoneclient import exceptions as client_exceptions
client = self.get_client(admin=True)
self.assertRaises(client_exceptions.NotFound,
client.roles.add_user_role,
tenant=uuid.uuid4().hex,
user=self.user_foo['id'],
role=self.role_member['id'])
self.assertRaises(client_exceptions.NotFound,
client.roles.add_user_role,
tenant=self.tenant_baz['id'],
user=self.user_foo['id'],
role=uuid.uuid4().hex)
def test_user_role_add_no_user(self):
# If add_user_role and user doesn't exist, doesn't fail.
client = self.get_client(admin=True)
client.roles.add_user_role(tenant=self.tenant_baz['id'],
user=uuid.uuid4().hex,
role=self.role_member['id'])
def test_user_role_remove_404(self):
from keystoneclient import exceptions as client_exceptions
client = self.get_client(admin=True)
self.assertRaises(client_exceptions.NotFound,
client.roles.remove_user_role,
tenant=uuid.uuid4().hex,
user=self.user_foo['id'],
role=self.role_member['id'])
self.assertRaises(client_exceptions.NotFound,
client.roles.remove_user_role,
tenant=self.tenant_baz['id'],
user=uuid.uuid4().hex,
role=self.role_member['id'])
self.assertRaises(client_exceptions.NotFound,
client.roles.remove_user_role,
tenant=self.tenant_baz['id'],
user=self.user_foo['id'],
role=uuid.uuid4().hex)
self.assertRaises(client_exceptions.NotFound,
client.roles.remove_user_role,
tenant=self.tenant_baz['id'],
user=self.user_foo['id'],
role=self.role_member['id'])
def test_tenant_list_marker(self):
client = self.get_client()
# Add two arbitrary tenants to user for testing purposes
for i in range(2):
tenant_id = uuid.uuid4().hex
tenant = {'name': 'tenant-%s' % tenant_id, 'id': tenant_id,
'domain_id': DEFAULT_DOMAIN_ID}
self.assignment_api.create_project(tenant_id, tenant)
self.assignment_api.add_user_to_project(tenant_id,
self.user_foo['id'])
tenants = client.tenants.list()
self.assertEqual(3, len(tenants))
tenants_marker = client.tenants.list(marker=tenants[0].id)
self.assertEqual(2, len(tenants_marker))
self.assertEqual(tenants_marker[0].name, tenants[1].name)
self.assertEqual(tenants_marker[1].name, tenants[2].name)
def test_tenant_list_marker_not_found(self):
from keystoneclient import exceptions as client_exceptions
client = self.get_client()
self.assertRaises(client_exceptions.BadRequest,
client.tenants.list, marker=uuid.uuid4().hex)
def test_tenant_list_limit(self):
client = self.get_client()
# Add two arbitrary tenants to user for testing purposes
for i in range(2):
tenant_id = uuid.uuid4().hex
tenant = {'name': 'tenant-%s' % tenant_id, 'id': tenant_id,
'domain_id': DEFAULT_DOMAIN_ID}
self.assignment_api.create_project(tenant_id, tenant)
self.assignment_api.add_user_to_project(tenant_id,
self.user_foo['id'])
tenants = client.tenants.list()
self.assertEqual(3, len(tenants))
tenants_limited = client.tenants.list(limit=2)
self.assertEqual(2, len(tenants_limited))
self.assertEqual(tenants[0].name, tenants_limited[0].name)
self.assertEqual(tenants[1].name, tenants_limited[1].name)
def test_tenant_list_limit_bad_value(self):
from keystoneclient import exceptions as client_exceptions
client = self.get_client()
self.assertRaises(client_exceptions.BadRequest,
client.tenants.list, limit='a')
self.assertRaises(client_exceptions.BadRequest,
client.tenants.list, limit=-1)
def test_roles_get_by_user(self):
client = self.get_client(admin=True)
roles = client.roles.roles_for_user(user=self.user_foo['id'],
tenant=self.tenant_bar['id'])
self.assertTrue(len(roles) > 0)
def test_user_can_update_passwd(self):
client = self.get_client(self.user_two)
token_id = client.auth_token
new_password = uuid.uuid4().hex
# TODO(derekh): Update to use keystoneclient when available
class FakeResponse(object):
def start_fake_response(self, status, headers):
self.response_status = int(status.split(' ', 1)[0])
self.response_headers = dict(headers)
responseobject = FakeResponse()
req = webob.Request.blank(
'/v2.0/OS-KSCRUD/users/%s' % self.user_two['id'],
headers={'X-Auth-Token': token_id})
req.method = 'PATCH'
req.body = ('{"user":{"password":"%s","original_password":"%s"}}' %
(new_password, self.user_two['password']))
self.public_server.application(req.environ,
responseobject.start_fake_response)
self.user_two['password'] = new_password
self.get_client(self.user_two)
def test_user_cannot_update_other_users_passwd(self):
from keystoneclient import exceptions as client_exceptions
client = self.get_client(self.user_two)
token_id = client.auth_token
new_password = uuid.uuid4().hex
# TODO(derekh): Update to use keystoneclient when available
class FakeResponse(object):
def start_fake_response(self, status, headers):
self.response_status = int(status.split(' ', 1)[0])
self.response_headers = dict(headers)
responseobject = FakeResponse()
req = webob.Request.blank(
'/v2.0/OS-KSCRUD/users/%s' % self.user_foo['id'],
headers={'X-Auth-Token': token_id})
req.method = 'PATCH'
req.body = ('{"user":{"password":"%s","original_password":"%s"}}' %
(new_password, self.user_two['password']))
self.public_server.application(req.environ,
responseobject.start_fake_response)
self.assertEqual(403, responseobject.response_status)
self.user_two['password'] = new_password
self.assertRaises(client_exceptions.Unauthorized,
self.get_client, self.user_two)
def test_tokens_after_user_update_passwd(self):
from keystoneclient import exceptions as client_exceptions
client = self.get_client(self.user_two)
token_id = client.auth_token
new_password = uuid.uuid4().hex
# TODO(derekh): Update to use keystoneclient when available
class FakeResponse(object):
def start_fake_response(self, status, headers):
self.response_status = int(status.split(' ', 1)[0])
self.response_headers = dict(headers)
responseobject = FakeResponse()
req = webob.Request.blank(
'/v2.0/OS-KSCRUD/users/%s' % self.user_two['id'],
headers={'X-Auth-Token': token_id})
req.method = 'PATCH'
req.body = ('{"user":{"password":"%s","original_password":"%s"}}' %
(new_password, self.user_two['password']))
rv = self.public_server.application(
req.environ,
responseobject.start_fake_response)
response_json = jsonutils.loads(rv.pop())
new_token_id = response_json['access']['token']['id']
self.assertRaises(client_exceptions.Unauthorized, client.tenants.list)
client.auth_token = new_token_id
client.tenants.list()
class KcMasterTestCase(CompatTestCase, KeystoneClientTests):
    # Run the full KeystoneClientTests suite against the master branch of
    # python-keystoneclient, checked out from KEYSTONECLIENT_REPO by the
    # CompatTestCase machinery.
    checkout_info = (KEYSTONECLIENT_REPO, 'master')
class KcOptTestCase(KcMasterTestCase):
    # Set KSCTEST_PATH to the keystoneclient directory, then run this test.
    #
    # For example, to test your local keystoneclient,
    #
    # KSCTEST_PATH=/opt/stack/python-keystoneclient \
    #  tox -e py27 test_keystoneclient.KcOptTestCase

    def setUp(self):
        """Point checkout_info at a local client tree, or skip the suite."""
        self.checkout_info = os.environ.get('KSCTEST_PATH')
        if not self.checkout_info:
            # skipTest() is the supported unittest/testtools API;
            # TestCase.skip() is only a deprecated testtools alias and does
            # not exist on plain unittest.TestCase.
            self.skipTest('Set KSCTEST_PATH env to test with local client')
        super(KcOptTestCase, self).setUp()
|
apache-2.0
|
EduPepperPD/pepper2013
|
common/djangoapps/course_groups/tests/tests.py
|
32
|
10459
|
import django.test
from django.contrib.auth.models import User
from django.conf import settings
from django.test.utils import override_settings
from course_groups.models import CourseUserGroup
from course_groups.cohorts import (get_cohort, get_course_cohorts,
is_commentable_cohorted, get_cohort_by_name)
from xmodule.modulestore.django import modulestore, clear_existing_modulestores
from xmodule.modulestore.tests.django_utils import mixed_store_config
# NOTE: running this with the lms.envs.test config works without
# manually overriding the modulestore. However, running with
# cms.envs.test doesn't.
# Root of the shared test course data, plus the course-id -> backend mapping
# used to build a mixed (XML + default) modulestore configuration that the
# @override_settings decorator below applies to every test in this module.
TEST_DATA_DIR = settings.COMMON_TEST_DATA_ROOT
TEST_MAPPING = {'edX/toy/2012_Fall': 'xml'}
TEST_DATA_MIXED_MODULESTORE = mixed_store_config(TEST_DATA_DIR, TEST_MAPPING)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestCohorts(django.test.TestCase):
    """Tests for cohort assignment, auto-cohorting, and cohorted discussions."""

    @staticmethod
    def topic_name_to_id(course, name):
        """
        Given a discussion topic name, return an id for that name (includes
        course and url_name).
        """
        return "{course}_{run}_{name}".format(course=course.location.course,
                                              run=course.url_name,
                                              name=name)

    @staticmethod
    def config_course_cohorts(course, discussions,
                              cohorted,
                              cohorted_discussions=None,
                              auto_cohort=None,
                              auto_cohort_groups=None):
        """
        Given a course with no discussion set up, add the discussions and set
        the cohort config appropriately.

        Arguments:
            course: CourseDescriptor
            discussions: list of topic names strings. Picks ids and sort_keys
                automatically.
            cohorted: bool.
            cohorted_discussions: optional list of topic names. If specified,
                converts them to use the same ids as topic names.
            auto_cohort: optional bool.
            auto_cohort_groups: optional list of strings
                (names of groups to put students into).

        Returns:
            Nothing -- modifies course in place.
        """
        def to_id(name):
            # Qualify the bare topic name with course/run so ids are unique.
            return TestCohorts.topic_name_to_id(course, name)

        topics = dict((name, {"sort_key": "A",
                              "id": to_id(name)})
                      for name in discussions)
        course.discussion_topics = topics

        d = {"cohorted": cohorted}
        if cohorted_discussions is not None:
            d["cohorted_discussions"] = [to_id(name)
                                         for name in cohorted_discussions]
        if auto_cohort is not None:
            d["auto_cohort"] = auto_cohort
        if auto_cohort_groups is not None:
            d["auto_cohort_groups"] = auto_cohort_groups
        course.cohort_config = d

    def setUp(self):
        """
        Make sure that course is reloaded every time--clear out the modulestore.
        """
        clear_existing_modulestores()

    def test_get_cohort(self):
        """
        Make sure get_cohort() does the right thing when the course is cohorted
        """
        course = modulestore().get_course("edX/toy/2012_Fall")
        self.assertEqual(course.id, "edX/toy/2012_Fall")
        self.assertFalse(course.is_cohorted)

        user = User.objects.create(username="test", email="[email protected]")
        other_user = User.objects.create(username="test2", email="[email protected]")

        self.assertIsNone(get_cohort(user, course.id), "No cohort created yet")

        cohort = CourseUserGroup.objects.create(name="TestCohort",
                                                course_id=course.id,
                                                group_type=CourseUserGroup.COHORT)
        cohort.users.add(user)

        # Membership alone is not enough: the course itself must be cohorted.
        self.assertIsNone(get_cohort(user, course.id),
                          "Course isn't cohorted, so shouldn't have a cohort")

        # Make the course cohorted...
        self.config_course_cohorts(course, [], cohorted=True)

        self.assertEquals(get_cohort(user, course.id).id, cohort.id,
                          "Should find the right cohort")
        self.assertEquals(get_cohort(other_user, course.id), None,
                          "other_user shouldn't have a cohort")

    def test_auto_cohorting(self):
        """
        Make sure get_cohort() does the right thing when the course is auto_cohorted
        """
        course = modulestore().get_course("edX/toy/2012_Fall")
        self.assertEqual(course.id, "edX/toy/2012_Fall")
        self.assertFalse(course.is_cohorted)

        user1 = User.objects.create(username="test", email="[email protected]")
        user2 = User.objects.create(username="test2", email="[email protected]")
        user3 = User.objects.create(username="test3", email="[email protected]")

        cohort = CourseUserGroup.objects.create(name="TestCohort",
                                                course_id=course.id,
                                                group_type=CourseUserGroup.COHORT)

        # user1 manually added to a cohort
        cohort.users.add(user1)

        # Make the course auto cohorted...
        self.config_course_cohorts(course, [], cohorted=True,
                                   auto_cohort=True,
                                   auto_cohort_groups=["AutoGroup"])

        # Manual membership wins over auto-assignment.
        self.assertEquals(get_cohort(user1, course.id).id, cohort.id,
                          "user1 should stay put")
        self.assertEquals(get_cohort(user2, course.id).name, "AutoGroup",
                          "user2 should be auto-cohorted")

        # Now make the group list empty
        self.config_course_cohorts(course, [], cohorted=True,
                                   auto_cohort=True,
                                   auto_cohort_groups=[])

        self.assertEquals(get_cohort(user3, course.id), None,
                          "No groups->no auto-cohorting")

        # Now make it different
        self.config_course_cohorts(course, [], cohorted=True,
                                   auto_cohort=True,
                                   auto_cohort_groups=["OtherGroup"])

        self.assertEquals(get_cohort(user3, course.id).name, "OtherGroup",
                          "New list->new group")
        # Existing assignments survive a config change.
        self.assertEquals(get_cohort(user2, course.id).name, "AutoGroup",
                          "user2 should still be in originally placed cohort")

    def test_auto_cohorting_randomization(self):
        """
        Make sure get_cohort() randomizes properly.
        """
        course = modulestore().get_course("edX/toy/2012_Fall")
        self.assertEqual(course.id, "edX/toy/2012_Fall")
        self.assertFalse(course.is_cohorted)

        groups = ["group_{0}".format(n) for n in range(5)]
        self.config_course_cohorts(course, [], cohorted=True,
                                   auto_cohort=True,
                                   auto_cohort_groups=groups)

        # Assign 100 users to cohorts
        for i in range(100):
            user = User.objects.create(username="test_{0}".format(i),
                                       email="a@b{0}.com".format(i))
            get_cohort(user, course.id)

        # Now make sure that the assignment was at least vaguely random:
        # each cohort should have at least 1, and fewer than 50 students.
        # (with 5 groups, probability of 0 users in any group is about
        # .8**100= 2.0e-10)
        for cohort_name in groups:
            cohort = get_cohort_by_name(course.id, cohort_name)
            num_users = cohort.users.count()
            self.assertGreater(num_users, 1)
            self.assertLess(num_users, 50)

    def test_get_course_cohorts(self):
        """get_course_cohorts() should only list the given course's cohorts."""
        course1_id = 'a/b/c'
        course2_id = 'e/f/g'

        # add some cohorts to course 1
        cohort = CourseUserGroup.objects.create(name="TestCohort",
                                                course_id=course1_id,
                                                group_type=CourseUserGroup.COHORT)
        cohort = CourseUserGroup.objects.create(name="TestCohort2",
                                                course_id=course1_id,
                                                group_type=CourseUserGroup.COHORT)

        # second course should have no cohorts
        self.assertEqual(get_course_cohorts(course2_id), [])

        cohorts = sorted([c.name for c in get_course_cohorts(course1_id)])
        self.assertEqual(cohorts, ['TestCohort', 'TestCohort2'])

    def test_is_commentable_cohorted(self):
        """is_commentable_cohorted() honors course and per-topic cohort config."""
        course = modulestore().get_course("edX/toy/2012_Fall")
        self.assertFalse(course.is_cohorted)

        def to_id(name):
            return self.topic_name_to_id(course, name)

        # no topics
        self.assertFalse(is_commentable_cohorted(course.id, to_id("General")),
                         "Course doesn't even have a 'General' topic")

        # not cohorted
        self.config_course_cohorts(course, ["General", "Feedback"],
                                   cohorted=False)
        self.assertFalse(is_commentable_cohorted(course.id, to_id("General")),
                         "Course isn't cohorted")

        # cohorted, but top level topics aren't
        self.config_course_cohorts(course, ["General", "Feedback"],
                                   cohorted=True)
        self.assertTrue(course.is_cohorted)
        self.assertFalse(is_commentable_cohorted(course.id, to_id("General")),
                         "Course is cohorted, but 'General' isn't.")
        self.assertTrue(
            is_commentable_cohorted(course.id, to_id("random")),
            "Non-top-level discussion is always cohorted in cohorted courses.")

        # cohorted, including "Feedback" top-level topics aren't
        self.config_course_cohorts(course, ["General", "Feedback"],
                                   cohorted=True,
                                   cohorted_discussions=["Feedback"])
        self.assertTrue(course.is_cohorted)
        self.assertFalse(is_commentable_cohorted(course.id, to_id("General")),
                         "Course is cohorted, but 'General' isn't.")
        self.assertTrue(
            is_commentable_cohorted(course.id, to_id("Feedback")),
            "Feedback was listed as cohorted. Should be.")
|
agpl-3.0
|
Noviat/odoo
|
addons/report_webkit/convert.py
|
322
|
2581
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2010 Camptocamp SA (http://www.camptocamp.com)
# All Right Reserved
#
# Author : Nicolas Bessi (Camptocamp)
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
from openerp.tools import convert
original_xml_import = convert.xml_import
class WebkitXMLImport(original_xml_import):
    # Override of xml import in order to add webkit_header tag in report tag.
    # As discussed with the R&D Team, the current XML processing API does
    # not offer enough flexibility to do it in a cleaner way.
    # The solution is not meant to be a long term solution, but at least
    # allows chaining of several overrides of the _tag_report method,
    # and does not require a copy/paste of the original code.

    def _tag_report(self, cr, rec, data_node=None, mode=None):
        """Handle a <report> XML tag, additionally wiring a 'webkit_header'
        attribute onto reports of type 'webkit'.

        The attribute may be an XML id (resolved via self.id_get) or one of
        the literal strings 'False'/'0'/'None' to clear the header.
        NOTE(review): `mode` is accepted but not forwarded to super() --
        presumably for signature compatibility in the override chain; verify.
        """
        report_id = super(WebkitXMLImport, self)._tag_report(cr, rec, data_node)
        if rec.get('report_type') == 'webkit':
            header = rec.get('webkit_header')
            if header:
                if header in ('False', '0', 'None'):
                    # Explicit "no header" markers clear the field.
                    webkit_header_id = False
                else:
                    webkit_header_id = self.id_get(cr, header)
                self.pool.get('ir.actions.report.xml').write(cr, self.uid,
                    report_id, {'webkit_header': webkit_header_id})
        return report_id


# Replace the stock importer so webkit reports get their header wired up.
convert.xml_import = WebkitXMLImport
|
agpl-3.0
|
abhiatgithub/shogun-toolbox
|
examples/undocumented/python_modular/multiclass_randomforest_modular.py
|
3
|
1195
|
#!/usr/bin/env python
from numpy import array
# Paths to the example data sets shipped alongside the Shogun examples.
traindat = '../data/fm_train_real.dat'
testdat = '../data/fm_test_real.dat'
label_traindat = '../data/label_train_multiclass.dat'

# set both input attributes as not nominal (ie. continuous)
feattypes = array([False, False])

parameter_list = [[traindat, testdat, label_traindat, feattypes]]


def multiclass_randomforest_modular(train=traindat, test=testdat, labels=label_traindat, ft=feattypes):
    """Train a 20-tree random forest on the multiclass example data and
    classify the test set.

    Returns (forest, predicted_labels) on success, or None when the
    Shogun python bindings are unavailable.
    """
    try:
        from modshogun import RealFeatures, MulticlassLabels, CSVFile, RandomForest, MajorityVote
    except ImportError:
        print("Could not import Shogun modules")
        return

    # Wrap the CSV inputs in Shogun feature/label containers.
    training_features = RealFeatures(CSVFile(train))
    testing_features = RealFeatures(CSVFile(test))
    training_labels = MulticlassLabels(CSVFile(labels))

    # Build the forest: 20 trees, attribute-subset size 1, majority voting.
    forest = RandomForest(training_features, training_labels, 20, 1)
    forest.set_feature_types(ft)
    forest.set_combination_rule(MajorityVote())
    forest.train()

    # Classify test data
    predictions = forest.apply_multiclass(testing_features).get_labels()
    return forest, predictions


if __name__ == '__main__':
    print('RandomForest')
    multiclass_randomforest_modular(*parameter_list[0])
|
gpl-3.0
|
philpot/pymod
|
oldwatdb.py
|
1
|
17926
|
#!/usr/bin/python
# Filename: watdb.py
'''
watdb nee trbotdb
@author: Andrew Philpot
@version 0.10
Usage: python watdb.py
Options:
\t-h, --help:\tprint help to STDOUT and quit
\t-v, --verbose:\tverbose output
'''
## major functionality in this module beginning trbot/wat merge (0.6)
##
## 1. layer above web.py's web.database or raw MySQLdb, using jsonconf
## 2. query support layer, tools for query assert/fetch/update invariants
##
##
import sys
import getopt
import jsonconf
import os
import util
from util import iterChunks
import re
## v 0.10
import web
assert web.__version__ == '0.37'
# _orig_interpolate=web.db._interpolate # needed?
def _interpolate_ignore_dollar_sign(format):
# print "enter _interpolate_ignore_dollar_sign"
return [(0, format)]
web.db._interpolate = _interpolate_ignore_dollar_sign
## end v 0.10
from web.db import sqlquote
from collections import defaultdict
from watlog import watlog
logger = watlog("wat.watdb")
logger.info('wat.watdb initialized')
# WE HAVE TWO ENGINES: MySQLdb and webpy
# note that MySQLdb is a zipped python egg and needs to be be able to
# uncompress into a python-eggs directory. For generality when
# running as a web server, I placed a directive in httpd.conf, but one
# could also do something like
# os.environ['PYTHON_EGG__CACHE'] = '/tmp/python-eggs'
import MySQLdb
import web
web.config.debug = False
# Module version / VCS revision stamps.
VERSION = '0.10'
REVISION = "$Revision: 21852 $"
VERBOSE = True
# Separator used when assembling SQL column/value lists.
COMMA = ", "
BACKSLASH="\x5c"
SINGLEQUOTE="\x27"
# Must prefix NUL, C-Z, single quote/apostrophe, backslash with backslash
ESCAPABLE="\x00\x1a\x27\x5c"
# Defaults for Watdb(): which driver layer to use and which json conf to load.
ENGINE = 'webpy'
CONF = 'test'
# Status codes returned (as the second tuple element) by ensureId().
INSERTED=1
FETCHED=2
FETCHFAILED=3
SOLOINSERTNOOP=4
TESTING=5
# def kwoteValue(v):
# if str(v).upper() == "NULL":
# return "NULL"
# else:
# return (SINGLEQUOTE
# +
# "".join([BACKSLASH + c if c in ESCAPABLE else c for c in str(v)])
# +
# SINGLEQUOTE)
def kwoteValue(v):
    """Render *v* as a SQL-quoted literal using web.py's sqlquote.

    (Original author's note: would prefer '\'' escaping over reverting
    to double quotes.)
    """
    quoted = sqlquote(v)
    return str(quoted)
def wh(column_name, value, rel='='):
    """Build a parenthesized SQL WHERE fragment: (`column` rel quoted-value).

    Raises ValueError for a falsy *value*. (Original author's open question:
    is sqlquote good enough to prevent SQL injection?)
    """
    if not value:
        raise ValueError
    return """(`%s` %s %s)""" % (column_name, rel, kwoteValue(value))
# 16 January 2013 by Philpot
# Intended as context manager for web.config.debug
# 17 January 2013 by Philpot
# I now believe web.config.debug only works when you do it at the beginning,
# before instantiating anything
class EngineVerbosity():
    """Context manager that temporarily sets web.config.debug.

    Meant to toggle web.py's query echoing around a block of queries.
    (Per the original author's notes, the flag may only take effect when
    set before web.py objects are instantiated -- "should work, but not
    being reflected in calls to web.db.query".)
    """

    def __init__(self, setting):
        self._setting = setting

    def __enter__(self):
        # Remember the current flag so __exit__ can restore it.
        self._old = web.config.debug
        web.config.debug = self._setting
        return web.config.debug

    def __exit__(self, type, value, traceback):
        # Restore whatever debug setting was active before entry.
        web.config.debug = self._old
# Engine identifier -> name of the Watdb subclass implementing it.
# Unknown engines fall back to the generic "Watdb".
watdbClassNames = defaultdict(lambda: "Watdb",
                              webpy="WebpyWatdb",
                              MySQLdb="MySQLdbWatdb")


def watdbClassName(engine):
    """Return the class-name string registered for *engine*."""
    return watdbClassNames[engine]


def watdbClass(engine):
    """Resolve *engine* to the class object defined in this module, if any."""
    return globals().get(watdbClassName(engine))
class Watdb(object):
    """Engine-agnostic database helper.

    Reads connection settings from a json config (via jsonconf), then
    specializes itself -- by swapping __class__ -- into WebpyWatdb or
    MySQLdbWatdb according to the requested/configured engine.
    """

    def __init__(self, verbose=VERBOSE, conf=CONF, engine=ENGINE, test=False):
        # `test` may be a bool or a callable returning bool; see testing().
        self.verbose = verbose
        self.cfg = None
        self.conf = conf
        self.test = test
        if self.conf:
            self.readConfig()
        # Explicit engine argument wins; otherwise fall back to the config's.
        self.specialize(engine or self.cfg.get('engine'))

    def __unicode__(self):
        # hasattr guards keep repr safe on half-initialized instances.
        engine = hasattr(self, "engine") and self.engine
        conf = hasattr(self, "conf") and self.conf
        return '<Watdb %s %s>' % (engine, conf)

    def __str__(self):
        return self.__unicode__()

    def __repr__(self):
        return self.__unicode__()

    def readConfig(self):
        """Load db config named self.conf from ./conf and remember its engine."""
        root = os.path.join(sys.path[0], "conf")
        self.cfg = jsonconf.chooseJson(self.conf, 'db', root=root)
        self.engine = self.cfg['engine']

    def specialize(self, engine):
        """Rebind __class__ to the engine-specific subclass (see watdbClass)."""
        try:
            self.__class__ = watdbClass(engine)
            self.engine = engine
        except:
            # NOTE(review): bare except hides real errors (typo'd engine
            # names resolve to None and raise TypeError) behind a warning.
            logger.warning("Failed to specialize %s to %s" % (self,engine))

    def testing(self):
        """True when in test mode; self.test may be a flag or a callable."""
        test = hasattr(self, "test") and self.test
        if test:
            if hasattr(test, "__call__"):
                return test()
            else:
                return True
        return False

    def insertionQuery(self, table, formals, values):
        """INSERT IGNORE is MySQL specific..."""
        # self.kwote is supplied by the engine-specific subclasses.
        return ("INSERT IGNORE INTO `%s`" % table +
                " (" + COMMA.join(["`%s`" % formal for formal in formals]) +") " +
                " VALUES (" + COMMA.join([self.kwote(value) for value in values]) + ")")

    def fetchQuery(self, table, *columnsAndValues):
        "Build a SELECT-id query from flat column,value pairs. untested"
        j = " AND ".join(["(`%s`=%s)" % (column,self.kwote(value))
                          for column,value
                          in iterChunks(columnsAndValues, 2)])
        query = ("SELECT id FROM `%s`" % table
                 +
                 (" WHERE " if columnsAndValues else "")
                 +
                 j)
        return query
## rest of methods unported
# def maybeQuery(owner, sql):
# """assert SQL, return the record number (if succeed)"""
# if testing(owner):
# logger.info(sql)
# return -1
# else:
# db = owner.db
# cur = db.cursor()
# cur.execute(sql, ())
# return db.insert_id() or None
# def maybeFetch(owner, sql):
# """assert SQL, return the record number (if succeed)"""
# if testing(owner):
# logger.info(sql)
# return []
# else:
# db = owner.db
# cur = db.cursor()
# cur.execute(sql, ())
# rows = cur.fetchall()
# return rows
# def mqfi(owner, sql, table, *columnsAndValues):
# return maybeQuery(owner, sql) or fetchId(owner, table, *columnsAndValues)
# def fetchId(owner, table, *columnsAndValues):
# if testing(owner):
# return -1
# else:
# sql = ("select id from `%s` where" % table +
# " and ".join(["(`%s`=%s)" % (column,kwote(owner,value))
# for column,value
# in iterChunks(columnsAndValues, 2)]))
# db = owner.db
# cur = db.cursor()
# cur.execute(sql, ())
# return cur.fetchone()[0]
# def updateFreq(owner, table, *columnsAndValues):
# if testing(owner):
# return -1
# else:
# sql = ("update " + table + " set `freq`=`freq`+1 where" +
# " and ".join(["(`%s`=%s)" % (column,kwote(owner,value))
# for column,value
# in iterChunks(columnsAndValues, 2)]))
# db = owner.db
# cur = db.cursor()
# cur.execute(sql, ())
# def insertionQuery(owner, table, formals, values):
# return ("INSERT IGNORE INTO `%s`" % table +
# " (" + COMMA.join(["`%s`" % formal for formal in formals]) +") " +
# " VALUES (" + COMMA.join([kwote(owner, value) for value in values]) + ")")
# def fetchQuery(owner, table, *columnsAndValues):
# j = " AND ".join(["(`%s`=%s)" % (column,kwote(owner,value))
# for column,value
# in iterChunks(columnsAndValues, 2)])
# query = ("SELECT id FROM `%s`" % table
# +
# (" WHERE " if columnsAndValues else "")
# +
# j)
# return query
# def ensureId(owner, insert, fetch):
# test = owner.test
# if testing(owner):
# logger.info(insert or "" + "\n" + fetch or "")
# return -1
# else:
# db = owner.db
# cur = db.cursor()
# insert = re.sub(r"""%""", "%%", insert)
# cur.execute(insert, ())
# id = db.insert_id() or None
# if id and id>0:
# return id
# else:
# if fetch:
# fetch = re.sub(r"""%""", "%%", fetch)
# cur.execute(fetch, ())
# all = cur.fetchone()
# return all[0] if all else None
# else:
# logger.warning("solo insert was no-op")
# return None
# Process-wide cache of live connections, keyed by the full DSN tuple
# (dbn, user, password, dbname, host).
cxns = dict()


def findCxn(key):
    """Return the cached connection registered under *key*, or None."""
    return cxns.get(key)
class WebpyWatdb(Watdb):
    """Watdb specialization backed by web.py's web.database layer.

    Instances are produced by Watdb.specialize() swapping __class__; the
    class is not meant to be constructed directly.
    """

    def __init__(self, verbose=VERBOSE, engine=ENGINE, conf=CONF):
        # Direct construction would skip Watdb's config setup; just warn.
        logger.warning("Don't call directly")

    def connect(self):
        """Open (or reuse from the module cache) the web.py connection
        described by self.cfg; returns self for chaining."""
        cfg = self.cfg
        key = ("mysql",cfg['user'],cfg['password'],cfg['dsname'],cfg['host'])
        found = findCxn(key)
        if found:
            self.cxn = found
            # web.py connections act as their own cursor.
            self.cursor = lambda: self.cxn
            return self
        else:
            self.cxn = web.database(dbn='mysql', user=cfg['user'], passwd=cfg['password'], db=cfg['dsname'], host=cfg['host'])
            self.cursor = lambda: self.cxn
            cxns[key] = self.cxn
            return self

    def disconnect(self):
        # web.py manages pooling itself; just drop our reference.
        self.cxn = None
        return self.cxn

    def kwote(self, thing):
        """SQL-quote a value (module-level kwoteValue)."""
        return kwoteValue(thing)

    def maybeFetch(self, sql):
        """assumes connected. assert SQL, return the rows"""
        if self.testing():
            logger.info(sql)
            return []
        else:
            with EngineVerbosity(self.verbose):
                rows = self.cxn.query(sql)
            return rows

    def maybeQuery(self, sql):
        """assumes connected. assert SQL, return the record number (if succeed)"""
        if self.testing():
            logger.info(sql)
            return -1
        else:
            lid = None
            with EngineVerbosity(self.verbose):
                succeed = self.cxn.query(sql)
                if succeed:
                    # Fetch the auto-increment id produced by the insert.
                    lid = int(self.cxn.query('select last_insert_id() as id')[0].id)
            return lid

    def fetchId(self, table, *columnsAndValues):
        """Return the id of the row in *table* matching the flat
        column,value argument pairs; falsy when nothing matches."""
        if self.testing():
            return -1
        else:
            sql = ("select id from `%s` where " % table +
                   " and ".join(["(`%s`=%s)" % (column,self.kwote(value))
                                 for column,value
                                 in iterChunks(columnsAndValues, 2)]))
            with EngineVerbosity(self.verbose):
                rows = self.cxn.query(sql)
            return rows and int(rows[0].id)

    def mqfi(self, sql, table, *columnsAndValues):
        """SQL is probably insertionQuery"""
        # Insert, falling back to fetching the pre-existing row's id when
        # the INSERT IGNORE turned out to be a no-op.
        return self.maybeQuery(sql) or self.fetchId(table, *columnsAndValues)

    # is this ever used? It should be
    def updateFreq(self, table, *columnsAndValues):
        """Increment the `freq` counter on rows matching the column,value pairs."""
        if self.testing():
            return -1
        else:
            sql = ("update " + table + " set `freq`=`freq`+1 where " +
                   " and ".join(["(`%s`=%s)" % (column,self.kwote(value))
                                 for column,value
                                 in iterChunks(columnsAndValues, 2)]))
            with EngineVerbosity(self.verbose):
                self.cxn.query(sql)

    def ensureId(self, insert, fetch):
        """Insert-or-fetch invariant.

        Runs *insert* (typically an INSERT IGNORE); when it creates no row,
        falls back to *fetch* to look up the existing id. Returns a tuple
        (id-or-None, status) with status one of INSERTED / FETCHED /
        FETCHFAILED / SOLOINSERTNOOP / TESTING.
        """
        if self.testing():
            logger.info(insert or "" + "\n" + fetch or "")
            return (-1, TESTING)
        else:
            # Escape literal % so web.py's interpolation leaves them alone
            # (relevant e.g. for LIKE comparisons).
            insert = re.sub(r"""%""", "%%", insert)
            lid = None
            with EngineVerbosity(self.verbose):
                # should wrap this in a transaction?
                succeed = self.cxn.query(insert)
                if succeed:
                    lid = self.cxn.query('select last_insert_id() as id')[0].id or None
            if lid and lid>0:
                # Case I: a new row was INSERTED
                return (int(lid), INSERTED)
            else:
                if fetch:
                    fetch = re.sub(r"""%""", "%%", fetch)
                    all = self.cxn.query(fetch)[0]
                    if all:
                        # Case II: row already existed; FETCHED its id
                        return (int(all.id), FETCHED)
                    else:
                        # Case III: tried to fetch, found nothing
                        logger.warning("FETCH query matched nothing")
                        return (None, FETCHFAILED)
                else:
                    logger.warning("solo insert was no-op")
                    # Case IV: no fetch provided (insert was mandatory) but it failed
                    return (None, SOLOINSERTNOOP)
class MySQLdbWatdb(Watdb):
    """Watdb specialization talking to the raw MySQLdb driver directly."""

    def __init__(self, verbose=VERBOSE, engine=ENGINE, conf=CONF):
        # Produced via Watdb.specialize(); direct construction is unsupported.
        logger.warning("Don't call directly")

    def connect(self):
        """Open a MySQLdb connection from self.cfg; expose its cursor factory."""
        cfg = self.cfg
        self.cxn = MySQLdb.connect(passwd=cfg['password'], db=cfg['dsname'], user=cfg['user'], host=cfg['host'])
        self.cursor = self.cxn.cursor

    def disconnect(self):
        """Close the connection; closing twice is tolerated."""
        try:
            self.cxn.close()
        except MySQLdb.ProgrammingError, err:
            # don't stress closing a closed connection
            pass
        return self.cxn

    def kwote(self, thing):
        """SQL-quote a value, preferring the driver's own escaping."""
        try:
            # to emulate old trbotdb, use MySQLdb handle's escape method
            return self.cxn.escape(util.emittable(thing),MySQLdb.converters.conversions)
        except AttributeError:
            # fall-through: use our own (like webpy case)
            return kwoteValue(thing)

    def maybeQuery(self, sql):
        """assumes connected. assert SQL, return the record number (if succeed)"""
        if self.testing():
            logger.info(sql)
            return -1
        else:
            cur = self.cxn.cursor()
            cur.execute(sql, ())
            return self.cxn.insert_id() or None

    def maybeFetch(self, sql):
        """assumes connected. assert SQL, return the rows"""
        if self.testing():
            logger.info(sql)
            return []
        else:
            cur = self.cxn.cursor()
            # this should work but is not; ### fix
            cur.execute(sql, ())
            rows = cur.fetchall()
            return rows
def main(argv=None):
    '''this is called if run from command line'''
    # process command line arguments
    if argv is None:
        argv = sys.argv
    try:
        opts, args = getopt.getopt(sys.argv[1:], "hv",
                                   ["echo=", "help",
                                    "verbose"])
    except getopt.error, msg:
        print >> sys.stderr, msg
        print >> sys.stderr, "for help use --help"
        sys.exit(2)
    # default options
    my_verbose = VERBOSE
    # process options
    for o, a in opts:
        if o in ("-h","--help"):
            print __doc__
            sys.exit(0)
        if o in ("--echo", ):
            print a
        if o in ("-v", "--verbose", ):
            my_verbose = True
    if my_verbose:
        print >> sys.stderr, "ARGV is %s" % (argv)

    # Smoke-test the webpy engine against the 'test' configuration.
    w1 = Watdb(conf='test')
    print w1
    w2 = Watdb(conf='test')
    print w2
    w2.connect()
    print w2.cxn
    # print list(w2.maybeFetch('select id,code,tier from markets limit 10'))
    import random
    i = w2.maybeQuery("insert ignore into markets(code,tier) values('abc',%s)" % (random.randint(0,100)))
    print i
    print "fetchId"
    j = w2.fetchId("markets", "tier", 100)
    j = w2.fetchId("phones", "retained", "A'B")
    print j
    # Exercise the EngineVerbosity toggle around identical fetches.
    print "\nVerbose"
    w2.verbose = True
    j = w2.fetchId("phones", "retained", "A'B")
    print "\nQuiet"
    w2.verbose = False
    j = w2.fetchId("phones", "retained", "A'B")
    exit(0)
    # NOTE(review): everything below is dead code -- the exit(0) above
    # always terminates the demo before these further exercises run.
    print "mqfi"
    iq = "insert ignore into markets(code,tier) values('xyz',102)"
    k = w2.mqfi(iq, "markets", "code", "xyz")
    print k
    # w3 = Watdb(conf='esc000__sigma', engine=False)
    # print w3
    # w3.connect()
    # print w3.cxn
    # print w3.maybeFetch('select source,market,city from posts limit 10')
    print "updateFreq"
    w2.updateFreq("phones", "phone", "3104488201")
    w2.updateFreq("phones", "phone", "3104488201")
    w2.updateFreq("phones", "phone", "3104488201")
    print "insertionQuery"
    import random
    iq = w2.insertionQuery("phones", ["phone"], [str(random.randint(1111111111,9999999999))])
    print iq
    print "fetchQuery"
    import random
    fq = w2.fetchQuery("phones", "phone", str(random.randint(1111111111,9999999999)),
                       "code", "A'C")
    print fq

# call main() if this is run as standalone
if __name__ == "__main__":
    sys.exit(main())
# End of watdb.py
|
apache-2.0
|
DirtyUnicorns/android_external_chromium-org
|
tools/valgrind/scan-build.py
|
103
|
6714
|
#!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import errno
import os
import re
import sys
import urllib
import urllib2
# Where all the data lives.
ROOT_URL = "http://build.chromium.org/p/chromium.memory.fyi/builders"
# TODO(groby) - support multi-line search from the command line. Useful when
# scanning for classes of failures, see below.
SEARCH_STRING = """<p class=\"failure result\">
Failed memory test: content
</p>"""
# Location of the log cache.
CACHE_DIR = "buildlogs.tmp"
# If we don't find anything after searching |CUTOFF| logs, we're probably done.
CUTOFF = 100
def EnsurePath(path):
    """Makes sure |path| does exist as a directory, creating it if needed."""
    if os.path.isdir(path):
        return
    try:
        os.makedirs(path)
    except OSError as err:
        # A concurrent creator may have won the race; only re-raise
        # genuine failures.
        if err.errno != errno.EEXIST:
            raise
class Cache(object):
def __init__(self, root_dir):
self._root_dir = os.path.abspath(root_dir)
def _LocalName(self, name):
"""If name is a relative path, treat it as relative to cache root.
If it is absolute and under cache root, pass it through.
Otherwise, raise error.
"""
if os.path.isabs(name):
assert os.path.commonprefix([name, self._root_dir]) == self._root_dir
else:
name = os.path.join(self._root_dir, name)
return name
def _FetchLocal(self, local_name):
local_name = self._LocalName(local_name)
EnsurePath(os.path.dirname(local_name))
if os.path.exists(local_name):
f = open(local_name, 'r')
return f.readlines();
return None
def _FetchRemote(self, remote_name):
try:
response = urllib2.urlopen(remote_name)
except:
print "Could not fetch", remote_name
raise
return response.read()
def Update(self, local_name, remote_name):
local_name = self._LocalName(local_name)
EnsurePath(os.path.dirname(local_name))
blob = self._FetchRemote(remote_name)
f = open(local_name, "w")
f.write(blob)
return blob.splitlines()
def FetchData(self, local_name, remote_name):
result = self._FetchLocal(local_name)
if result:
return result
# If we get here, the local cache does not exist yet. Fetch, and store.
return self.Update(local_name, remote_name)
class Builder(object):
  """One buildbot builder on a waterfall; fetches and scans its build logs."""

  def __init__(self, waterfall, name):
    self._name = name
    self._waterfall = waterfall

  def Name(self):
    return self._name

  def LatestBuild(self):
    """Number of this builder's most recent build, per the waterfall."""
    return self._waterfall.GetLatestBuild(self._name)

  def GetBuildPath(self, build_num):
    """URL of the log page for |build_num| on this builder."""
    return "%s/%s/builds/%d" % (
        self._waterfall._root_url, urllib.quote(self._name), build_num)

  def _FetchBuildLog(self, build_num):
    """Lines of build |build_num|'s log, served through the waterfall cache."""
    local_build_path = "builds/%s" % self._name
    local_build_file = os.path.join(local_build_path, "%d.log" % build_num)
    return self._waterfall._cache.FetchData(local_build_file,
                                            self.GetBuildPath(build_num))

  def _CheckLog(self, build_num, tester):
    # True when |tester| fires on any line of the log.
    log_lines = self._FetchBuildLog(build_num)
    return any(tester(line) for line in log_lines)

  def ScanLogs(self, tester):
    """Walk builds backwards from the latest, collecting build numbers where
    |tester| matches; stop after CUTOFF non-matching builds (or build 0)."""
    occurrences = []
    build = self.LatestBuild()
    no_results = 0
    while build != 0 and no_results < CUTOFF:
      if self._CheckLog(build, tester):
        occurrences.append(build)
      else:
        no_results = no_results + 1
      build = build - 1
    return occurrences
class Waterfall(object):
  """A buildbot waterfall page: knows its builders and their latest builds."""

  def __init__(self, root_url, cache_dir):
    self._root_url = root_url
    self._builders = {}      # builder name -> Builder
    self._top_revision = {}  # builder name -> latest build number
    self._cache = Cache(cache_dir)

  def Builders(self):
    return self._builders.values()

  def Update(self):
    """Re-download the waterfall page, then re-parse builder info."""
    self._cache.Update("builders", self._root_url)
    self.FetchInfo()

  def FetchInfo(self):
    """Parse builders and their latest build numbers from the cached HTML.
    No-op when already parsed."""
    if self._top_revision:
      return
    html = self._cache.FetchData("builders", self._root_url)
    """ Search for both builders and latest build number in HTML
    <td class="box"><a href="builders/<builder-name>"> identifies a builder
    <a href="builders/<builder-name>/builds/<build-num>"> is the latest build.
    """
    box_matcher = re.compile('.*a href[^>]*>([^<]*)\<')
    build_matcher = re.compile('.*a href=\"builders/(.*)/builds/([0-9]+)\".*')
    last_builder = ""
    for line in html:
      if 'a href="builders/' in line:
        if 'td class="box"' in line:
          # Builder cell: remember the name, register a Builder for it.
          last_builder = box_matcher.match(line).group(1)
          self._builders[last_builder] = Builder(self, last_builder)
        else:
          # Build link: must belong to the most recently seen builder.
          result = build_matcher.match(line)
          builder = result.group(1)
          assert builder == urllib.quote(last_builder)
          self._top_revision[last_builder] = int(result.group(2))

  def GetLatestBuild(self, name):
    self.FetchInfo()
    assert self._top_revision
    return self._top_revision[name]
class MultiLineChange(object):
  """Stateful matcher that fires when its tracked lines occur consecutively."""

  def __init__(self, lines):
    self._tracked_lines = lines
    self._current = 0

  def __call__(self, line):
    """ Test a single line against the multi-line change.

    Returns True when the final tracked line is matched (then rewinds),
    False on a miss (also rewinds), and None while partway through.
    """
    expected = self._tracked_lines[self._current]
    if expected not in line:
      # Miss: restart matching from the first tracked line.
      self._current = 0
      return False
    self._current += 1
    if self._current == len(self._tracked_lines):
      self._current = 0
      return True
def main(argv):
  """Entry point: --update refreshes the cached logs for every builder;
  --find scans all cached logs for a (possibly multi-line) search term."""
  # Create argument parser.
  parser = argparse.ArgumentParser()
  commands = parser.add_mutually_exclusive_group(required=True)
  commands.add_argument("--update", action='store_true')
  commands.add_argument("--find", metavar='search term')
  args = parser.parse_args()

  # Cache lives next to this script.
  path = os.path.abspath(os.path.dirname(argv[0]))
  cache_path = os.path.join(path, CACHE_DIR)

  fyi = Waterfall(ROOT_URL, cache_path)

  if args.update:
    fyi.Update()
    for builder in fyi.Builders():
      print "Updating", builder.Name()
      # Always-false tester: walks the builds purely to populate the cache.
      builder.ScanLogs(lambda x:False)

  if args.find:
    tester = MultiLineChange(args.find.splitlines())
    fyi.FetchInfo()

    print "SCANNING FOR ", args.find
    for builder in fyi.Builders():
      print "Scanning", builder.Name()
      occurrences = builder.ScanLogs(tester)
      if occurrences:
        min_build = min(occurrences)
        path = builder.GetBuildPath(min_build)
        print "Earliest occurrence in build %d" % min_build
        print "Latest occurrence in build %d" % max(occurrences)
        print "Latest build: %d" % builder.LatestBuild()
        print path
        print "%d total" % len(occurrences)


if __name__ == "__main__":
  sys.exit(main(sys.argv))
|
bsd-3-clause
|
pajlada/pajbot
|
main.py
|
2
|
1666
|
#!/usr/bin/env python3
import logging
import os
import signal
import sys
from pajbot.bot import Bot
from pajbot.utils import parse_args
try:
basestring
except NameError:
basestring = str
# XXX: What does this achieve exactly?
os.chdir(os.path.dirname(os.path.realpath(__file__)))
log = logging.getLogger(__name__)
def run(args):
    """Load and validate the config file, then run the bot until it exits."""
    from pajbot.utils import load_config

    config = load_config(args.config)

    if "main" not in config:
        log.error("Missing section [main] in config")
        sys.exit(1)

    # Reject the legacy [sql] layout with a pointer to the new format.
    if "sql" in config:
        log.error(
            "The [sql] section in config is no longer used. See the example config for the new format under [main]."
        )
        sys.exit(1)

    if "db" not in config["main"]:
        log.error("Missing required db config in the [main] section.")
        sys.exit(1)

    pajbot = Bot(config, args)
    pajbot.connect()

    def on_sigterm(signal, frame):
        # Graceful shutdown on SIGTERM (e.g. from a service manager).
        pajbot.quit_bot()
        sys.exit(0)

    signal.signal(signal.SIGTERM, on_sigterm)

    try:
        pajbot.start()
    except KeyboardInterrupt:
        # Ctrl-C also shuts down cleanly.
        pajbot.quit_bot()
def handle_exceptions(exctype, value, tb):
    """sys.excepthook replacement: record any uncaught exception in the log."""
    log.error("Logging an uncaught exception", exc_info=(exctype, value, tb))
if __name__ == "__main__":
    from pajbot.utils import init_logging, dump_threads

    def on_sigusr1(signal, frame):
        """SIGUSR1 handler: log a notice and dump all thread stack traces."""
        log.info("Process was interrupted with SIGUSR1, dumping all thread stack traces")
        dump_threads()

    # dump all stack traces on SIGUSR1
    signal.signal(signal.SIGUSR1, on_sigusr1)

    # Route uncaught exceptions into the logger before anything else runs.
    sys.excepthook = handle_exceptions

    args = parse_args()
    init_logging("pajbot")
    run(args)
|
mit
|
zdszxp/gamesrc
|
Trdlib/src/boost_1_60_0/tools/build/src/build/build_request.py
|
11
|
8094
|
# Status: being ported by Vladimir Prus
# TODO: need to re-compare with mainline of .jam
# Base revision: 40480
#
# (C) Copyright David Abrahams 2002. Permission to copy, use, modify, sell and
# distribute this software is granted provided this copyright notice appears in
# all copies. This software is provided "as is" without express or implied
# warranty, and with no claim as to its suitability for any purpose.
import b2.build.feature
feature = b2.build.feature
from b2.util.utility import *
from b2.util import is_iterable_typed
import b2.build.property_set as property_set
def expand_no_defaults (property_sets):
    """ Expand the given build request by combining all property_sets which don't
        specify conflicting non-free features.
    """
    assert is_iterable_typed(property_sets, property_set.PropertySet)
    # Make every feature and subfeature explicit before combining.
    with_subfeatures = [ps.expand_subfeatures() for ps in property_sets]
    # Cross-combine the expanded sets, dropping conflicting combinations.
    return [property_set.create(combination)
            for combination in __x_product(with_subfeatures)]
def __x_product (property_sets):
    """Return the cross-product of all elements of property_sets, excluding
    any combination that would assign conflicting values to a single-valued
    feature.
    """
    assert is_iterable_typed(property_sets, property_set.PropertySet)
    combinations, _ = __x_product_aux(property_sets, set())
    return combinations
def __x_product_aux (property_sets, seen_features):
    """Returns non-conflicting combinations of property sets.

    property_sets is a list of PropertySet instances. seen_features is a set
    of Property instances.

    Returns a tuple of:
    - list of lists of Property instances, such that within each list, no two
      Property instance have the same feature, and no Property is for feature
      in seen_features.
    - set of features we saw in property_sets
    """
    assert is_iterable_typed(property_sets, property_set.PropertySet)
    assert isinstance(seen_features, set)
    # Base case: nothing left to combine.
    if not property_sets:
        return ([], set())
    properties = property_sets[0].all()
    # Collect the non-free features of the head set; conflicts are detected
    # on features, not on individual property values (see note below).
    these_features = set()
    for p in property_sets[0].non_free():
        these_features.add(p.feature())
    # Note: the algorithm as implemented here, as in original Jam code, appears to
    # detect conflicts based on features, not properties. For example, if command
    # line build request say:
    #
    # <a>1/<b>1 c<1>/<b>1
    #
    # It will decide that those two property sets conflict, because they both specify
    # a value for 'b' and will not try building "<a>1 <c1> <b1>", but rather two
    # different property sets. This is a topic for future fixing, maybe.
    if these_features & seen_features:
        # Head conflicts with an already-seen feature: drop it and recurse on
        # the tail only.
        (inner_result, inner_seen) = __x_product_aux(property_sets[1:], seen_features)
        return (inner_result, inner_seen | these_features)
    else:
        result = []
        # Combine the head's properties with every compatible tail combination.
        (inner_result, inner_seen) = __x_product_aux(property_sets[1:], seen_features | these_features)
        if inner_result:
            for inner in inner_result:
                result.append(properties + inner)
        else:
            result.append(properties)
        if inner_seen & these_features:
            # Some of elements in property_sets[1:] conflict with elements of property_sets[0],
            # Try again, this time omitting elements of property_sets[0]
            (inner_result2, inner_seen2) = __x_product_aux(property_sets[1:], seen_features)
            result.extend(inner_result2)
        return (result, inner_seen | these_features)
def looks_like_implicit_value(v):
    """Return 1 when 'v' is an implicit value, or when the text before its
    first '-' is one; otherwise return 0."""
    assert isinstance(v, basestring)
    if feature.is_implicit_value(v):
        return 1
    # Fall back to the part before the first dash (e.g. "gcc-3.0" -> "gcc").
    if feature.is_implicit_value(v.split("-")[0]):
        return 1
    return 0
def from_command_line(command_line):
    """Takes the command line tokens (such as taken from ARGV rule)
    and constructs build request from it. Returns a list of two
    lists. First is the set of targets specified in the command line,
    and second is the set of requested build properties."""
    assert is_iterable_typed(command_line, basestring)
    targets = []
    properties = []
    for token in command_line:
        # Dash-prefixed tokens are option flags; ignore them here.
        if token[:1] == "-":
            continue
        # A build-request spec either contains "=" or consists entirely of
        # implicit feature values.
        if "=" in token or looks_like_implicit_value(token.split("/")[0]):
            properties.append(token)
        elif token:
            targets.append(token)
    return [targets, properties]
def convert_command_line_element(e):
    """Converts one element of command line build request specification into
    internal form: a list of PropertySet instances."""
    assert isinstance(e, basestring)
    result = None
    for part in e.split("/"):
        pieces = part.split("=")
        if len(pieces) > 1:
            # "name=v1,v2" expands to explicit <name>value properties.
            # (The distinct local name avoids shadowing the module-level
            # 'feature' alias.)
            feature_name = pieces[0]
            lresult = [("<%s>%s" % (feature_name, v))
                       for v in pieces[1].split(",")]
        else:
            # Bare, possibly comma-separated, implicit values.
            lresult = part.split(",")
            # FIXME: first port property.validate.
            # property.validate cannot handle subfeatures,
            # so the check is skipped here.
        if not result:
            result = lresult
        else:
            result = [left + "/" + right
                      for left in result for right in lresult]
    return [property_set.create(b2.build.feature.split(r)) for r in result]
###
### rule __test__ ( )
### {
### import assert feature ;
###
### feature.prepare-test build-request-test-temp ;
###
### import build-request ;
### import build-request : expand_no_defaults : build-request.expand_no_defaults ;
### import errors : try catch ;
### import feature : feature subfeature ;
###
### feature toolset : gcc msvc borland : implicit ;
### subfeature toolset gcc : version : 2.95.2 2.95.3 2.95.4
### 3.0 3.0.1 3.0.2 : optional ;
###
### feature variant : debug release : implicit composite ;
### feature inlining : on off ;
### feature "include" : : free ;
###
### feature stdlib : native stlport : implicit ;
###
### feature runtime-link : dynamic static : symmetric ;
###
###
### local r ;
###
### r = [ build-request.from-command-line bjam debug runtime-link=dynamic ] ;
### assert.equal [ $(r).get-at 1 ] : ;
### assert.equal [ $(r).get-at 2 ] : debug <runtime-link>dynamic ;
###
### try ;
### {
###
### build-request.from-command-line bjam gcc/debug runtime-link=dynamic/static ;
### }
### catch \"static\" is not a value of an implicit feature ;
###
###
### r = [ build-request.from-command-line bjam -d2 --debug debug target runtime-link=dynamic ] ;
### assert.equal [ $(r).get-at 1 ] : target ;
### assert.equal [ $(r).get-at 2 ] : debug <runtime-link>dynamic ;
###
### r = [ build-request.from-command-line bjam debug runtime-link=dynamic,static ] ;
### assert.equal [ $(r).get-at 1 ] : ;
### assert.equal [ $(r).get-at 2 ] : debug <runtime-link>dynamic <runtime-link>static ;
###
### r = [ build-request.from-command-line bjam debug gcc/runtime-link=dynamic,static ] ;
### assert.equal [ $(r).get-at 1 ] : ;
### assert.equal [ $(r).get-at 2 ] : debug gcc/<runtime-link>dynamic
### gcc/<runtime-link>static ;
###
### r = [ build-request.from-command-line bjam msvc gcc,borland/runtime-link=static ] ;
### assert.equal [ $(r).get-at 1 ] : ;
### assert.equal [ $(r).get-at 2 ] : msvc gcc/<runtime-link>static
### borland/<runtime-link>static ;
###
### r = [ build-request.from-command-line bjam gcc-3.0 ] ;
### assert.equal [ $(r).get-at 1 ] : ;
### assert.equal [ $(r).get-at 2 ] : gcc-3.0 ;
###
### feature.finish-test build-request-test-temp ;
### }
###
###
|
gpl-3.0
|
p0psicles/SickRage
|
lib/feedparser/sgml.py
|
22
|
2682
|
from __future__ import absolute_import
import re
# Public names re-exported by this module: the sgmllib availability flag,
# the (possibly mocked) sgmllib module itself, and the regular expressions
# that replace sgmllib's module-level patterns.
__all__ = [
    '_SGML_AVAILABLE',
    'sgmllib',
    'charref',
    'tagfind',
    'attrfind',
    'entityref',
    'incomplete',
    'interesting',
    'shorttag',
    'shorttagopen',
    'starttagopen',
    'endbracket',
]
# sgmllib is not available by default in Python 3; if the end user doesn't have
# it available then we'll lose illformed XML parsing and content santizing
try:
    import sgmllib
except ImportError:
    # This is probably Python 3, which doesn't include sgmllib anymore
    _SGML_AVAILABLE = 0

    # Mock sgmllib enough to allow subclassing later on
    class sgmllib(object):
        class SGMLParser(object):
            def goahead(self, i):
                pass
            def parse_starttag(self, i):
                pass
else:
    _SGML_AVAILABLE = 1

    # sgmllib defines a number of module-level regular expressions that are
    # insufficient for the XML parsing feedparser needs. Rather than modify
    # the variables directly in sgmllib, they're defined here using the same
    # names, and the compiled code objects of several sgmllib.SGMLParser
    # methods are copied into _BaseHTMLProcessor so that they execute in
    # feedparser's scope instead of sgmllib's scope.
    charref = re.compile('&#(\d+|[xX][0-9a-fA-F]+);')
    tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
    attrfind = re.compile(
        r'\s*([a-zA-Z_][-:.a-zA-Z_0-9]*)[$]?(\s*=\s*'
        r'(\'[^\']*\'|"[^"]*"|[][\-a-zA-Z0-9./,:;+*%?!&$\(\)_#=~\'"@]*))?'
    )

    # Unfortunately, these must be copied over to prevent NameError exceptions
    entityref = sgmllib.entityref
    incomplete = sgmllib.incomplete
    interesting = sgmllib.interesting
    shorttag = sgmllib.shorttag
    shorttagopen = sgmllib.shorttagopen
    starttagopen = sgmllib.starttagopen

    class _EndBracketRegEx:
        # Wraps a compiled pattern so it can be dropped in where sgmllib
        # expects its module-level `endbracket` regex object.
        def __init__(self):
            # Overriding the built-in sgmllib.endbracket regex allows the
            # parser to find angle brackets embedded in element attributes.
            self.endbracket = re.compile('''([^'"<>]|"[^"]*"(?=>|/|\s|\w+=)|'[^']*'(?=>|/|\s|\w+=))*(?=[<>])|.*?(?=[<>])''')
        def search(self, target, index=0):
            match = self.endbracket.match(target, index)
            if match is not None:
                # Returning a new object in the calling thread's context
                # resolves a thread-safety.
                return EndBracketMatch(match)
            return None

    class EndBracketMatch:
        # Thread-local wrapper around a re match object.
        def __init__(self, match):
            self.match = match
        def start(self, n):
            # NOTE(review): delegates to match.end(n), not match.start(n).
            # This mirrors upstream feedparser, where callers want the offset
            # just past the matched prefix — confirm before "fixing".
            return self.match.end(n)
    endbracket = _EndBracketRegEx()
|
gpl-3.0
|
johndpope/tensorflow
|
tensorflow/python/training/session_run_hook.py
|
40
|
10195
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A SessionRunHook extends `session.run()` calls for the `MonitoredSession`.
SessionRunHooks are useful to track training, report progress, request early
stopping and more. SessionRunHooks use the observer pattern and notify at the
following points:
- when a session starts being used
- before a call to the `session.run()`
- after a call to the `session.run()`
- when the session closed
A SessionRunHook encapsulates a piece of reusable/composable computation that
can piggyback a call to `MonitoredSession.run()`. A hook can add any
ops-or-tensor/feeds to the run call, and when the run call finishes with success
gets the outputs it requested. Hooks are allowed to add ops to the graph in
`hook.begin()`. The graph is finalized after the `begin()` method is called.
There are a few pre-defined monitors:
- StopAtStepHook: Request stop based on global_step
- CheckpointSaverHook: saves checkpoint
- LoggingTensorHook: outputs one or more tensor values to log
- NanTensorHook: Request stop if given `Tensor` contains Nans.
- SummarySaverHook: saves summaries to a summary writer
For more specific needs, you can create custom hooks:
class ExampleHook(SessionRunHook):
def begin(self):
# You can add ops to the graph here.
print('Starting the session.')
self.your_tensor = ...
def end(self, session):
print('Done with the session.')
def before_run(self, run_context):
print('before calling session.run')
return SessionRunArgs(self.your_tensor)
def after_run(self, run_context, run_values):
print('Done running one step. The value of my tensor: %s',
run_values.results)
if you-need-to-stop-loop:
run_context.request_stop()
To understand how hooks interact with calls to `MonitoredSession.run()`,
look at following code:
with SupervisedSession(hooks=your_hooks, ...) as sess:
while not sess.should_stop():
sess.run(your_fetches)
Above user code leads to following execution:
call hooks.begin()
sess = tf.Session()
call hooks.after_create_session()
while not stop is requested:
call hooks.before_run()
try:
results = sess.run(merged_fetches)
except (errors.OutOfRangeError, StopIteration):
break
call hooks.after_run()
call hooks.end()
sess.close()
Note that if sess.run() raises OutOfRangeError or StopIteration then
hooks.after_run() will not be called but hooks.end() will still be called.
If sess.run() raises any other exception then neither hooks.after_run() nor
hooks.end() will be called.
@@SessionRunHook
@@SessionRunArgs
@@SessionRunContext
@@SessionRunValues
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
class SessionRunHook(object):
    """Base class for hooks that piggyback on `MonitoredSession.run()`.

    Subclasses override any of the callbacks below to observe or extend the
    session life cycle; every default implementation is a no-op.
    """

    def begin(self):
        """Called once before the session is used.

        The default graph is still mutable at this point; ops added here are
        the last ones in before the graph is finalized. A second `begin()`
        call on the same graph must not change the graph.
        """
        pass

    def after_create_session(self, session, coord):  # pylint: disable=unused-argument
        """Called whenever a new TensorFlow session is created.

        Unlike `begin()`, the graph is already finalized here, and this
        callback also fires when a wrapped session is recovered — not only
        at the beginning of the overall session.

        Args:
          session: the TensorFlow Session that has been created.
          coord: a Coordinator which keeps track of all threads.
        """
        pass

    def before_run(self, run_context):  # pylint: disable=unused-argument
        """Called before each call to `run()`.

        May return a `SessionRunArgs` whose ops/tensors and feeds are merged
        into the upcoming `run()` call. The graph is finalized, so no ops
        can be added here.

        Args:
          run_context: a `SessionRunContext` describing the upcoming call.
        Returns:
          None or a `SessionRunArgs` object.
        """
        return None

    def after_run(self,
                  run_context,  # pylint: disable=unused-argument
                  run_values):  # pylint: disable=unused-argument
        """Called after each successful call to `run()`.

        `run_values` carries the results of whatever `before_run()` asked
        for; `run_context.request_stop()` stops the iteration. Not invoked
        when `session.run()` raises.

        Args:
          run_context: the same `SessionRunContext` passed to `before_run`.
          run_values: a `SessionRunValues` object.
        """
        pass

    def end(self, session):  # pylint: disable=unused-argument
        """Called once at the end of the session.

        Useful for final ops such as saving a last checkpoint. Still called
        when `session.run()` raised OutOfRangeError or StopIteration (unlike
        `after_run`), but not for any other exception.

        Args:
          session: the TensorFlow Session that will soon be closed.
        """
        pass
class SessionRunArgs(
        collections.namedtuple("SessionRunArgs",
                               ["fetches", "feed_dict", "options"])):
    """Arguments a hook merges into a `Session.run()` call.

    Fields:
      fetches: anything acceptable as the `fetches` argument of
        `Session.run()` — a single tensor/op, a list of fetches, or a
        (possibly nested) dict of them, e.g.
        `{'step': global_step_tensor, 'ops': [train_op, check_nan_op]}`.
      feed_dict: merged into the `feed_dict` of the `run()` call.
      options: a `config_pb2.RunOptions` proto, exactly as for
        `Session.run()`.
    """

    def __new__(cls, fetches, feed_dict=None, options=None):
        # Only `fetches` is mandatory; the namedtuple base itself has no
        # defaults, so they are supplied here.
        return super(SessionRunArgs, cls).__new__(cls, fetches, feed_dict, options)
class SessionRunContext(object):
    """Describes the `session.run()` call a hook is observing.

    Exposes the original `run()` arguments and the session, and lets hooks
    stop the `MonitoredSession` loop via `request_stop()`.
    """

    def __init__(self, original_args, session):
        """Stores the call information; the stop flag starts cleared."""
        self._original_args = original_args
        self._session = session
        self._stop_requested = False

    @property
    def original_args(self):
        """The `SessionRunArgs` originally passed to `run()`.

        For `MonitoredSession.run(fetches=a, feed_dict=b)` this equals
        `SessionRunArgs(a, b)`.

        Returns:
          A `SessionRunArgs` object.
        """
        return self._original_args

    @property
    def session(self):
        """The TensorFlow session object which will execute the `run`."""
        return self._session

    @property
    def stop_requested(self):
        """Whether any hook has asked the loop to stop.

        Returns:
          A `bool`; when true, `MonitoredSession` stops iterating.
        """
        return self._stop_requested

    def request_stop(self):
        """Asks `MonitoredSession` to stop iterating.

        `MonitoredSession` checks this flag after each run call.
        """
        self._stop_requested = True
class SessionRunValues(
        collections.namedtuple("SessionRunValues",
                               ["results", "options", "run_metadata"])):
    """Results of a `Session.run()` call, as delivered to `after_run()`.

    Fields:
      results: return values from `Session.run()` matching the shape of the
        `fetches` a hook requested in its `SessionRunArgs`. For example:
          fetches = global_step_tensor        => results = nparray(int)
          fetches = [train_op, summary_op]    => results = [None, nparray(string)]
          fetches = {'step': global_step_tensor}
                                              => results = {'step': nparray(int)}
      options: the `RunOptions` used for the `Session.run()` call.
      run_metadata: the `RunMetadata` produced by the `Session.run()` call.
    """
|
apache-2.0
|
sebres/fail2ban
|
fail2ban/client/filterreader.py
|
4
|
3257
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
# vi: set ft=python sts=4 ts=4 sw=4 noet :
# This file is part of Fail2Ban.
#
# Fail2Ban is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Fail2Ban is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Fail2Ban; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# Author: Cyril Jaquier
#
__author__ = "Cyril Jaquier"
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
__license__ = "GPL"
import os
import shlex
from .configreader import DefinitionInitConfigReader
from ..helpers import getLogger
# Gets the instance of the logger.
logSys = getLogger(__name__)
class FilterReader(DefinitionInitConfigReader):
    """Reads a filter definition from filter.d and converts its options
    into server command-stream entries for a given jail."""

    # Recognized filter options and their types/defaults.
    _configOpts = {
        "usedns": ["string", None],
        "prefregex": ["string", None],
        "ignoreregex": ["string", None],
        "failregex": ["string", None],
        "maxlines": ["int", None],
        "datepattern": ["string", None],
        "journalmatch": ["string", None],
    }

    def setFile(self, fileName):
        """Remember the bare filter name; the real path lives under filter.d/."""
        self.__file = fileName
        DefinitionInitConfigReader.setFile(self, os.path.join("filter.d", fileName))

    def getFile(self):
        """Return the bare filter file name (without the filter.d/ prefix)."""
        return self.__file

    def applyAutoOptions(self, backend):
        # set init option to backend-related logtype, considering
        # that the filter settings may be overwritten in its local:
        if (not self._initOpts.get('logtype') and
            not self.has_option('Definition', 'logtype', False)
        ):
            self._initOpts['logtype'] = ['file','journal'][int(backend.startswith("systemd"))]

    def convert(self):
        """Convert the combined filter options into a command stream."""
        stream = list()
        opts = self.getCombined()
        if not len(opts):
            return stream
        return FilterReader._fillStream(stream, opts, self._jailName)

    @staticmethod
    def _fillStream(stream, opts, jailName):
        """Append one server command per option to `stream` and return it."""
        prio0idx = 0
        # items() instead of py2-only iteritems(): identical iteration on
        # Python 2 and also works on Python 3.
        for opt, value in opts.items():
            if opt in ("failregex", "ignoreregex"):
                if value is None: continue
                multi = []
                for regex in value.split('\n'):
                    # Do not send a command if the rule is empty.
                    if regex != '':
                        multi.append(regex)
                if len(multi) > 1:
                    stream.append(["multi-set", jailName, "add" + opt, multi])
                elif len(multi):
                    stream.append(["set", jailName, "add" + opt, multi[0]])
            elif opt in ('usedns', 'maxlines', 'prefregex'):
                # Be sure we set these options first, and usedns before all regex(s).
                stream.insert(0 if opt == 'usedns' else prio0idx,
                    ["set", jailName, opt, value])
                prio0idx += 1
            elif opt == 'datepattern':
                # BUG FIX: was `opt in ('datepattern')` — parentheses around a
                # single string are not a tuple, so that performed a substring
                # test (`opt in "datepattern"`), matching stray keys such as
                # "date". An equality test is what was intended.
                stream.append(["set", jailName, opt, value])
            elif opt == 'journalmatch':
                # Do not send a command if the match is empty.
                if value is None: continue
                for match in value.split("\n"):
                    if match == '': continue
                    stream.append(
                        ["set", jailName, "addjournalmatch"] + shlex.split(match))
        return stream
|
gpl-2.0
|
dharrigan/plugins
|
security/etpro-telemetry/src/opnsense/scripts/etpro_telemetry/dump_data.py
|
2
|
2514
|
#!/usr/local/bin/python3
"""
Copyright (c) 2018-2019 Ad Schellevis <[email protected]>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import argparse
import urllib3
import datetime
import ujson
import telemetry.log

# Self-signed appliance certificates are expected; silence the TLS warning.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

# Command line: where the suricata eve.json logs live, how far back to read,
# whether to show parsed telemetry payloads or raw records, and a row cap.
parser = argparse.ArgumentParser()
parser.add_argument('-l', '--log', help='log directory containing eve.json files', default="/var/log/suricata/")
parser.add_argument('-t', '--time', help='max seconds to read from now()', type=int, default=3600)
parser.add_argument('-p', '--parsed', help='show data as shipped using send_telemetry',
                    default=False, action="store_true")
parser.add_argument('-L', '--limit', help='limit number of rows', type=int, default=-1)
args = parser.parse_args()

# Only records newer than now() - args.time seconds are considered.
last_update = datetime.datetime.now() - datetime.timedelta(seconds=float(args.time))
event_collector = telemetry.EventCollector()
row_count = 0
for record in telemetry.log.reader(args.log, last_update=last_update):
    if args.parsed:
        # Collect for batched output in the same form send_telemetry ships.
        event_collector.push(record)
    else:
        print (ujson.dumps(record))
    row_count += 1
    if args.limit != -1 and row_count >= args.limit:
        break
if args.parsed:
    for push_data in event_collector:
        print (push_data.strip())
|
bsd-2-clause
|
nhenezi/kuma
|
vendor/packages/nose/unit_tests/test_pdb_plugin.py
|
10
|
3241
|
import sys
import unittest
from nose.config import Config
from nose.plugins import debug
from optparse import OptionParser
from StringIO import StringIO
class StubPdb:
    """Minimal stand-in for the pdb module: records whether post_mortem
    was invoked instead of starting an interactive debugger."""
    called = False

    def post_mortem(self, tb):
        # Sets an instance attribute; the class-level default stays False.
        self.called = True
class TestPdbPlugin(unittest.TestCase):
    """Tests for nose's pdb plugin (nose.plugins.debug).

    The real pdb module and sys.stdout are swapped for stand-ins in setUp
    and restored in tearDown, so no test ever drops into a live debugger.
    """

    def setUp(self):
        # Keep the originals so tearDown can restore global state.
        self._pdb = debug.pdb
        self._so = sys.stdout
        debug.pdb = StubPdb()

    def tearDown(self):
        debug.pdb = self._pdb
        sys.stdout = self._so

    def test_plugin_api(self):
        # Attribute access only: verifies the expected plugin API surface.
        p = debug.Pdb()
        p.addOptions
        p.configure
        p.addError
        p.addFailure

    def test_plugin_calls_pdb(self):
        # A real exception is raised to obtain a genuine exc_info triple.
        p = debug.Pdb()
        try:
            raise Exception("oops")
        except:
            err = sys.exc_info()
        p.enabled = True
        p.enabled_for_errors = True
        p.enabled_for_failures = True
        p.addError(None, err)
        assert debug.pdb.called, "Did not call pdb.post_mortem on error"
        debug.pdb.called = False
        p.addFailure(None, err)
        assert debug.pdb.called, "Did not call pdb.post_mortem on failure"

    def test_command_line_options_enable(self):
        # --pdb and --pdb-failures switch both behaviours on.
        parser = OptionParser()
        p = debug.Pdb()
        p.addOptions(parser)
        options, args = parser.parse_args(['test_configuration',
                                           '--pdb',
                                           '--pdb-failures'])
        p.configure(options, Config())
        assert p.enabled
        assert p.enabled_for_errors
        assert p.enabled_for_failures

    def test_disabled_by_default(self):
        p = debug.Pdb()
        assert not p.enabled
        assert not p.enabled_for_failures
        parser = OptionParser()
        p.addOptions(parser)
        options, args = parser.parse_args(['test_configuration'])
        p.configure(options, Config())
        assert not p.enabled
        assert not p.enabled_for_errors
        assert not p.enabled_for_failures

    def test_env_settings_enable(self):
        # Environment variables behave like the command-line switches.
        p = debug.Pdb()
        assert not p.enabled
        assert not p.enabled_for_failures
        env = {'NOSE_PDB': '1',
               'NOSE_PDB_FAILURES': '1'}
        parser = OptionParser()
        p.addOptions(parser, env)
        options, args = parser.parse_args(['test_configuration'])
        p.configure(options, Config())
        assert p.enabled
        assert p.enabled_for_errors
        assert p.enabled_for_failures

    def test_real_stdout_restored_before_call(self):
        # The plugin must hand the real stdout back before invoking pdb,
        # then re-apply any capture patch afterwards.
        class CheckStdout(StubPdb):
            def post_mortem(self, tb):
                assert sys.stdout is sys.__stdout__, \
                    "sys.stdout was not restored to sys.__stdout__ " \
                    "before call"
        debug.pdb = CheckStdout()
        patch = StringIO()
        sys.stdout = patch
        p = debug.Pdb()
        p.enabled = True
        p.enabled_for_errors = True
        try:
            raise Exception("oops")
        except:
            err = sys.exc_info()
        p.addError(None, err)
        assert sys.stdout is patch, "sys.stdout was not reset after call"

if __name__ == '__main__':
    unittest.main()
|
mpl-2.0
|
reinaldomaslim/Singaboat_RobotX2016
|
robotx_nav/nodes/mc_scancode_planner.py
|
1
|
1595
|
#!/usr/bin/env python
""" task 4:
-----------------
Created by Ren Ye @ 2016-11-06
Authors: Ren Ye, Reinaldo
-----------------
scan the code
"""
import rospy
import math
import time
import numpy as np
from sklearn.cluster import KMeans
from sklearn import svm
import random
from geometry_msgs.msg import Point, Pose
from visualization_msgs.msg import MarkerArray, Marker
from move_base_loiter import Loiter
from tf.transformations import euler_from_quaternion
from nav_msgs.msg import Odometry
from std_msgs.msg import String
import planner_utils
class MCScanTheCode(object):
    """Monte-carlo stand-in for RobotX task 4 ("scan the code"): loiters at
    a fixed target and publishes a random 3-colour sequence to ROS params."""

    def __init__(self, nodename="scanthecode"):
        # print("starting task 4")
        rospy.init_node(nodename, anonymous=False)
        self.rate = rospy.get_param("~rate", 1)
        # Loiter behaviour reused from move_base_loiter; target set in planner().
        self.loiter = Loiter("loiter", is_newnode=False, target=None, mode=2, is_relative=True)
        self.planner()

    def planner(self):
        # Loiter around the light-buoy position while "reading" the sequence.
        self.loiter.respawn(target=[5, 0, 0], polygon=6, radius=3)
        color = ["red", "red", "red"]
        for i in range(3):
            color[i] = self.mc_color()
            if i >= 1:
                # Re-roll until adjacent colours differ — consecutive lights
                # in the sequence are never the same colour.
                while color[i] == color[i-1]:
                    color[i] = self.mc_color()
        # Publish the guessed sequence for the GUI/judge interface.
        rospy.set_param("/gui/color1", color[0])
        rospy.set_param("/gui/color2", color[1])
        rospy.set_param("/gui/color3", color[2])

    def mc_color(self):
        # Uniformly random colour; monte-carlo placeholder for real detection.
        return random.choice(["red", "green", "blue", "yellow"])

if __name__ == '__main__':
    try:
        MCScanTheCode()
    except rospy.ROSInterruptException:
        rospy.loginfo("Task 4 Finished")
|
gpl-3.0
|
lochiiconnectivity/boto
|
tests/integration/iam/test_cert_verification.py
|
10
|
1495
|
# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Check that all of the certs on all service endpoints validate.
"""
import unittest
import boto.iam
class CertVerificationTest(unittest.TestCase):
    """Check that the TLS certificates on all IAM service endpoints validate.

    Any endpoint with an invalid certificate raises during the request and
    fails the test."""
    # Flags used by the integration-test runner to categorize/select tests.
    iam = True
    ssl = True

    def test_certs(self):
        for region in boto.iam.regions():
            c = region.connect()
            # Any harmless authenticated call forces the TLS handshake and
            # thus certificate verification.
            c.get_all_users()
|
mit
|
fishilico/selinux-refpolicy-patched
|
support/sedoctool.py
|
13
|
25392
|
#!/usr/bin/python
# Author: Joshua Brindle <[email protected]>
# Caleb Case <[email protected]>
#
# Copyright (C) 2005 - 2006 Tresys Technology, LLC
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 2.
"""
This module generates configuration files and documentation from the
SELinux reference policy XML format.
"""
import sys
import getopt
import pyplate
import os
import string
from xml.dom.minidom import parse, parseString
#modules enabled and disabled values
MOD_BASE = "base"        # built into the base module / monolithic policy
MOD_ENABLED = "module"   # compiled as an individual loadable module
MOD_DISABLED = "off"     # excluded from policy creation
#booleans enabled and disabled values
BOOL_ENABLED = "true"
BOOL_DISABLED = "false"
#tunables enabled and disabled values
TUN_ENABLED = "true"
TUN_DISABLED = "false"
def read_policy_xml(filename):
    """
    Parse the policy XML in `filename` and return the DOM document.

    Calls error() (which reports the problem) when the file cannot be
    opened or does not parse; the file handle is always closed exactly once.
    """
    try:
        xml_fh = open(filename)
    except IOError:
        # Narrowed from a bare except: only I/O failures mean "can't open".
        error("error opening " + filename)
    try:
        doc = parseString(xml_fh.read())
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit pass through.
        error("Error while parsing xml")
    finally:
        # Single close on both success and failure paths (the original
        # closed on the error path and then again after the try block).
        xml_fh.close()
    return doc
def gen_booleans_conf(doc, file_name, namevalue_list):
    """
    Generates the booleans configuration file using the XML provided and the
    previous booleans configuration.

    doc            -- parsed policy XML document.
    file_name      -- writable file object for the generated configuration.
    namevalue_list -- [name, value] pairs from the previous configuration;
                      values found here override the XML defaults.
    """
    # Tunables are currently implemented as booleans, so both element kinds
    # are written out in the identical format. (The original duplicated the
    # loop body verbatim for each tag; it is factored into a helper here.)
    for tag in ("bool", "tunable"):
        _gen_bool_entries(doc, tag, file_name, namevalue_list)

def _gen_bool_entries(doc, tag, file_name, namevalue_list):
    """Write one "name = value" entry (with a description comment header)
    for every <tag> element in doc; previous settings in namevalue_list
    override the XML dftval default."""
    for node in doc.getElementsByTagName(tag):
        # Emit the description as a comment block above the entry.
        for desc in node.getElementsByTagName("desc"):
            bool_desc = format_txt_desc(desc)
            s = bool_desc.split("\n")
            file_name.write("#\n")
            for line in s:
                file_name.write("# %s\n" % line)
        bool_name = bool_val = None
        for (name, value) in node.attributes.items():
            if name == "name":
                bool_name = value
            elif name == "dftval":
                bool_val = value
        # A previous explicit setting wins over the XML default.
        if [bool_name, BOOL_ENABLED] in namevalue_list:
            bool_val = BOOL_ENABLED
        elif [bool_name, BOOL_DISABLED] in namevalue_list:
            bool_val = BOOL_DISABLED
        if bool_name and bool_val:
            file_name.write("%s = %s\n\n" % (bool_name, bool_val))
def gen_module_conf(doc, file_name, namevalue_list):
    """
    Generates the module configuration file using the XML provided and the
    previous module configuration.

    doc            -- parsed policy XML document.
    file_name      -- writable file object for modules.conf.
    namevalue_list -- [name, value] pairs from a previous modules.conf;
                      an existing setting for a module is preserved.
    """
    # If file exists, preserve settings and modify if needed.
    # Otherwise, create it.
    file_name.write("#\n# This file contains a listing of available modules.\n")
    file_name.write("# To prevent a module from being used in policy\n")
    file_name.write("# creation, set the module name to \"%s\".\n#\n" % MOD_DISABLED)
    file_name.write("# For monolithic policies, modules set to \"%s\" and \"%s\"\n" % (MOD_BASE, MOD_ENABLED))
    file_name.write("# will be built into the policy.\n#\n")
    file_name.write("# For modular policies, modules set to \"%s\" will be\n" % MOD_BASE)
    file_name.write("# included in the base module. \"%s\" will be compiled\n" % MOD_ENABLED)
    file_name.write("# as individual loadable modules.\n#\n\n")
    # The two-pass loop over [True, False] is present so that the required
    # modules come out at the top of the config file.
    for required in [True,False]:
        for node in doc.getElementsByTagName("module"):
            # A module is "required" if any <required val="true"> child exists.
            mod_req = False
            for req in node.getElementsByTagName("required"):
                if req.getAttribute("val") == "true":
                    mod_req = True
            # Skip if we aren't working on the right set of modules
            # for this pass.
            if mod_req and not required or not mod_req and required:
                continue
            mod_name = mod_layer = None
            mod_name = node.getAttribute("name")
            mod_layer = node.parentNode.getAttribute("name")
            if mod_name and mod_layer:
                file_name.write("# Layer: %s\n# Module: %s\n" % (mod_layer,mod_name))
                if required:
                    file_name.write("# Required in base\n")
                file_name.write("#\n")
                # Emit the module summary as '#' comment lines.
                for desc in node.getElementsByTagName("summary"):
                    if not desc.parentNode == node:
                        continue
                    s = format_txt_desc(desc).split("\n")
                    for line in s:
                        file_name.write("# %s\n" % line)
                # If the module is set as disabled.
                if [mod_name, MOD_DISABLED] in namevalue_list:
                    file_name.write("%s = %s\n\n" % (mod_name, MOD_DISABLED))
                # If the module is set as enabled.
                elif [mod_name, MOD_ENABLED] in namevalue_list:
                    file_name.write("%s = %s\n\n" % (mod_name, MOD_ENABLED))
                # If the module is set as base.
                elif [mod_name, MOD_BASE] in namevalue_list:
                    file_name.write("%s = %s\n\n" % (mod_name, MOD_BASE))
                # If the module is a new module.
                else:
                    # Set the module to base if it is marked as required.
                    if mod_req:
                        file_name.write("%s = %s\n\n" % (mod_name, MOD_BASE))
                    # Set the module to enabled if it is not required.
                    else:
                        file_name.write("%s = %s\n\n" % (mod_name, MOD_ENABLED))
def get_conf(conf):
    """
    Return a list of [name, value] pairs read from the config file object
    *conf*, whose lines have the format "name = value".

    Blank lines and '#' comment lines are ignored.  Malformed lines are
    reported via warning() and skipped.
    """
    namevalue_list = []
    # Bug fix: enumerate from 1 so the reported line numbers match what
    # an editor shows (the original reported 0-based indices).
    for lineno, line in enumerate(conf.readlines(), 1):
        stripped = line.strip()
        if stripped == '' or stripped[0] == "#":
            continue
        namevalue = stripped.split("=")
        if len(namevalue) != 2:
            warning("line %d: \"%s\" is not a valid line, skipping"
                    % (lineno, stripped))
            continue
        namevalue[0] = namevalue[0].strip()
        if len(namevalue[0].split()) > 1:
            warning("line %d: \"%s\" is not a valid line, skipping"
                    % (lineno, stripped))
            continue
        namevalue[1] = namevalue[1].strip()
        if len(namevalue[1].split()) > 1:
            warning("line %d: \"%s\" is not a valid line, skipping"
                    % (lineno, stripped))
            continue
        namevalue_list.append(namevalue)
    return namevalue_list
def first_cmp_func(a):
    """Sort key: the first element of *a*."""
    first_element = a[0]
    return first_element
def int_cmp_func(a):
    """Sort key: the interface name of the entry *a*."""
    key = "interface_name"
    return a[key]
def temp_cmp_func(a):
    """Sort key: the template name of the entry *a*."""
    key = "template_name"
    return a[key]
def tun_cmp_func(a):
    """Sort key: the tunable name of the entry *a*."""
    key = "tun_name"
    return a[key]
def bool_cmp_func(a):
    """Sort key: the boolean name of the entry *a*."""
    key = "bool_name"
    return a[key]
def gen_doc_menu(mod_layer, module_list):
    """
    Build the sorted menu structure for the HTML documentation.

    Returns a list of (layer, [(module, summary), ...]) tuples, sorted by
    layer name and by module name inside each layer.  When *mod_layer* is
    not None, only that layer's module list is populated; the other
    layers appear with empty lists.
    """
    menu = []
    for layer, mods in module_list.items():
        entry = (layer, [])
        menu.append(entry)
        # Fill in the modules only for the requested layer, or for every
        # layer when mod_layer is None.
        if mod_layer is not None and layer != mod_layer:
            continue
        for mod, summary in mods.items():
            entry[1].append((mod, summary))
    menu.sort(key=lambda item: item[0])
    for entry in menu:
        entry[1].sort(key=lambda item: item[0])
    return menu
def format_html_desc(node):
    """
    Recursively render the XML node *node* as HTML.

    Bare text that is not already inside a <p> element is wrapped in one;
    element children are re-emitted as matching HTML tags around their
    recursively formatted contents.
    """
    desc_buf = ''
    for desc in node.childNodes:
        if desc.nodeName == "#text":
            # Bug fix: the original used "desc.data is not ''", an identity
            # comparison that only worked via CPython string interning and
            # raises SyntaxWarning on modern Python.  Compare by value.
            if desc.data != '':
                if desc.parentNode.nodeName != "p":
                    desc_buf += "<p>" + desc.data + "</p>"
                else:
                    desc_buf += desc.data
        else:
            desc_buf += ("<" + desc.nodeName + ">"
                         + format_html_desc(desc)
                         + "</" + desc.nodeName + ">")
    return desc_buf
def format_txt_desc(node):
    """
    Render the XML node *node* as plain text (used for '#' comment blocks
    in the generated configuration files).

    Top-level text nodes are emitted as-is; each <p> child contributes
    its leading text plus one indented "\t -" line per <li> found inside
    any nested <ul>.  The result always ends with exactly one newline.
    """
    desc_buf = ''
    for desc in node.childNodes:
        if desc.nodeName == "#text":
            desc_buf += desc.data + "\n"
        elif desc.nodeName == "p":
            # NOTE(review): assumes the <p> starts with a text node; a <p>
            # whose first child is an element would fail here — confirm
            # against the policy XML schema.
            desc_buf += desc.firstChild.data + "\n"
            for chld in desc.childNodes:
                if chld.nodeName == "ul":
                    desc_buf += "\n"
                    for li in chld.getElementsByTagName("li"):
                        desc_buf += "\t -" + li.firstChild.data + "\n"
    return desc_buf.strip() + "\n"
def gen_docs(doc, working_dir, templatedir):
    """
    Generate the complete HTML documentation tree in *working_dir*.

    doc         -- parsed policy XML document.
    working_dir -- output directory; this function chdir()s into it and
                   writes all .html files there.
    templatedir -- directory containing the pyplate HTML templates.

    Produces per-layer index pages, one page per module, and global
    index pages for interfaces, templates, tunables and booleans.
    """
    try:
        #get the template data ahead of time so we don't reopen them over and over
        bodyfile = open(templatedir + "/header.html", "r")
        bodydata = bodyfile.read()
        bodyfile.close()
        intfile = open(templatedir + "/interface.html", "r")
        intdata = intfile.read()
        intfile.close()
        templatefile = open(templatedir + "/template.html", "r")
        templatedata = templatefile.read()
        templatefile.close()
        tunfile = open(templatedir + "/tunable.html", "r")
        tundata = tunfile.read()
        tunfile.close()
        boolfile = open(templatedir + "/boolean.html", "r")
        booldata = boolfile.read()
        boolfile.close()
        menufile = open(templatedir + "/menu.html", "r")
        menudata = menufile.read()
        menufile.close()
        indexfile = open(templatedir + "/module_list.html","r")
        indexdata = indexfile.read()
        indexfile.close()
        modulefile = open(templatedir + "/module.html","r")
        moduledata = modulefile.read()
        modulefile.close()
        intlistfile = open(templatedir + "/int_list.html", "r")
        intlistdata = intlistfile.read()
        intlistfile.close()
        templistfile = open(templatedir + "/temp_list.html", "r")
        templistdata = templistfile.read()
        templistfile.close()
        tunlistfile = open(templatedir + "/tun_list.html", "r")
        tunlistdata = tunlistfile.read()
        tunlistfile.close()
        boollistfile = open(templatedir + "/bool_list.html", "r")
        boollistdata = boollistfile.read()
        boollistfile.close()
        gboollistfile = open(templatedir + "/global_bool_list.html", "r")
        gboollistdata = gboollistfile.read()
        gboollistfile.close()
        gtunlistfile = open(templatedir + "/global_tun_list.html", "r")
        gtunlistdata = gtunlistfile.read()
        gtunlistfile.close()
    except:
        error("Could not open templates")
    try:
        os.chdir(working_dir)
    except:
        error("Could not chdir to target directory")
    # First pass over the DOM tree to build up the layer -> module ->
    # summary mapping used by every menu.
    module_list = {}
    for node in doc.getElementsByTagName("module"):
        mod_name = mod_layer = interface_buf = ''
        mod_name = node.getAttribute("name")
        mod_layer = node.parentNode.getAttribute("name")
        for desc in node.getElementsByTagName("summary"):
            if desc.parentNode == node and desc:
                mod_summary = format_html_desc(desc)
        if not mod_layer in module_list:
            module_list[mod_layer] = {}
        module_list[mod_layer][mod_name] = mod_summary
    #generate index pages
    main_content_buf = ''
    for mod_layer,modules in module_list.items():
        menu = gen_doc_menu(mod_layer, module_list)
        layer_summary = None
        for desc in doc.getElementsByTagName("summary"):
            if desc.parentNode.getAttribute("name") == mod_layer:
                layer_summary = format_html_desc(desc)
        menu_args = { "menulist" : menu,
                      "mod_layer" : mod_layer,
                      "layer_summary" : layer_summary }
        menu_tpl = pyplate.Template(menudata)
        menu_buf = menu_tpl.execute_string(menu_args)
        content_tpl = pyplate.Template(indexdata)
        content_buf = content_tpl.execute_string(menu_args)
        main_content_buf += content_buf
        body_args = { "menu" : menu_buf,
                      "content" : content_buf }
        index_file = mod_layer + ".html"
        index_fh = open(index_file, "w")
        body_tpl = pyplate.Template(bodydata)
        body_tpl.execute(index_fh, body_args)
        index_fh.close()
    # Top-level index.html concatenates all the per-layer content.
    menu = gen_doc_menu(None, module_list)
    menu_args = { "menulist" : menu,
                  "mod_layer" : None }
    menu_tpl = pyplate.Template(menudata)
    menu_buf = menu_tpl.execute_string(menu_args)
    body_args = { "menu" : menu_buf,
                  "content" : main_content_buf }
    index_file = "index.html"
    index_fh = open(index_file, "w")
    body_tpl = pyplate.Template(bodydata)
    body_tpl.execute(index_fh, body_args)
    index_fh.close()
    #now generate the individual module pages
    all_interfaces = []
    all_templates = []
    all_tunables = []
    all_booleans = []
    for node in doc.getElementsByTagName("module"):
        mod_name = mod_layer = mod_desc = interface_buf = ''
        mod_name = node.getAttribute("name")
        mod_layer = node.parentNode.getAttribute("name")
        mod_req = None
        for req in node.getElementsByTagName("required"):
            if req.getAttribute("val") == "true":
                mod_req = True
        for desc in node.getElementsByTagName("summary"):
            if desc.parentNode == node:
                mod_summary = format_html_desc(desc)
        for desc in node.getElementsByTagName("desc"):
            if desc.parentNode == node:
                mod_desc = format_html_desc(desc)
        # Collect this module's interfaces (and feed the global index).
        interfaces = []
        for interface in node.getElementsByTagName("interface"):
            interface_parameters = []
            interface_desc = interface_summary = None
            interface_name = interface.getAttribute("name")
            interface_line = interface.getAttribute("lineno")
            for desc in interface.childNodes:
                if desc.nodeName == "desc":
                    interface_desc = format_html_desc(desc)
                elif desc.nodeName == "summary":
                    interface_summary = format_html_desc(desc)
            for args in interface.getElementsByTagName("param"):
                for desc in args.getElementsByTagName("summary"):
                    paramdesc = format_html_desc(desc)
                paramname = args.getAttribute("name")
                if args.getAttribute("optional") == "true":
                    paramopt = "Yes"
                else:
                    paramopt = "No"
                if args.getAttribute("unused") == "true":
                    paramunused = "Yes"
                else:
                    paramunused = "No"
                parameter = { "name" : paramname,
                              "desc" : paramdesc,
                              "optional" : paramopt,
                              "unused" : paramunused }
                interface_parameters.append(parameter)
            interfaces.append( { "interface_name" : interface_name,
                                 "interface_summary" : interface_summary,
                                 "interface_desc" : interface_desc,
                                 "interface_parameters" : interface_parameters })
            #all_interfaces is for the main interface index with all interfaces
            all_interfaces.append( { "interface_name" : interface_name,
                                     "interface_summary" : interface_summary,
                                     "interface_desc" : interface_desc,
                                     "interface_parameters" : interface_parameters,
                                     "mod_name": mod_name,
                                     "mod_layer" : mod_layer })
        interfaces.sort(key=int_cmp_func)
        interface_tpl = pyplate.Template(intdata)
        interface_buf = interface_tpl.execute_string({"interfaces" : interfaces})
        # now generate individual template pages
        templates = []
        for template in node.getElementsByTagName("template"):
            template_parameters = []
            template_desc = template_summary = None
            template_name = template.getAttribute("name")
            template_line = template.getAttribute("lineno")
            for desc in template.childNodes:
                if desc.nodeName == "desc":
                    template_desc = format_html_desc(desc)
                elif desc.nodeName == "summary":
                    template_summary = format_html_desc(desc)
            for args in template.getElementsByTagName("param"):
                for desc in args.getElementsByTagName("summary"):
                    paramdesc = format_html_desc(desc)
                paramname = args.getAttribute("name")
                if args.getAttribute("optional") == "true":
                    paramopt = "Yes"
                else:
                    paramopt = "No"
                if args.getAttribute("unused") == "true":
                    paramunused = "Yes"
                else:
                    paramunused = "No"
                parameter = { "name" : paramname,
                              "desc" : paramdesc,
                              "optional" : paramopt,
                              "unused": paramunused }
                template_parameters.append(parameter)
            templates.append( { "template_name" : template_name,
                                "template_summary" : template_summary,
                                "template_desc" : template_desc,
                                "template_parameters" : template_parameters })
            #all_templates is for the main interface index with all templates
            all_templates.append( { "template_name" : template_name,
                                    "template_summary" : template_summary,
                                    "template_desc" : template_desc,
                                    "template_parameters" : template_parameters,
                                    "mod_name": mod_name,
                                    "mod_layer" : mod_layer })
        templates.sort(key=temp_cmp_func)
        template_tpl = pyplate.Template(templatedata)
        template_buf = template_tpl.execute_string({"templates" : templates})
        #generate 'boolean' pages
        booleans = []
        for boolean in node.getElementsByTagName("bool"):
            boolean_parameters = []
            boolean_desc = None
            boolean_name = boolean.getAttribute("name")
            boolean_dftval = boolean.getAttribute("dftval")
            for desc in boolean.childNodes:
                if desc.nodeName == "desc":
                    boolean_desc = format_html_desc(desc)
            booleans.append({ "bool_name" : boolean_name,
                              "desc" : boolean_desc,
                              "def_val" : boolean_dftval })
            #all_booleans is for the main boolean index with all booleans
            all_booleans.append({ "bool_name" : boolean_name,
                                  "desc" : boolean_desc,
                                  "def_val" : boolean_dftval,
                                  "mod_name": mod_name,
                                  "mod_layer" : mod_layer })
        booleans.sort(key=bool_cmp_func)
        boolean_tpl = pyplate.Template(booldata)
        boolean_buf = boolean_tpl.execute_string({"booleans" : booleans})
        #generate 'tunable' pages
        tunables = []
        for tunable in node.getElementsByTagName("tunable"):
            tunable_parameters = []
            tunable_desc = None
            tunable_name = tunable.getAttribute("name")
            tunable_dftval = tunable.getAttribute("dftval")
            for desc in tunable.childNodes:
                if desc.nodeName == "desc":
                    tunable_desc = format_html_desc(desc)
            tunables.append({ "tun_name" : tunable_name,
                              "desc" : tunable_desc,
                              "def_val" : tunable_dftval })
            #all_tunables is for the main tunable index with all tunables
            all_tunables.append({ "tun_name" : tunable_name,
                                  "desc" : tunable_desc,
                                  "def_val" : tunable_dftval,
                                  "mod_name": mod_name,
                                  "mod_layer" : mod_layer })
        tunables.sort(key=tun_cmp_func)
        tunable_tpl = pyplate.Template(tundata)
        tunable_buf = tunable_tpl.execute_string({"tunables" : tunables})
        menu = gen_doc_menu(mod_layer, module_list)
        menu_tpl = pyplate.Template(menudata)
        menu_buf = menu_tpl.execute_string({ "menulist" : menu })
        # pyplate's execute_string gives us a line of whitespace in
        # template_buf or interface_buf if there are no interfaces or
        # templates for this module. This is problematic because the
        # HTML templates use a conditional if on interface_buf or
        # template_buf being 'None' to decide if the "Template:" or
        # "Interface:" headers need to be printed in the module pages.
        # This detects if either of these are just whitespace, and sets
        # their values to 'None' so that when applying it to the
        # templates, they are properly recognized as not existing.
        if not interface_buf.strip():
            interface_buf = None
        if not template_buf.strip():
            template_buf = None
        if not tunable_buf.strip():
            tunable_buf = None
        if not boolean_buf.strip():
            boolean_buf = None
        module_args = { "mod_layer" : mod_layer,
                        "mod_name" : mod_name,
                        "mod_summary" : mod_summary,
                        "mod_desc" : mod_desc,
                        "mod_req" : mod_req,
                        "interfaces" : interface_buf,
                        "templates" : template_buf,
                        "tunables" : tunable_buf,
                        "booleans" : boolean_buf }
        module_tpl = pyplate.Template(moduledata)
        module_buf = module_tpl.execute_string(module_args)
        body_args = { "menu" : menu_buf,
                      "content" : module_buf }
        module_file = mod_layer + "_" + mod_name + ".html"
        module_fh = open(module_file, "w")
        body_tpl = pyplate.Template(bodydata)
        body_tpl.execute(module_fh, body_args)
        module_fh.close()
    # The remaining pages all share the full (all-layers) menu.
    menu = gen_doc_menu(None, module_list)
    menu_args = { "menulist" : menu,
                  "mod_layer" : None }
    menu_tpl = pyplate.Template(menudata)
    menu_buf = menu_tpl.execute_string(menu_args)
    #build the interface index
    all_interfaces.sort(key=int_cmp_func)
    interface_tpl = pyplate.Template(intlistdata)
    interface_buf = interface_tpl.execute_string({"interfaces" : all_interfaces})
    int_file = "interfaces.html"
    int_fh = open(int_file, "w")
    body_tpl = pyplate.Template(bodydata)
    body_args = { "menu" : menu_buf,
                  "content" : interface_buf }
    body_tpl.execute(int_fh, body_args)
    int_fh.close()
    #build the template index
    all_templates.sort(key=temp_cmp_func)
    template_tpl = pyplate.Template(templistdata)
    template_buf = template_tpl.execute_string({"templates" : all_templates})
    temp_file = "templates.html"
    temp_fh = open(temp_file, "w")
    body_tpl = pyplate.Template(bodydata)
    body_args = { "menu" : menu_buf,
                  "content" : template_buf }
    body_tpl.execute(temp_fh, body_args)
    temp_fh.close()
    #build the global tunable index
    global_tun = []
    for tunable in doc.getElementsByTagName("tunable"):
        # Global tunables are direct children of the <policy> element.
        if tunable.parentNode.nodeName == "policy":
            tunable_name = tunable.getAttribute("name")
            default_value = tunable.getAttribute("dftval")
            for desc in tunable.getElementsByTagName("desc"):
                description = format_html_desc(desc)
            global_tun.append( { "tun_name" : tunable_name,
                                 "def_val" : default_value,
                                 "desc" : description } )
    global_tun.sort(key=tun_cmp_func)
    global_tun_tpl = pyplate.Template(gtunlistdata)
    global_tun_buf = global_tun_tpl.execute_string({"tunables" : global_tun})
    global_tun_file = "global_tunables.html"
    global_tun_fh = open(global_tun_file, "w")
    body_tpl = pyplate.Template(bodydata)
    body_args = { "menu" : menu_buf,
                  "content" : global_tun_buf }
    body_tpl.execute(global_tun_fh, body_args)
    global_tun_fh.close()
    #build the tunable index
    all_tunables = all_tunables + global_tun
    all_tunables.sort(key=tun_cmp_func)
    tunable_tpl = pyplate.Template(tunlistdata)
    tunable_buf = tunable_tpl.execute_string({"tunables" : all_tunables})
    temp_file = "tunables.html"
    temp_fh = open(temp_file, "w")
    body_tpl = pyplate.Template(bodydata)
    body_args = { "menu" : menu_buf,
                  "content" : tunable_buf }
    body_tpl.execute(temp_fh, body_args)
    temp_fh.close()
    #build the global boolean index
    global_bool = []
    for boolean in doc.getElementsByTagName("bool"):
        # Global booleans are direct children of the <policy> element.
        if boolean.parentNode.nodeName == "policy":
            bool_name = boolean.getAttribute("name")
            default_value = boolean.getAttribute("dftval")
            for desc in boolean.getElementsByTagName("desc"):
                description = format_html_desc(desc)
            global_bool.append( { "bool_name" : bool_name,
                                  "def_val" : default_value,
                                  "desc" : description } )
    global_bool.sort(key=bool_cmp_func)
    global_bool_tpl = pyplate.Template(gboollistdata)
    global_bool_buf = global_bool_tpl.execute_string({"booleans" : global_bool})
    global_bool_file = "global_booleans.html"
    global_bool_fh = open(global_bool_file, "w")
    body_tpl = pyplate.Template(bodydata)
    body_args = { "menu" : menu_buf,
                  "content" : global_bool_buf }
    body_tpl.execute(global_bool_fh, body_args)
    global_bool_fh.close()
    #build the boolean index
    all_booleans = all_booleans + global_bool
    all_booleans.sort(key=bool_cmp_func)
    boolean_tpl = pyplate.Template(boollistdata)
    boolean_buf = boolean_tpl.execute_string({"booleans" : all_booleans})
    temp_file = "booleans.html"
    temp_fh = open(temp_file, "w")
    body_tpl = pyplate.Template(bodydata)
    body_args = { "menu" : menu_buf,
                  "content" : boolean_buf }
    body_tpl.execute(temp_fh, body_args)
    temp_fh.close()
def error(error):
    """
    Write an error message to stderr and terminate with exit status 1.
    """
    sys.stderr.write("%s exiting for: %s\n" % (sys.argv[0], error))
    sys.stderr.flush()
    sys.exit(1)
def warning(warn):
    """
    Write a non-fatal warning message to stderr.
    """
    sys.stderr.write("%s warning: %s\n" % (sys.argv[0], warn))
def usage():
    """
    Print a short usage summary for this tool to stdout.
    """
    text = (
        "%s [-tmdT] -x <xmlfile>\n\n" % sys.argv[0]
        + "Options:\n"
        + "-b --booleans <file> -- write boolean config to <file>\n"
        + "-m --modules <file> -- write module config to <file>\n"
        + "-d --docs <dir> -- write interface documentation to <dir>\n"
        + "-x --xml <file> -- filename to read xml data from\n"
        + "-T --templates <dir> -- template directory for documents\n"
    )
    sys.stdout.write(text)
# MAIN PROGRAM
try:
    # BUG FIX: every long option takes a required argument, so each needs
    # a trailing '=' in the getopt spec; without it, getopt treats e.g.
    # --xml as a bare flag and rejects "--xml=policy.xml".
    opts, args = getopt.getopt(sys.argv[1:], "b:m:d:x:T:",
                               ["booleans=", "modules=", "docs=", "xml=", "templates="])
except getopt.GetoptError:
    usage()
    sys.exit(1)

# Defaults, overridden by the command-line options below.
booleans = modules = docsdir = None
templatedir = "templates/"
xmlfile = "policy.xml"

for opt, val in opts:
    if opt in ("-b", "--booleans"):
        booleans = val
    if opt in ("-m", "--modules"):
        modules = val
    if opt in ("-d", "--docs"):
        docsdir = val
    if opt in ("-x", "--xml"):
        xmlfile = val
    if opt in ("-T", "--templates"):
        templatedir = val

doc = read_policy_xml(xmlfile)

if booleans:
    # Read any existing booleans file first so user settings are
    # preserved, then rewrite it from the policy XML.
    namevalue_list = []
    if os.path.exists(booleans):
        try:
            conf = open(booleans, 'r')
        except IOError:
            error("Could not open booleans file for reading")
        namevalue_list = get_conf(conf)
        conf.close()
    try:
        conf = open(booleans, 'w')
    except IOError:
        error("Could not open booleans file for writing")
    gen_booleans_conf(doc, conf, namevalue_list)
    conf.close()

if modules:
    # Same preserve-then-rewrite handling for the modules file.
    namevalue_list = []
    if os.path.exists(modules):
        try:
            conf = open(modules, 'r')
        except IOError:
            error("Could not open modules file for reading")
        namevalue_list = get_conf(conf)
        conf.close()
    try:
        conf = open(modules, 'w')
    except IOError:
        error("Could not open modules file for writing")
    gen_module_conf(doc, conf, namevalue_list)
    conf.close()

if docsdir:
    gen_docs(doc, docsdir, templatedir)
|
gpl-2.0
|
waneric/PyMapLib
|
src/gabbs/controls/MapToolAction.py
|
1
|
6234
|
# -*- coding: utf-8 -*-
"""
MapToolAction.py - map tool for user events
======================================================================
AUTHOR: Wei Wan, Purdue University
EMAIL: [email protected]
Copyright (c) 2016 Purdue University
See the file "license.terms" for information on usage and
redistribution of this file, and for a DISCLAIMER OF ALL WARRANTIES.
======================================================================
"""
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.core import *
from qgis.gui import *
from gabbs.MapUtils import iface, debug_trace
class MapToolFeatureAction(QgsMapTool):
    '''
    Map tool that runs the current vector layer's default attribute
    action (or default QgsMapLayerAction) on the feature(s) under the
    cursor when the user clicks the canvas.
    '''

    def __init__(self, canvas):
        QgsMapTool.__init__(self, canvas)
        self.canvas = canvas
        self.rubberBand = None
        self.cursor = QCursor(Qt.ArrowCursor)

    # Override events
    def canvasReleaseEvent(self, event):
        """Run the default action for the feature(s) under the click."""
        layer = self.canvas.currentLayer()
        if not layer or layer.type() != QgsMapLayer.VectorLayer:
            # To run an action, an active vector layer must be chosen.
            return
        if layer not in self.canvas.layers():
            # Do not run actions on hidden layers.
            return
        # BUG FIX: the original code had an unconditional "return" here,
        # left over from the commented-out action-count check below.  It
        # made everything after it unreachable, so the tool never ran any
        # action.  The dead return has been removed.
        #if (layer.actions().size() == 0 and \
        #    len(QgsMapLayerActionRegistry.instance().mapLayerActions(layer)) == 0):
        #    # The active vector layer has no defined actions.
        #    return
        if not self.doAction(layer, event.x(), event.y()):
            # No features at this position found.
            pass

    def doAction(self, layer, x, y):
        """
        Find the features near device coordinates (x, y) on *layer* and
        trigger the default action for each.  Return True if at least
        one feature was found, False otherwise.
        """
        if not layer:
            return False
        point = self.canvas.getCoordinateTransform().toMapCoordinates(x, y)
        featList = []
        # toLayerCoordinates will throw an exception for an 'invalid' point.
        # For example, if you project a world map onto a globe using EPSG 2163
        # and then click somewhere off the globe, an exception will be thrown.
        try:
            # Create the search rectangle around the clicked point.
            searchRadius = self.searchRadiusMU(self.canvas)
            r = QgsRectangle()
            r.setXMinimum(point.x() - searchRadius)
            r.setXMaximum(point.x() + searchRadius)
            r.setYMinimum(point.y() - searchRadius)
            r.setYMaximum(point.y() + searchRadius)
            r = self.toLayerCoordinates(layer, r)
            fit = layer.getFeatures(QgsFeatureRequest().setFilterRect(r).setFlags(QgsFeatureRequest.ExactIntersect))
            f = QgsFeature()
            while fit.nextFeature(f):
                featList.append(QgsFeature(f))
        except QgsCsException as cse:
            # Catch the exception for an 'invalid' point and proceed with
            # no features found.
            QgsDebugMsg(QString("Caught CRS exception %1").arg(cse.what()))
        if len(featList) == 0:
            return False
        for feat in featList:
            if layer.actions().defaultAction() >= 0:
                # Define custom substitutions: layer id and clicked coords.
                substitutionMap = {}
                substitutionMap["$layerid"] = layer.id()
                point = self.toLayerCoordinates(layer, point)
                substitutionMap["$clickx"] = point.x()
                substitutionMap["$clicky"] = point.y()
                actionIdx = layer.actions().defaultAction()
                self.doAttributeAction(layer, actionIdx, feat, substitutionMap)
            else:
                mapLayerAction = QgsMapLayerActionRegistry.instance().defaultActionForLayer(layer)
                if mapLayerAction:
                    mapLayerAction.triggerForFeature(layer, feat)
        return True

    def doAttributeAction(self, layer, index, feat, substitutionMap):
        """
        Reimplementation of the QGIS C++ attribute-action runner: expand
        the action text for *feat* and execute the resulting action.
        """
        actions = layer.actions()
        if index < 0 or index >= actions.size():
            return
        action = actions.at(index)
        if not action.runable():
            return
        # Expand $substitutions and embedded expressions in the action text.
        expandedAction = QString(QgsExpression.replaceExpressionText(action.action(), feat, layer, substitutionMap))
        if expandedAction.isEmpty():
            return
        newAction = QgsAction(action.type(), action.name(), expandedAction, action.capture())
        self.runAttributeAction(newAction)

    def runAttributeAction(self, action):
        """Dispatch *action* according to its type (URL, Python, process)."""
        if action.type() == QgsAction.OpenUrl:
            finfo = QFileInfo(action.action())
            if finfo.exists() and finfo.isFile():
                QDesktopServices.openUrl(QUrl.fromLocalFile(action.action()))
            else:
                QDesktopServices.openUrl(QUrl(action.action(), QUrl.TolerantMode))
        elif action.type() == QgsAction.GenericPython:
            # TODO: capture output from QgsPythonRunner (like QgsRunProcess does)
            QgsPythonRunner.run(action.action(), QString("Python running error"))
        else:
            # The QgsRunProcess instance created by this static function
            # deletes itself when no longer needed.
            QgsRunProcess.create(action.action(), action.capture())
|
mit
|
didzis/AMRParsing
|
stanfordnlp/unidecode/x06d.py
|
252
|
4651
|
# ASCII transliterations (Mandarin pinyin readings) for the Unicode code
# points U+6D00..U+6DFF; the tuple index is the low byte of the code point.
# '[?]' marks characters with no known transliteration.
data = (
'Zhou ', # 0x00
'Ji ', # 0x01
'Yi ', # 0x02
'Hui ', # 0x03
'Hui ', # 0x04
'Zui ', # 0x05
'Cheng ', # 0x06
'Yin ', # 0x07
'Wei ', # 0x08
'Hou ', # 0x09
'Jian ', # 0x0a
'Yang ', # 0x0b
'Lie ', # 0x0c
'Si ', # 0x0d
'Ji ', # 0x0e
'Er ', # 0x0f
'Xing ', # 0x10
'Fu ', # 0x11
'Sa ', # 0x12
'Suo ', # 0x13
'Zhi ', # 0x14
'Yin ', # 0x15
'Wu ', # 0x16
'Xi ', # 0x17
'Kao ', # 0x18
'Zhu ', # 0x19
'Jiang ', # 0x1a
'Luo ', # 0x1b
'[?] ', # 0x1c
'An ', # 0x1d
'Dong ', # 0x1e
'Yi ', # 0x1f
'Mou ', # 0x20
'Lei ', # 0x21
'Yi ', # 0x22
'Mi ', # 0x23
'Quan ', # 0x24
'Jin ', # 0x25
'Mo ', # 0x26
'Wei ', # 0x27
'Xiao ', # 0x28
'Xie ', # 0x29
'Hong ', # 0x2a
'Xu ', # 0x2b
'Shuo ', # 0x2c
'Kuang ', # 0x2d
'Tao ', # 0x2e
'Qie ', # 0x2f
'Ju ', # 0x30
'Er ', # 0x31
'Zhou ', # 0x32
'Ru ', # 0x33
'Ping ', # 0x34
'Xun ', # 0x35
'Xiong ', # 0x36
'Zhi ', # 0x37
'Guang ', # 0x38
'Huan ', # 0x39
'Ming ', # 0x3a
'Huo ', # 0x3b
'Wa ', # 0x3c
'Qia ', # 0x3d
'Pai ', # 0x3e
'Wu ', # 0x3f
'Qu ', # 0x40
'Liu ', # 0x41
'Yi ', # 0x42
'Jia ', # 0x43
'Jing ', # 0x44
'Qian ', # 0x45
'Jiang ', # 0x46
'Jiao ', # 0x47
'Cheng ', # 0x48
'Shi ', # 0x49
'Zhuo ', # 0x4a
'Ce ', # 0x4b
'Pal ', # 0x4c
'Kuai ', # 0x4d
'Ji ', # 0x4e
'Liu ', # 0x4f
'Chan ', # 0x50
'Hun ', # 0x51
'Hu ', # 0x52
'Nong ', # 0x53
'Xun ', # 0x54
'Jin ', # 0x55
'Lie ', # 0x56
'Qiu ', # 0x57
'Wei ', # 0x58
'Zhe ', # 0x59
'Jun ', # 0x5a
'Han ', # 0x5b
'Bang ', # 0x5c
'Mang ', # 0x5d
'Zhuo ', # 0x5e
'You ', # 0x5f
'Xi ', # 0x60
'Bo ', # 0x61
'Dou ', # 0x62
'Wan ', # 0x63
'Hong ', # 0x64
'Yi ', # 0x65
'Pu ', # 0x66
'Ying ', # 0x67
'Lan ', # 0x68
'Hao ', # 0x69
'Lang ', # 0x6a
'Han ', # 0x6b
'Li ', # 0x6c
'Geng ', # 0x6d
'Fu ', # 0x6e
'Wu ', # 0x6f
'Lian ', # 0x70
'Chun ', # 0x71
'Feng ', # 0x72
'Yi ', # 0x73
'Yu ', # 0x74
'Tong ', # 0x75
'Lao ', # 0x76
'Hai ', # 0x77
'Jin ', # 0x78
'Jia ', # 0x79
'Chong ', # 0x7a
'Weng ', # 0x7b
'Mei ', # 0x7c
'Sui ', # 0x7d
'Cheng ', # 0x7e
'Pei ', # 0x7f
'Xian ', # 0x80
'Shen ', # 0x81
'Tu ', # 0x82
'Kun ', # 0x83
'Pin ', # 0x84
'Nie ', # 0x85
'Han ', # 0x86
'Jing ', # 0x87
'Xiao ', # 0x88
'She ', # 0x89
'Nian ', # 0x8a
'Tu ', # 0x8b
'Yong ', # 0x8c
'Xiao ', # 0x8d
'Xian ', # 0x8e
'Ting ', # 0x8f
'E ', # 0x90
'Su ', # 0x91
'Tun ', # 0x92
'Juan ', # 0x93
'Cen ', # 0x94
'Ti ', # 0x95
'Li ', # 0x96
'Shui ', # 0x97
'Si ', # 0x98
'Lei ', # 0x99
'Shui ', # 0x9a
'Tao ', # 0x9b
'Du ', # 0x9c
'Lao ', # 0x9d
'Lai ', # 0x9e
'Lian ', # 0x9f
'Wei ', # 0xa0
'Wo ', # 0xa1
'Yun ', # 0xa2
'Huan ', # 0xa3
'Di ', # 0xa4
'[?] ', # 0xa5
'Run ', # 0xa6
'Jian ', # 0xa7
'Zhang ', # 0xa8
'Se ', # 0xa9
'Fu ', # 0xaa
'Guan ', # 0xab
'Xing ', # 0xac
'Shou ', # 0xad
'Shuan ', # 0xae
'Ya ', # 0xaf
'Chuo ', # 0xb0
'Zhang ', # 0xb1
'Ye ', # 0xb2
'Kong ', # 0xb3
'Wo ', # 0xb4
'Han ', # 0xb5
'Tuo ', # 0xb6
'Dong ', # 0xb7
'He ', # 0xb8
'Wo ', # 0xb9
'Ju ', # 0xba
'Gan ', # 0xbb
'Liang ', # 0xbc
'Hun ', # 0xbd
'Ta ', # 0xbe
'Zhuo ', # 0xbf
'Dian ', # 0xc0
'Qie ', # 0xc1
'De ', # 0xc2
'Juan ', # 0xc3
'Zi ', # 0xc4
'Xi ', # 0xc5
'Yao ', # 0xc6
'Qi ', # 0xc7
'Gu ', # 0xc8
'Guo ', # 0xc9
'Han ', # 0xca
'Lin ', # 0xcb
'Tang ', # 0xcc
'Zhou ', # 0xcd
'Peng ', # 0xce
'Hao ', # 0xcf
'Chang ', # 0xd0
'Shu ', # 0xd1
'Qi ', # 0xd2
'Fang ', # 0xd3
'Chi ', # 0xd4
'Lu ', # 0xd5
'Nao ', # 0xd6
'Ju ', # 0xd7
'Tao ', # 0xd8
'Cong ', # 0xd9
'Lei ', # 0xda
'Zhi ', # 0xdb
'Peng ', # 0xdc
'Fei ', # 0xdd
'Song ', # 0xde
'Tian ', # 0xdf
'Pi ', # 0xe0
'Dan ', # 0xe1
'Yu ', # 0xe2
'Ni ', # 0xe3
'Yu ', # 0xe4
'Lu ', # 0xe5
'Gan ', # 0xe6
'Mi ', # 0xe7
'Jing ', # 0xe8
'Ling ', # 0xe9
'Lun ', # 0xea
'Yin ', # 0xeb
'Cui ', # 0xec
'Qu ', # 0xed
'Huai ', # 0xee
'Yu ', # 0xef
'Nian ', # 0xf0
'Shen ', # 0xf1
'Piao ', # 0xf2
'Chun ', # 0xf3
'Wa ', # 0xf4
'Yuan ', # 0xf5
'Lai ', # 0xf6
'Hun ', # 0xf7
'Qing ', # 0xf8
'Yan ', # 0xf9
'Qian ', # 0xfa
'Tian ', # 0xfb
'Miao ', # 0xfc
'Zhi ', # 0xfd
'Yin ', # 0xfe
'Mi ', # 0xff
)
|
gpl-2.0
|
obnam-mirror/obnam
|
obnamlib/backup_progress.py
|
1
|
6958
|
# Copyright (C) 2009-2017 Lars Wirzenius
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import time
import obnamlib
class BackupProgress(object):
    def __init__(self, ts):
        # ts: a ttystatus-style terminal status object; only the mapping
        # protocol, format(), flush(), error() and (optionally)
        # start_new_line are used here.
        self.file_count = 0        # files found so far
        self.backed_up_count = 0   # files actually backed up
        self.uploaded_bytes = 0    # chunk payload bytes uploaded
        self.scanned_bytes = 0     # bytes of file data scanned
        self.started = None        # start timestamp, set lazily by what()
        self.errors = False        # set to True by error()
        self._now = time.time      # time source, replaceable via set_time_func()
        self._ts = ts
        self._ts['current-file'] = ''
        self._ts['scanned-bytes'] = 0
        self._ts['uploaded-bytes'] = 0
        # Prefer the richer two-line display when ttystatus supports it.
        if self.ttystatus_supports_multiline(): # pragma: no cover
            self._ts.format(
                '%ElapsedTime() Backing up: '
                'found %Counter(current-file) files, '
                '%ByteSize(scanned-bytes); '
                'uploaded: %ByteSize(uploaded-bytes)\n'
                '%String(what)'
            )
        else: # pragma: no cover
            self._ts.format(
                '%ElapsedTime() '
                '%Counter(current-file) '
                'files '
                '%ByteSize(scanned-bytes) scanned: '
                '%String(what)')
def set_time_func(self, now):
self._now = now
def ttystatus_supports_multiline(self):
return hasattr(self._ts, 'start_new_line')
    def clear(self): # pragma: no cover
        # Temporarily remove the progress display from the terminal.
        self._ts.clear()
    def finish(self): # pragma: no cover
        # Finalise the ttystatus display at the end of the backup.
        self._ts.finish()
    def error(self, msg, exc=None):
        # Record that this backup had errors, log the message (with a
        # traceback when exc is given), and surface it on the terminal.
        self.errors = True
        logging.error(msg, exc_info=exc)
        self._ts.error('ERROR: %s' % msg)
    def what(self, what_what):
        # Start the clock lazily on the first progress report.
        if self.started is None:
            self.started = self._now()
        self._ts['what'] = what_what
        self._ts.flush()
    def update_progress(self): # pragma: no cover
        # Touch an unused key just to force the progress display to refresh.
        self._ts['not-shown'] = 'not shown'
def update_progress_with_file(self, filename, metadata):
self._ts['what'] = filename
self._ts['current-file'] = filename
self.file_count += 1
def update_progress_with_scanned(self, amount):
self.scanned_bytes += amount
self._ts['scanned-bytes'] = self.scanned_bytes
def update_progress_with_upload(self, amount):
self.uploaded_bytes += amount
self._ts['uploaded-bytes'] = self.uploaded_bytes
def update_progress_with_removed_checkpoint(self, gen): # pragma: no cover
self._ts['checkpoint'] = gen
def compute_report(self, fs):
duration = self._now() - self.started
overhead = fs.bytes_written + fs.bytes_read - self.uploaded_bytes
speed = self.uploaded_bytes / float(duration)
return {
'duration': duration,
'file-count': self.file_count,
'backed-up-count': self.backed_up_count,
'scanned-bytes': self.scanned_bytes,
'uploaded-chunk-bytes': self.uploaded_bytes,
'uploaded-total-bytes': fs.bytes_written,
'downloaded-total-bytes': fs.bytes_read,
'overhead-total-bytes': overhead,
'effective-upload-speed': speed,
}
def report_stats(self, output, fs, quiet, report=None): # pragma: no cover
if report is None:
report = self.compute_report(fs)
duration_string = obnamlib.humanise_duration(report['duration'])
chunk_amount, chunk_unit = obnamlib.humanise_size(
report['uploaded-total-bytes'])
ul_amount, ul_unit = obnamlib.humanise_size(
report['uploaded-total-bytes'])
dl_amount, dl_unit = obnamlib.humanise_size(
report['downloaded-total-bytes'])
overhead_bytes = (
report['downloaded-total-bytes'] +
(report['uploaded-total-bytes'] - report['uploaded-total-bytes']))
overhead_bytes = max(0, overhead_bytes)
overhead_amount, overhead_unit = obnamlib.humanise_size(overhead_bytes)
if report['uploaded-total-bytes'] > 0:
overhead_percent = (
100.0 * overhead_bytes / report['uploaded-total-bytes'])
else:
overhead_percent = 0.0
speed_amount, speed_unit = obnamlib.humanise_speed(
report['uploaded-total-bytes'], report['duration'])
logging.info(
'Backup performance statistics:')
logging.info(
'* files found: %s',
report['file-count'])
logging.info(
'* files backed up: %s',
report['backed-up-count'])
logging.info(
'* uploaded chunk data: %s bytes (%s %s)',
report['uploaded-total-bytes'], chunk_amount, chunk_unit)
logging.info(
'* total uploaded data (incl. metadata): %s bytes (%s %s)',
report['uploaded-total-bytes'], ul_amount, ul_unit)
logging.info(
'* total downloaded data (incl. metadata): %s bytes (%s %s)',
report['downloaded-total-bytes'], dl_amount, dl_unit)
logging.info(
'* transfer overhead: %s bytes (%s %s)',
overhead_bytes, overhead_amount, overhead_unit)
logging.info(
'* duration: %s s (%s)',
report['duration'], duration_string)
logging.info(
'* average speed: %s %s',
speed_amount, speed_unit)
scanned_amount, scanned_unit = obnamlib.humanise_size(
report['scanned-bytes'])
if not quiet:
output.write(
'Backed up %d files (of %d found), containing %.1f %s.\n' %
(report['backed-up-count'],
report['file-count'],
scanned_amount,
scanned_unit))
output.write(
'Uploaded %.1f %s file data in %s at %.1f %s '
'average speed.\n' %
(chunk_amount,
chunk_unit,
duration_string,
speed_amount,
speed_unit))
output.write(
'Total download amount %.1f %s.\n' %
(dl_amount,
dl_unit))
output.write(
'Total upload amount %.1f %s. '
'Overhead was %.1f %s (%.1f %%).\n' %
(ul_amount,
ul_unit,
overhead_amount,
overhead_unit,
overhead_percent))
|
gpl-3.0
|
blueburningcoder/pybrain
|
pybrain/optimization/populationbased/multiobjective/constnsga2.py
|
25
|
4029
|
__author__ = 'proposed by Jean Pierre Queau , jeanpierre.queau"sbmoffshore.com'
from scipy import array
from pybrain.optimization.optimizer import BlackBoxOptimizer
from pybrain.optimization.populationbased.ga import GA
from pybrain.tools.nondominated import const_non_dominated_front, const_crowding_distance, const_non_dominated_sort
# TODO: not very elegant, because of the conversions between tuples and arrays all the time...
class ConstMultiObjectiveGA(GA):
    """ Constrained Multi-objective Genetic Algorithm: the fitness is a vector with one entry per objective.
    By default we use NSGA-II selection. """
    topProportion = 0.5
    elitism = True
    populationSize = 100
    mutationStdDev = 1.
    allowEquality = True
    mustMaximize = True

    def _learnStep(self):
        """ do one generation step """
        # Evaluate fitness, re-using cached evaluations for individuals
        # that survived from the previous generation.
        if isinstance(self.fitnesses, dict):
            oldfitnesses = self.fitnesses
            self.fitnesses = dict()
            for indiv in self.currentpop:
                if tuple(indiv) in oldfitnesses:
                    self.fitnesses[tuple(indiv)] = oldfitnesses[tuple(indiv)]
                else:
                    self.fitnesses[tuple(indiv)] = self._oneEvaluation(indiv)
            del oldfitnesses
        else:
            self.fitnesses = dict([(tuple(indiv), self._oneEvaluation(indiv)) for indiv in self.currentpop])

        if self.storeAllPopulations:
            self._allGenerations.append((self.currentpop, self.fitnesses))

        # BUGFIX: the two branches were swapped.  With elitism enabled the
        # previously archived front must stay in the candidate pool; without
        # elitism only the current population competes (the old non-elitist
        # branch also looked up fitnesses of stale archive members, which
        # can KeyError after the fitness dict is rebuilt above).
        if self.elitism:
            self.bestEvaluable = list(const_non_dominated_front(
                list(map(tuple, self.currentpop)) + self.bestEvaluable,
                key=lambda x: self.fitnesses[x],
                allowequality=self.allowEquality))
        else:
            self.bestEvaluable = list(const_non_dominated_front(
                list(map(tuple, self.currentpop)),
                key=lambda x: self.fitnesses[x],
                allowequality=self.allowEquality))
        self.bestEvaluation = [self.fitnesses[indiv] for indiv in self.bestEvaluable]
        self.produceOffspring()

    def select(self):
        """Return the next parent pool, chosen by NSGA-II selection."""
        return list(map(array, nsga2select(list(map(tuple, self.currentpop)), self.fitnesses,
                                           self.selectionSize, self.allowEquality)))
def nsga2select(population, fitnesses, survivors, allowequality = True):
    """The NSGA-II selection strategy (Deb et al., 2002).

    Walks the non-dominated fronts in rank order, taking whole fronts
    while they fit; the first front that does not fit is thinned by
    crowding distance.  The number of individuals that survive is given
    by the survivors parameter.
    """
    fronts = const_non_dominated_sort(population,
                                      key=lambda x: fitnesses[x],
                                      allowequality=allowequality)
    chosen = set()
    for front in fronts:
        slots = survivors - len(chosen)
        if slots <= 0:
            break
        if len(front) > slots:
            # Front does not fit: keep the most spread-out members.
            dist = const_crowding_distance(front, fitnesses)
            ranked = sorted(front, key=lambda x: dist[x], reverse=True)
            front = set(ranked[:slots])
        chosen.update(front)
    return list(chosen)
|
bsd-3-clause
|
Yury191/brownstonetutors
|
BrownstoneTutors/views.py
|
1
|
2876
|
from allauth.account.views import *
from django.shortcuts import render, render_to_response, get_object_or_404
from django.http import HttpResponse, HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.template import RequestContext, Template , Context
from django.views.generic import View
from django.views.generic.detail import SingleObjectMixin, DetailView
from schedule.periods import Year, Month, Week, Day
from schedule.views import CalendarByPeriodsView
from schedule.models import Calendar
from schedule.utils import check_event_permissions, check_calendar_permissions, coerce_date_dict
from schedule.conf.settings import GET_EVENTS_FUNC, OCCURRENCE_CANCEL_REDIRECT
from schedule.periods import weekday_names
from django.utils import timezone
import pytz
import datetime
from urllib.parse import quote
def index(request):
    """Serve the base page to authenticated users; send others to login."""
    if not request.user.is_authenticated():
        return HttpResponseRedirect(reverse('account_login'))
    return render_to_response('base.html', RequestContext(request))
class AccountCalendar(DetailView):
    """Week view of the calendar attached to the requesting user's profile."""
    # NOTE(review): colon in the template path looks unusual for a Django
    # template name ('schedule:fullcalendar.html') -- confirm it resolves.
    template_name = 'schedule:fullcalendar.html'
    def get(self, request, *args, **kwargs):
        # Look up the calendar by the logged-in user rather than by pk/slug.
        self.object = get_object_or_404(Calendar, profile__user=request.user)
        kwargs.update({'periods': [Week]})
        context = self.get_context_data(request, **kwargs)
        context.update({'calendar_slug': self.object.slug})
        return self.render_to_response(context)
    def get_context_data(self, request, **kwargs):
        """Build template context: resolved date, period objects, calendar."""
        context = super(AccountCalendar, self).get_context_data(**kwargs)
        calendar = self.object
        periods = kwargs.get('periods', None)
        # Date may be supplied via query-string components; malformed input
        # becomes a 404 rather than a 500.
        # NOTE(review): Http404 is assumed to come in via the star import
        # from allauth.account.views -- verify.
        try:
            date = coerce_date_dict(request.GET)
        except ValueError:
            raise Http404
        if date:
            try:
                date = datetime.datetime(**date)
            except ValueError:
                raise Http404
        else:
            date = timezone.now()
        event_list = GET_EVENTS_FUNC(request, calendar)
        # Prefer the timezone the user chose for this session, if any.
        if 'django_timezone' in self.request.session:
            local_timezone = pytz.timezone(request.session['django_timezone'])
        else:
            local_timezone = timezone.get_default_timezone()
        period_objects = {}
        for period in periods:
            # Year's constructor takes one fewer positional argument than
            # the other period classes.
            if period.__name__.lower() == 'year':
                period_objects[period.__name__.lower()] = period(event_list, date, None, local_timezone)
            else:
                period_objects[period.__name__.lower()] = period(event_list, date, None, None, local_timezone)
        context.update({
            'date': date,
            'periods': period_objects,
            'calendar': calendar,
            'weekday_names': weekday_names,
            # URL-quoted current path, used for redirects back to this view.
            'here': quote(request.get_full_path()),
        })
        return context
# URLconf entry point for this view.
calendar = AccountCalendar.as_view()
|
unlicense
|
GeassDB/xunlei-lixian
|
lixian_plugins/commands/list_torrent.py
|
14
|
1951
|
from lixian_plugins.api import command
from lixian_cli_parser import parse_command_line
from lixian_config import get_config
from lixian_encoding import default_encoding
def b_encoding(b):
    """Return the text encoding declared by bdecoded torrent metadata ``b``.

    Prefers an explicit 'encoding' key, then a numeric 'codepage' (mapped
    to a cpNNN codec name), and defaults to UTF-8.
    """
    try:
        return b['encoding']
    except KeyError:
        pass
    try:
        return 'cp' + str(b['codepage'])
    except KeyError:
        return 'utf-8'
def b_name(info, encoding='utf-8'):
    """Decode the torrent's name, preferring the explicit UTF-8 variant."""
    utf8_name = info.get('name.utf-8')
    if utf8_name is not None:
        return utf8_name.decode('utf-8')
    return info['name'].decode(encoding)
def b_path(f, encoding='utf-8'):
    """Decode a torrent file entry's path components, preferring UTF-8."""
    parts = f.get('path.utf-8')
    if parts is not None:
        return [part.decode('utf-8') for part in parts]
    return [part.decode(encoding) for part in f['path']]
# Python 2 module: uses print statements and byte-string handling.
@command(usage='list files in local .torrent')
def list_torrent(args):
    '''
    usage: lx list-torrent [--size] xxx.torrent...
    '''
    # --size defaults from user config; remaining args are torrent paths.
    args = parse_command_line(args, [], ['size'], default={'size':get_config('size')})
    torrents = args
    if not torrents:
        # No paths given: fall back to all .torrent files in the cwd.
        from glob import glob
        torrents = glob('*.torrent')
    if not torrents:
        raise Exception('No .torrent file found')
    for p in torrents:
        with open(p, 'rb') as stream:
            from lixian_hash_bt import bdecode
            b = bdecode(stream.read())
            encoding = b_encoding(b)
            info = b['info']
            from lixian_util import format_size
            if args.size:
                # Multi-file torrents: total is the sum of entry lengths.
                size = sum(f['length'] for f in info['files']) if 'files' in info else info['length']
                print '*', b_name(info, encoding).encode(default_encoding), format_size(size)
            else:
                print '*', b_name(info, encoding).encode(default_encoding)
            if 'files' in info:
                # Multi-file torrent: print one line per contained file.
                for f in info['files']:
                    # BitTorrent clients add hidden padding files; skip them.
                    if f['path'][0].startswith('_____padding_file_'):
                        continue
                    path = '/'.join(b_path(f, encoding)).encode(default_encoding)
                    if args.size:
                        print '%s (%s)' % (path, format_size(f['length']))
                    else:
                        print path
            else:
                # Single-file torrent: the name is the file path.
                path = b_name(info, encoding).encode(default_encoding)
                if args.size:
                    from lixian_util import format_size
                    print '%s (%s)' % (path, format_size(info['length']))
                else:
                    print path
|
mit
|
mollstam/UnrealPy
|
UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/pip-7.1.0/pip/commands/freeze.py
|
311
|
2330
|
from __future__ import absolute_import
import sys
import pip
from pip.basecommand import Command
from pip.operations.freeze import freeze
from pip.wheel import WheelCache
class FreezeCommand(Command):
    """
    Output installed packages in requirements format.

    packages are listed in a case-insensitive sorted order.
    """
    name = 'freeze'
    usage = """
      %prog [options]"""
    summary = 'Output installed packages in requirements format.'
    # Route log output to stderr so stdout carries only the requirements.
    log_streams = ("ext://sys.stderr", "ext://sys.stderr")

    def __init__(self, *args, **kw):
        # Register the command-line options this command accepts.
        super(FreezeCommand, self).__init__(*args, **kw)

        self.cmd_opts.add_option(
            '-r', '--requirement',
            dest='requirement',
            action='store',
            default=None,
            metavar='file',
            help="Use the order in the given requirements file and its "
                 "comments when generating output.")
        self.cmd_opts.add_option(
            '-f', '--find-links',
            dest='find_links',
            action='append',
            default=[],
            metavar='URL',
            help='URL for finding packages, which will be added to the '
                 'output.')
        self.cmd_opts.add_option(
            '-l', '--local',
            dest='local',
            action='store_true',
            default=False,
            help='If in a virtualenv that has global access, do not output '
                 'globally-installed packages.')
        self.cmd_opts.add_option(
            '--user',
            dest='user',
            action='store_true',
            default=False,
            help='Only output packages installed in user-site.')

        self.parser.insert_option_group(0, self.cmd_opts)

    def run(self, options, args):
        # No format restrictions needed for freezing; empty sets allow all.
        format_control = pip.index.FormatControl(set(), set())
        wheel_cache = WheelCache(options.cache_dir, format_control)
        freeze_kwargs = dict(
            requirement=options.requirement,
            find_links=options.find_links,
            local_only=options.local,
            user_only=options.user,
            skip_regex=options.skip_requirements_regex,
            isolated=options.isolated_mode,
            wheel_cache=wheel_cache)

        # freeze() yields requirement lines; write them to stdout one per line.
        for line in freeze(**freeze_kwargs):
            sys.stdout.write(line + '\n')
|
mit
|
jirikuncar/invenio-formatter
|
tests/test_formatter_utils.py
|
1
|
6829
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2010, 2011, 2013 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibFormat - Unit Test Suite"""
from invenio.testsuite import make_test_suite, run_test_suite, InvenioTestCase
from invenio.base.wrappers import lazy_import
words_start_with_patterns = lazy_import('invenio_formatter.utils:words_start_with_patterns')
cut_out_snippet_core_creation = lazy_import('invenio_formatter.utils:cut_out_snippet_core_creation')
class WordsStartsWithPatternTest(InvenioTestCase):
    """Test for words start with pattern functionality"""
    # words_start_with_patterns appears to return a (matched, extra_words)
    # tuple; matching is case-insensitive and prefix-based on the first word.

    def test_word_starts_with_single_pattern(self):
        """bibformat - word starts with single pattern"""
        self.assertEqual((False, 0), words_start_with_patterns(['thi'], ['this']))
        self.assertEqual((True, 0), words_start_with_patterns(['this'], ['this']))
        self.assertEqual((True, 0), words_start_with_patterns(['This'], ['this']))
        self.assertEqual((True, 0), words_start_with_patterns(['this'], ['tHis']))
        self.assertEqual((True, 0), words_start_with_patterns(['This'], ['tHis']))
        self.assertEqual((True, 0), words_start_with_patterns(['Thiss'], ['tHis']))

    def test_word_starts_with_multi_pattern(self):
        """bibformat - word starts with multi pattern"""
        self.assertEqual((False, 0), words_start_with_patterns(['thi'], ['this', 'is', 'a']))
        self.assertEqual((False, 0), words_start_with_patterns(['i'], ['this', 'is', 'a']))
        self.assertEqual((True, 0), words_start_with_patterns(['this'], ['this', 'is', 'a']))
        self.assertEqual((True, 0), words_start_with_patterns(['is'], ['this', 'is', 'a']))

    def test_words_start_with_single_pattern(self):
        """bibformat - words start with single pattern"""
        self.assertEqual((True, 0), words_start_with_patterns(['this', 'is'], ['thi']))
        self.assertEqual((False, 0), words_start_with_patterns(['thi', 'this'], ['this']))

    def test_words_start_with_multi_pattern(self):
        """bibformat - words start with multi pattern"""
        # Only the first word is considered
        self.assertEqual((True, 0), words_start_with_patterns(['this', 'is'], ['this', 'it']))
        self.assertEqual((True, 0), words_start_with_patterns(['this', 'is'], ['it', 'thi']))
        self.assertEqual((False, 0), words_start_with_patterns(['this', 'is'], ['it', 'if']))
        self.assertEqual((False, 0), words_start_with_patterns(['this', 'is'], ['is', 'if']))

    def test_words_start_with_phrase(self):
        """bibformat - words start with phrase"""
        # A multi-word pattern consumes extra words; the second tuple element
        # counts how many words beyond the first were matched.
        self.assertEqual((True, 2), words_start_with_patterns(['this', 'is', 'a', 'test'], ['this is a']))
        self.assertEqual((False, 0), words_start_with_patterns(['this', 'is', 'a', 'test'], ['no I do not]']))
        self.assertEqual((True, 2), words_start_with_patterns(['this', 'is', 'a', 'test'], ['no I do not]', 'this is a']))
        self.assertEqual((False, 0), words_start_with_patterns(['this', 'is'], ['no I do not', 'this is a']))
class SnippetCutOutCoreCreation(InvenioTestCase):
    """Test for snippet cut out core creation"""
    # Shared OCR-like word list used as a realistic long input; index 0 is
    # the only entry.  cut_out_snippet_core_creation appears to return
    # (snippet_text, first_word_index, last_word_index), with (-1, -1) when
    # no pattern matches.
    _words = dict()
    _words[0] = ['CERN', 'LIBRARIES,', 'GENEVA', 'SCAN-0005061', 'Development', 'of', 'Photon', 'Beam', 'Diagnostics',
                 'for', 'VUV', 'Radiation', 'from', 'a', 'SASE', 'FEL', 'R.', 'Treusch', '1,', 'T.', 'Lokajczyk,', 'W.',
                 'Xu', '2,', 'U.', 'Jastrow,', 'U.', 'Hahn,', 'Abstract', 'L.', 'Bittner', 'and', 'J.', 'Feldhaus',
                 'HASYLAB', 'at', 'DESY,', 'Notkcstr.', '85,', 'D\xe2\x80\x94226`U3', 'Hamburg,', 'Germany', 'For',
                 'the', 'proof-of-principle', 'experiment', 'of', 'self-amplified', 'spontaneous', 'emission', '[SASE)',
                 'at', 'short', 'wavelengths', 'on', 'the', 'VUV', 'FEL', 'at', 'DESY', 'a', 'multi-facetted', 'photon',
                 'beam', 'diagnostics', 'experiment', 'has', 'been', 'developed', 'employing', 'new', 'detection',
                 'concepts', 'to', 'measure', 'all', 'SASE', 'specific', 'properties', 'on', 'a', 'single', 'pulse',
                 'basis.', 'The', 'present', 'setup', 'includes', 'instrumentation', 'for', 'the', 'measurement', 'of',
                 'the', 'energy', 'and', 'the', 'angular', 'and', 'spectral', 'distribution', 'of', 'individual', 'photon',
                 'pulses.', 'Different', 'types', 'of', 'photon', 'detectors', 'such', 'as', 'PtSi-photodiodes', 'and']

    def test_term_cut_out(self):
        """bibformat - term snippet cut out core creation"""
        self.assertEqual(('This', 0, 0), cut_out_snippet_core_creation(['This', 'is', 'a', 'test'], ['This'], 50))
        self.assertEqual(('This is a test', 0, 3), cut_out_snippet_core_creation(['This', 'is', 'a', 'test'], ['This', 'test'], 50))
        self.assertEqual(('is', 1, 1), cut_out_snippet_core_creation(['This', 'is', 'a', 'test'], ['is'], 50))
        self.assertEqual(('is a new', 1, 3), cut_out_snippet_core_creation(['This', 'is', 'a', 'new', 'test'], ['is', 'new'], 50))
        self.assertEqual(('', -1, -1), cut_out_snippet_core_creation(['This', 'is', 'a', 'test'], ['new'], 50))
        self.assertEqual(('of', 5, 5), cut_out_snippet_core_creation(self._words[0], ['of'], 100))

    def test_phrase_cut_out(self):
        """bibformat - phrase snippet cut out core creation"""
        self.assertEqual(('This is', 0, 1), cut_out_snippet_core_creation(['This', 'is', 'a', 'test'], ['This is'], 50))
        self.assertEqual(('This is a', 0, 2), cut_out_snippet_core_creation(['This', 'is', 'a', 'test'], ['This is a'], 50))
        self.assertEqual(('', -1, -1), cut_out_snippet_core_creation(['This', 'is', 'a', 'test'], ['This not'], 50))
        self.assertEqual(('is a', 1, 2), cut_out_snippet_core_creation(['This', 'is', 'a', 'test'], ['is a'], 50))
        self.assertEqual(('of the', 92, 93), cut_out_snippet_core_creation(self._words[0], ['of the'], 100))
# Aggregate the test cases into a suite runnable by Invenio's test runner.
TEST_SUITE = make_test_suite(WordsStartsWithPatternTest,
                             SnippetCutOutCoreCreation,
                             )
if __name__ == '__main__':
    run_test_suite(TEST_SUITE)
|
gpl-2.0
|
nkrinner/nova
|
nova/tests/servicegroup/test_mc_servicegroup.py
|
27
|
8500
|
# Copyright (c) 2013 Akira Yoshiyama <akirayoshiyama at gmail dot com>
#
# This is derived from test_db_servicegroup.py.
# Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
from nova import context
from nova import db
from nova.openstack.common import timeutils
from nova import service
from nova import servicegroup
from nova import test
class ServiceFixture(fixtures.Fixture):
    """Fixture wrapping a nova Service with a fake manager.

    The service is created in setUp() and killed automatically at
    fixture cleanup; callers start/stop it themselves.
    """
    def __init__(self, host, binary, topic):
        super(ServiceFixture, self).__init__()
        self.host = host
        self.binary = binary
        self.topic = topic
        self.serv = None
    def setUp(self):
        super(ServiceFixture, self).setUp()
        # Fake manager avoids bringing up a real compute/scheduler manager;
        # report and periodic intervals are both 1 second.
        self.serv = service.Service(self.host,
                                    self.binary,
                                    self.topic,
                                    'nova.tests.test_service.FakeManager',
                                    1, 1)
        self.addCleanup(self.serv.kill)
class MemcachedServiceGroupTestCase(test.TestCase):
    """Tests for the memcached-backed ('mc') servicegroup driver."""
    def setUp(self):
        super(MemcachedServiceGroupTestCase, self).setUp()
        # Reset the cached driver so the 'mc' flag below takes effect.
        servicegroup.API._driver = None
        self.flags(servicegroup_driver='mc')
        self.down_time = 15
        self.flags(enable_new_services=True)
        self.flags(service_down_time=self.down_time)
        self.servicegroup_api = servicegroup.API(test=True)
        self._host = 'foo'
        self._binary = 'nova-fake'
        self._topic = 'unittest'
        self._ctx = context.get_admin_context()
    def test_memcached_driver(self):
        # Liveness is tracked by a memcached key '<topic>:<host>' holding
        # the last heartbeat; the key's TTL equals the down time.
        serv = self.useFixture(
            ServiceFixture(self._host, self._binary, self._topic)).serv
        serv.start()
        service_ref = db.service_get_by_args(self._ctx,
                                             self._host,
                                             self._binary)
        hostkey = str("%s:%s" % (self._topic, self._host))
        self.servicegroup_api._driver.mc.set(hostkey,
                                             timeutils.utcnow(),
                                             time=self.down_time)
        self.assertTrue(self.servicegroup_api.service_is_up(service_ref))
        self.useFixture(test.TimeOverride())
        timeutils.advance_time_seconds(self.down_time + 1)
        # A fresh heartbeat after the timeout should make the service
        # report as up again.
        self.servicegroup_api._driver._report_state(serv)
        service_ref = db.service_get_by_args(self._ctx,
                                             self._host,
                                             self._binary)
        self.assertTrue(self.servicegroup_api.service_is_up(service_ref))
        serv.stop()
        timeutils.advance_time_seconds(self.down_time + 1)
        service_ref = db.service_get_by_args(self._ctx,
                                             self._host,
                                             self._binary)
        self.assertFalse(self.servicegroup_api.service_is_up(service_ref))
    def test_get_all(self):
        # get_all should list only hosts with a live heartbeat key;
        # host3's key is set with time=-1 (already expired).
        host1 = self._host + '_1'
        host2 = self._host + '_2'
        host3 = self._host + '_3'
        serv1 = self.useFixture(
            ServiceFixture(host1, self._binary, self._topic)).serv
        serv1.start()
        serv2 = self.useFixture(
            ServiceFixture(host2, self._binary, self._topic)).serv
        serv2.start()
        serv3 = self.useFixture(
            ServiceFixture(host3, self._binary, self._topic)).serv
        serv3.start()
        db.service_get_by_args(self._ctx, host1, self._binary)
        db.service_get_by_args(self._ctx, host2, self._binary)
        db.service_get_by_args(self._ctx, host3, self._binary)
        host1key = str("%s:%s" % (self._topic, host1))
        host2key = str("%s:%s" % (self._topic, host2))
        host3key = str("%s:%s" % (self._topic, host3))
        self.servicegroup_api._driver.mc.set(host1key,
                                             timeutils.utcnow(),
                                             time=self.down_time)
        self.servicegroup_api._driver.mc.set(host2key,
                                             timeutils.utcnow(),
                                             time=self.down_time)
        self.servicegroup_api._driver.mc.set(host3key,
                                             timeutils.utcnow(),
                                             time=-1)
        services = self.servicegroup_api.get_all(self._topic)
        self.assertIn(host1, services)
        self.assertIn(host2, services)
        self.assertNotIn(host3, services)
        service_id = self.servicegroup_api.get_one(self._topic)
        self.assertIn(service_id, services)
    def test_service_is_up(self):
        # Uses mox to control utcnow_ts: two recorded calls per check
        # (heartbeat timestamp, then comparison time), exercising both
        # sides of the down_time boundary.
        serv = self.useFixture(
            ServiceFixture(self._host, self._binary, self._topic)).serv
        serv.start()
        service_ref = db.service_get_by_args(self._ctx,
                                             self._host,
                                             self._binary)
        fake_now = 1000
        down_time = 15
        self.flags(service_down_time=down_time)
        self.mox.StubOutWithMock(timeutils, 'utcnow_ts')
        self.servicegroup_api = servicegroup.API()
        hostkey = str("%s:%s" % (self._topic, self._host))
        # Up (equal)
        timeutils.utcnow_ts().AndReturn(fake_now)
        timeutils.utcnow_ts().AndReturn(fake_now + down_time - 1)
        self.mox.ReplayAll()
        self.servicegroup_api._driver.mc.set(hostkey,
                                             timeutils.utcnow(),
                                             time=down_time)
        result = self.servicegroup_api.service_is_up(service_ref)
        self.assertTrue(result)
        self.mox.ResetAll()
        # Up
        timeutils.utcnow_ts().AndReturn(fake_now)
        timeutils.utcnow_ts().AndReturn(fake_now + down_time - 2)
        self.mox.ReplayAll()
        self.servicegroup_api._driver.mc.set(hostkey,
                                             timeutils.utcnow(),
                                             time=down_time)
        result = self.servicegroup_api.service_is_up(service_ref)
        self.assertTrue(result)
        self.mox.ResetAll()
        # Down
        timeutils.utcnow_ts().AndReturn(fake_now)
        timeutils.utcnow_ts().AndReturn(fake_now + down_time)
        self.mox.ReplayAll()
        self.servicegroup_api._driver.mc.set(hostkey,
                                             timeutils.utcnow(),
                                             time=down_time)
        result = self.servicegroup_api.service_is_up(service_ref)
        self.assertFalse(result)
        self.mox.ResetAll()
        # Down
        timeutils.utcnow_ts().AndReturn(fake_now)
        timeutils.utcnow_ts().AndReturn(fake_now + down_time + 1)
        self.mox.ReplayAll()
        self.servicegroup_api._driver.mc.set(hostkey,
                                             timeutils.utcnow(),
                                             time=down_time)
        result = self.servicegroup_api.service_is_up(service_ref)
        self.assertFalse(result)
        self.mox.ResetAll()
    def test_report_state(self):
        serv = self.useFixture(
            ServiceFixture(self._host, self._binary, self._topic)).serv
        serv.start()
        db.service_get_by_args(self._ctx, self._host, self._binary)
        self.servicegroup_api = servicegroup.API()
        # updating model_disconnected
        serv.model_disconnected = True
        self.servicegroup_api._driver._report_state(serv)
        self.assertFalse(serv.model_disconnected)
        # handling exception
        # Breaking the memcached client (mc = None) makes _report_state
        # fail internally; the driver should flag the service disconnected
        # rather than raise.
        serv.model_disconnected = True
        self.servicegroup_api._driver.mc = None
        self.servicegroup_api._driver._report_state(serv)
        self.assertTrue(serv.model_disconnected)
        delattr(serv, 'model_disconnected')
        self.servicegroup_api._driver.mc = None
        self.servicegroup_api._driver._report_state(serv)
        self.assertTrue(serv.model_disconnected)
|
apache-2.0
|
sectubs/2016s-SEP
|
gr-orcatun/docs/doxygen/doxyxml/generated/compound.py
|
344
|
20296
|
#!/usr/bin/env python
"""
Generated Mon Feb 9 19:08:05 2009 by generateDS.py.
"""
from string import lower as str_lower
from xml.dom import minidom
from xml.dom import Node
import sys
import compoundsuper as supermod
from compoundsuper import MixedContainer
# Generated subclass (generateDS.py); adds find() on top of the base type.
class DoxygenTypeSub(supermod.DoxygenType):
    def __init__(self, version=None, compounddef=None):
        supermod.DoxygenType.__init__(self, version, compounddef)

    def find(self, details):
        # Delegate lookup to the single top-level compounddef.
        return self.compounddef.find(details)
supermod.DoxygenType.subclass = DoxygenTypeSub
# end class DoxygenTypeSub
# Generated subclass (generateDS.py); adds find() for refid lookup.
class compounddefTypeSub(supermod.compounddefType):
    def __init__(self, kind=None, prot=None, id=None, compoundname='', title='', basecompoundref=None, derivedcompoundref=None, includes=None, includedby=None, incdepgraph=None, invincdepgraph=None, innerdir=None, innerfile=None, innerclass=None, innernamespace=None, innerpage=None, innergroup=None, templateparamlist=None, sectiondef=None, briefdescription=None, detaileddescription=None, inheritancegraph=None, collaborationgraph=None, programlisting=None, location=None, listofallmembers=None):
        supermod.compounddefType.__init__(self, kind, prot, id, compoundname, title, basecompoundref, derivedcompoundref, includes, includedby, incdepgraph, invincdepgraph, innerdir, innerfile, innerclass, innernamespace, innerpage, innergroup, templateparamlist, sectiondef, briefdescription, detaileddescription, inheritancegraph, collaborationgraph, programlisting, location, listofallmembers)

    def find(self, details):
        # Match this compound itself, then search its section definitions.
        if self.id == details.refid:
            return self
        for sectiondef in self.sectiondef:
            result = sectiondef.find(details)
            if result:
                return result
        # Implicitly returns None when nothing matches.
supermod.compounddefType.subclass = compounddefTypeSub
# end class compounddefTypeSub
# The following subclasses are generated boilerplate (generateDS.py): each
# forwards a subset of its constructor arguments to the superclass and
# registers itself as that type's subclass hook.  Mixed-content types pass
# only (mixedclass_, content_) upward; the other parameters exist to match
# the generated construction call signature.
class listofallmembersTypeSub(supermod.listofallmembersType):
    def __init__(self, member=None):
        supermod.listofallmembersType.__init__(self, member)
supermod.listofallmembersType.subclass = listofallmembersTypeSub
# end class listofallmembersTypeSub
class memberRefTypeSub(supermod.memberRefType):
    def __init__(self, virt=None, prot=None, refid=None, ambiguityscope=None, scope='', name=''):
        supermod.memberRefType.__init__(self, virt, prot, refid, ambiguityscope, scope, name)
supermod.memberRefType.subclass = memberRefTypeSub
# end class memberRefTypeSub
class compoundRefTypeSub(supermod.compoundRefType):
    def __init__(self, virt=None, prot=None, refid=None, valueOf_='', mixedclass_=None, content_=None):
        supermod.compoundRefType.__init__(self, mixedclass_, content_)
supermod.compoundRefType.subclass = compoundRefTypeSub
# end class compoundRefTypeSub
class reimplementTypeSub(supermod.reimplementType):
    def __init__(self, refid=None, valueOf_='', mixedclass_=None, content_=None):
        supermod.reimplementType.__init__(self, mixedclass_, content_)
supermod.reimplementType.subclass = reimplementTypeSub
# end class reimplementTypeSub
class incTypeSub(supermod.incType):
    def __init__(self, local=None, refid=None, valueOf_='', mixedclass_=None, content_=None):
        supermod.incType.__init__(self, mixedclass_, content_)
supermod.incType.subclass = incTypeSub
# end class incTypeSub
class refTypeSub(supermod.refType):
    def __init__(self, prot=None, refid=None, valueOf_='', mixedclass_=None, content_=None):
        supermod.refType.__init__(self, mixedclass_, content_)
supermod.refType.subclass = refTypeSub
# end class refTypeSub
class refTextTypeSub(supermod.refTextType):
    def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedclass_=None, content_=None):
        supermod.refTextType.__init__(self, mixedclass_, content_)
supermod.refTextType.subclass = refTextTypeSub
# end class refTextTypeSub
# Generated subclass (generateDS.py); adds find() over member definitions.
class sectiondefTypeSub(supermod.sectiondefType):
    def __init__(self, kind=None, header='', description=None, memberdef=None):
        supermod.sectiondefType.__init__(self, kind, header, description, memberdef)

    def find(self, details):
        # Linear search of this section's members by refid.
        for memberdef in self.memberdef:
            if memberdef.id == details.refid:
                return memberdef
        return None
supermod.sectiondefType.subclass = sectiondefTypeSub
# end class sectiondefTypeSub
class memberdefTypeSub(supermod.memberdefType):
def __init__(self, initonly=None, kind=None, volatile=None, const=None, raise_=None, virt=None, readable=None, prot=None, explicit=None, new=None, final=None, writable=None, add=None, static=None, remove=None, sealed=None, mutable=None, gettable=None, inline=None, settable=None, id=None, templateparamlist=None, type_=None, definition='', argsstring='', name='', read='', write='', bitfield='', reimplements=None, reimplementedby=None, param=None, enumvalue=None, initializer=None, exceptions=None, briefdescription=None, detaileddescription=None, inbodydescription=None, location=None, references=None, referencedby=None):
supermod.memberdefType.__init__(self, initonly, kind, volatile, const, raise_, virt, readable, prot, explicit, new, final, writable, add, static, remove, sealed, mutable, gettable, inline, settable, id, templateparamlist, type_, definition, argsstring, name, read, write, bitfield, reimplements, reimplementedby, param, enumvalue, initializer, exceptions, briefdescription, detaileddescription, inbodydescription, location, references, referencedby)
supermod.memberdefType.subclass = memberdefTypeSub
# end class memberdefTypeSub
class descriptionTypeSub(supermod.descriptionType):
def __init__(self, title='', para=None, sect1=None, internal=None, mixedclass_=None, content_=None):
supermod.descriptionType.__init__(self, mixedclass_, content_)
supermod.descriptionType.subclass = descriptionTypeSub
# end class descriptionTypeSub
class enumvalueTypeSub(supermod.enumvalueType):
def __init__(self, prot=None, id=None, name='', initializer=None, briefdescription=None, detaileddescription=None, mixedclass_=None, content_=None):
supermod.enumvalueType.__init__(self, mixedclass_, content_)
supermod.enumvalueType.subclass = enumvalueTypeSub
# end class enumvalueTypeSub
class templateparamlistTypeSub(supermod.templateparamlistType):
def __init__(self, param=None):
supermod.templateparamlistType.__init__(self, param)
supermod.templateparamlistType.subclass = templateparamlistTypeSub
# end class templateparamlistTypeSub
class paramTypeSub(supermod.paramType):
def __init__(self, type_=None, declname='', defname='', array='', defval=None, briefdescription=None):
supermod.paramType.__init__(self, type_, declname, defname, array, defval, briefdescription)
supermod.paramType.subclass = paramTypeSub
# end class paramTypeSub
class linkedTextTypeSub(supermod.linkedTextType):
def __init__(self, ref=None, mixedclass_=None, content_=None):
supermod.linkedTextType.__init__(self, mixedclass_, content_)
supermod.linkedTextType.subclass = linkedTextTypeSub
# end class linkedTextTypeSub
class graphTypeSub(supermod.graphType):
def __init__(self, node=None):
supermod.graphType.__init__(self, node)
supermod.graphType.subclass = graphTypeSub
# end class graphTypeSub
class nodeTypeSub(supermod.nodeType):
    # Graph node subclass (dot graphs): forwards all fields to the superclass.
    def __init__(self, id=None, label='', link=None, childnode=None):
        supermod.nodeType.__init__(self, id, label, link, childnode)
supermod.nodeType.subclass = nodeTypeSub
# end class nodeTypeSub
class childnodeTypeSub(supermod.childnodeType):
def __init__(self, relation=None, refid=None, edgelabel=None):
supermod.childnodeType.__init__(self, relation, refid, edgelabel)
supermod.childnodeType.subclass = childnodeTypeSub
# end class childnodeTypeSub
class linkTypeSub(supermod.linkType):
def __init__(self, refid=None, external=None, valueOf_=''):
supermod.linkType.__init__(self, refid, external)
supermod.linkType.subclass = linkTypeSub
# end class linkTypeSub
class listingTypeSub(supermod.listingType):
def __init__(self, codeline=None):
supermod.listingType.__init__(self, codeline)
supermod.listingType.subclass = listingTypeSub
# end class listingTypeSub
class codelineTypeSub(supermod.codelineType):
def __init__(self, external=None, lineno=None, refkind=None, refid=None, highlight=None):
supermod.codelineType.__init__(self, external, lineno, refkind, refid, highlight)
supermod.codelineType.subclass = codelineTypeSub
# end class codelineTypeSub
class highlightTypeSub(supermod.highlightType):
def __init__(self, class_=None, sp=None, ref=None, mixedclass_=None, content_=None):
supermod.highlightType.__init__(self, mixedclass_, content_)
supermod.highlightType.subclass = highlightTypeSub
# end class highlightTypeSub
class referenceTypeSub(supermod.referenceType):
def __init__(self, endline=None, startline=None, refid=None, compoundref=None, valueOf_='', mixedclass_=None, content_=None):
supermod.referenceType.__init__(self, mixedclass_, content_)
supermod.referenceType.subclass = referenceTypeSub
# end class referenceTypeSub
class locationTypeSub(supermod.locationType):
def __init__(self, bodystart=None, line=None, bodyend=None, bodyfile=None, file=None, valueOf_=''):
supermod.locationType.__init__(self, bodystart, line, bodyend, bodyfile, file)
supermod.locationType.subclass = locationTypeSub
# end class locationTypeSub
class docSect1TypeSub(supermod.docSect1Type):
def __init__(self, id=None, title='', para=None, sect2=None, internal=None, mixedclass_=None, content_=None):
supermod.docSect1Type.__init__(self, mixedclass_, content_)
supermod.docSect1Type.subclass = docSect1TypeSub
# end class docSect1TypeSub
class docSect2TypeSub(supermod.docSect2Type):
def __init__(self, id=None, title='', para=None, sect3=None, internal=None, mixedclass_=None, content_=None):
supermod.docSect2Type.__init__(self, mixedclass_, content_)
supermod.docSect2Type.subclass = docSect2TypeSub
# end class docSect2TypeSub
class docSect3TypeSub(supermod.docSect3Type):
def __init__(self, id=None, title='', para=None, sect4=None, internal=None, mixedclass_=None, content_=None):
supermod.docSect3Type.__init__(self, mixedclass_, content_)
supermod.docSect3Type.subclass = docSect3TypeSub
# end class docSect3TypeSub
class docSect4TypeSub(supermod.docSect4Type):
def __init__(self, id=None, title='', para=None, internal=None, mixedclass_=None, content_=None):
supermod.docSect4Type.__init__(self, mixedclass_, content_)
supermod.docSect4Type.subclass = docSect4TypeSub
# end class docSect4TypeSub
class docInternalTypeSub(supermod.docInternalType):
def __init__(self, para=None, sect1=None, mixedclass_=None, content_=None):
supermod.docInternalType.__init__(self, mixedclass_, content_)
supermod.docInternalType.subclass = docInternalTypeSub
# end class docInternalTypeSub
class docInternalS1TypeSub(supermod.docInternalS1Type):
def __init__(self, para=None, sect2=None, mixedclass_=None, content_=None):
supermod.docInternalS1Type.__init__(self, mixedclass_, content_)
supermod.docInternalS1Type.subclass = docInternalS1TypeSub
# end class docInternalS1TypeSub
class docInternalS2TypeSub(supermod.docInternalS2Type):
def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None):
supermod.docInternalS2Type.__init__(self, mixedclass_, content_)
supermod.docInternalS2Type.subclass = docInternalS2TypeSub
# end class docInternalS2TypeSub
class docInternalS3TypeSub(supermod.docInternalS3Type):
def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None):
supermod.docInternalS3Type.__init__(self, mixedclass_, content_)
supermod.docInternalS3Type.subclass = docInternalS3TypeSub
# end class docInternalS3TypeSub
class docInternalS4TypeSub(supermod.docInternalS4Type):
def __init__(self, para=None, mixedclass_=None, content_=None):
supermod.docInternalS4Type.__init__(self, mixedclass_, content_)
supermod.docInternalS4Type.subclass = docInternalS4TypeSub
# end class docInternalS4TypeSub
class docURLLinkSub(supermod.docURLLink):
def __init__(self, url=None, valueOf_='', mixedclass_=None, content_=None):
supermod.docURLLink.__init__(self, mixedclass_, content_)
supermod.docURLLink.subclass = docURLLinkSub
# end class docURLLinkSub
class docAnchorTypeSub(supermod.docAnchorType):
def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None):
supermod.docAnchorType.__init__(self, mixedclass_, content_)
supermod.docAnchorType.subclass = docAnchorTypeSub
# end class docAnchorTypeSub
class docFormulaTypeSub(supermod.docFormulaType):
def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None):
supermod.docFormulaType.__init__(self, mixedclass_, content_)
supermod.docFormulaType.subclass = docFormulaTypeSub
# end class docFormulaTypeSub
class docIndexEntryTypeSub(supermod.docIndexEntryType):
def __init__(self, primaryie='', secondaryie=''):
supermod.docIndexEntryType.__init__(self, primaryie, secondaryie)
supermod.docIndexEntryType.subclass = docIndexEntryTypeSub
# end class docIndexEntryTypeSub
class docListTypeSub(supermod.docListType):
def __init__(self, listitem=None):
supermod.docListType.__init__(self, listitem)
supermod.docListType.subclass = docListTypeSub
# end class docListTypeSub
class docListItemTypeSub(supermod.docListItemType):
def __init__(self, para=None):
supermod.docListItemType.__init__(self, para)
supermod.docListItemType.subclass = docListItemTypeSub
# end class docListItemTypeSub
class docSimpleSectTypeSub(supermod.docSimpleSectType):
    # Simple section (e.g. note/warning/return blocks in Doxygen docs):
    # forwards all fields to the generated superclass.
    def __init__(self, kind=None, title=None, para=None):
        supermod.docSimpleSectType.__init__(self, kind, title, para)
supermod.docSimpleSectType.subclass = docSimpleSectTypeSub
# end class docSimpleSectTypeSub
class docVarListEntryTypeSub(supermod.docVarListEntryType):
def __init__(self, term=None):
supermod.docVarListEntryType.__init__(self, term)
supermod.docVarListEntryType.subclass = docVarListEntryTypeSub
# end class docVarListEntryTypeSub
class docRefTextTypeSub(supermod.docRefTextType):
def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedclass_=None, content_=None):
supermod.docRefTextType.__init__(self, mixedclass_, content_)
supermod.docRefTextType.subclass = docRefTextTypeSub
# end class docRefTextTypeSub
class docTableTypeSub(supermod.docTableType):
def __init__(self, rows=None, cols=None, row=None, caption=None):
supermod.docTableType.__init__(self, rows, cols, row, caption)
supermod.docTableType.subclass = docTableTypeSub
# end class docTableTypeSub
class docRowTypeSub(supermod.docRowType):
def __init__(self, entry=None):
supermod.docRowType.__init__(self, entry)
supermod.docRowType.subclass = docRowTypeSub
# end class docRowTypeSub
class docEntryTypeSub(supermod.docEntryType):
def __init__(self, thead=None, para=None):
supermod.docEntryType.__init__(self, thead, para)
supermod.docEntryType.subclass = docEntryTypeSub
# end class docEntryTypeSub
class docHeadingTypeSub(supermod.docHeadingType):
def __init__(self, level=None, valueOf_='', mixedclass_=None, content_=None):
supermod.docHeadingType.__init__(self, mixedclass_, content_)
supermod.docHeadingType.subclass = docHeadingTypeSub
# end class docHeadingTypeSub
class docImageTypeSub(supermod.docImageType):
def __init__(self, width=None, type_=None, name=None, height=None, valueOf_='', mixedclass_=None, content_=None):
supermod.docImageType.__init__(self, mixedclass_, content_)
supermod.docImageType.subclass = docImageTypeSub
# end class docImageTypeSub
class docDotFileTypeSub(supermod.docDotFileType):
def __init__(self, name=None, valueOf_='', mixedclass_=None, content_=None):
supermod.docDotFileType.__init__(self, mixedclass_, content_)
supermod.docDotFileType.subclass = docDotFileTypeSub
# end class docDotFileTypeSub
class docTocItemTypeSub(supermod.docTocItemType):
def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None):
supermod.docTocItemType.__init__(self, mixedclass_, content_)
supermod.docTocItemType.subclass = docTocItemTypeSub
# end class docTocItemTypeSub
class docTocListTypeSub(supermod.docTocListType):
def __init__(self, tocitem=None):
supermod.docTocListType.__init__(self, tocitem)
supermod.docTocListType.subclass = docTocListTypeSub
# end class docTocListTypeSub
class docLanguageTypeSub(supermod.docLanguageType):
def __init__(self, langid=None, para=None):
supermod.docLanguageType.__init__(self, langid, para)
supermod.docLanguageType.subclass = docLanguageTypeSub
# end class docLanguageTypeSub
class docParamListTypeSub(supermod.docParamListType):
def __init__(self, kind=None, parameteritem=None):
supermod.docParamListType.__init__(self, kind, parameteritem)
supermod.docParamListType.subclass = docParamListTypeSub
# end class docParamListTypeSub
class docParamListItemSub(supermod.docParamListItem):
def __init__(self, parameternamelist=None, parameterdescription=None):
supermod.docParamListItem.__init__(self, parameternamelist, parameterdescription)
supermod.docParamListItem.subclass = docParamListItemSub
# end class docParamListItemSub
class docParamNameListSub(supermod.docParamNameList):
def __init__(self, parametername=None):
supermod.docParamNameList.__init__(self, parametername)
supermod.docParamNameList.subclass = docParamNameListSub
# end class docParamNameListSub
class docParamNameSub(supermod.docParamName):
def __init__(self, direction=None, ref=None, mixedclass_=None, content_=None):
supermod.docParamName.__init__(self, mixedclass_, content_)
supermod.docParamName.subclass = docParamNameSub
# end class docParamNameSub
class docXRefSectTypeSub(supermod.docXRefSectType):
def __init__(self, id=None, xreftitle=None, xrefdescription=None):
supermod.docXRefSectType.__init__(self, id, xreftitle, xrefdescription)
supermod.docXRefSectType.subclass = docXRefSectTypeSub
# end class docXRefSectTypeSub
class docCopyTypeSub(supermod.docCopyType):
def __init__(self, link=None, para=None, sect1=None, internal=None):
supermod.docCopyType.__init__(self, link, para, sect1, internal)
supermod.docCopyType.subclass = docCopyTypeSub
# end class docCopyTypeSub
class docCharTypeSub(supermod.docCharType):
def __init__(self, char=None, valueOf_=''):
supermod.docCharType.__init__(self, char)
supermod.docCharType.subclass = docCharTypeSub
# end class docCharTypeSub
class docParaTypeSub(supermod.docParaType):
    # Paragraph subclass that sorts children into separate buckets so a
    # renderer can treat parameter lists and simple sections specially while
    # keeping ordinary mixed content (text and <ref> elements) in order.
    def __init__(self, char=None, valueOf_=''):
        supermod.docParaType.__init__(self, char)
        # Buckets filled by buildChildren():
        self.parameterlist = []  # <parameterlist> children
        self.simplesects = []    # <simplesect> children
        self.content = []        # in-order text and <ref> children
    def buildChildren(self, child_, nodeName_):
        """Route one DOM child into the matching bucket (see class comment)."""
        # Let the generated superclass perform its default processing first.
        supermod.docParaType.buildChildren(self, child_, nodeName_)
        if child_.nodeType == Node.TEXT_NODE:
            # Plain text: wrap in a MixedContainer so ordering is preserved.
            obj_ = self.mixedclass_(MixedContainer.CategoryText,
                MixedContainer.TypeNone, '', child_.nodeValue)
            self.content.append(obj_)
        elif child_.nodeType == Node.ELEMENT_NODE and \
            nodeName_ == "ref":
            obj_ = supermod.docRefTextType.factory()
            obj_.build(child_)
            self.content.append(obj_)
        elif child_.nodeType == Node.ELEMENT_NODE and \
            nodeName_ == 'parameterlist':
            obj_ = supermod.docParamListType.factory()
            obj_.build(child_)
            self.parameterlist.append(obj_)
        elif child_.nodeType == Node.ELEMENT_NODE and \
            nodeName_ == 'simplesect':
            obj_ = supermod.docSimpleSectType.factory()
            obj_.build(child_)
            self.simplesects.append(obj_)
supermod.docParaType.subclass = docParaTypeSub
# end class docParaTypeSub
def parse(inFilename):
    """Parse a Doxygen XML file and return the built DoxygenType root object.

    The factory() call returns the registered subclass (the *Sub classes
    bound via `supermod.<type>.subclass` above), so parsing yields the
    specialized objects rather than the raw generated ones.
    """
    doc = minidom.parse(inFilename)
    rootNode = doc.documentElement
    rootObj = supermod.DoxygenType.factory()
    rootObj.build(rootNode)
    return rootObj
|
gpl-3.0
|
archf/ansible
|
lib/ansible/modules/network/netvisor/pn_vrouterbgp.py
|
29
|
15104
|
#!/usr/bin/python
""" PN-CLI vrouter-bgp-add/vrouter-bgp-remove/vrouter-bgp-modify """
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: pn_vrouterbgp
author: "Pluribus Networks (@amitsi)"
version_added: "2.2"
short_description: CLI command to add/remove/modify vrouter-bgp.
description:
- Execute vrouter-bgp-add, vrouter-bgp-remove, vrouter-bgp-modify command.
- Each fabric, cluster, standalone switch, or virtual network (VNET) can
provide its tenants with a vRouter service that forwards traffic between
networks and implements Layer 4 protocols.
options:
pn_cliusername:
description:
- Provide login username if user is not root.
required: False
pn_clipassword:
description:
- Provide login password if user is not root.
required: False
pn_cliswitch:
description:
- Target switch(es) to run the cli on.
required: False
state:
description:
- State the action to perform. Use 'present' to add bgp,
'absent' to remove bgp and 'update' to modify bgp.
required: True
choices: ['present', 'absent', 'update']
pn_vrouter_name:
description:
- Specify a name for the vRouter service.
required: True
pn_neighbor:
description:
- Specify a neighbor IP address to use for BGP.
- Required for vrouter-bgp-add.
pn_remote_as:
description:
- Specify the remote Autonomous System(AS) number. This value is between
1 and 4294967295.
- Required for vrouter-bgp-add.
pn_next_hop_self:
description:
- Specify if the next-hop is the same router or not.
pn_password:
description:
- Specify a password, if desired.
pn_ebgp:
description:
- Specify a value for external BGP to accept or attempt BGP connections
to external peers, not directly connected, on the network. This is a
value between 1 and 255.
pn_prefix_listin:
description:
- Specify the prefix list to filter traffic inbound.
pn_prefix_listout:
description:
- Specify the prefix list to filter traffic outbound.
pn_route_reflector:
description:
- Specify if a route reflector client is used.
pn_override_capability:
description:
- Specify if you want to override capability.
pn_soft_reconfig:
description:
- Specify if you want a soft reconfiguration of inbound traffic.
pn_max_prefix:
description:
- Specify the maximum number of prefixes.
pn_max_prefix_warn:
description:
- Specify if you want a warning message when the maximum number of
prefixes is exceeded.
pn_bfd:
description:
- Specify if you want BFD protocol support for fault detection.
pn_multiprotocol:
description:
- Specify a multi-protocol for BGP.
choices: ['ipv4-unicast', 'ipv6-unicast']
pn_weight:
description:
- Specify a default weight value between 0 and 65535 for the neighbor
routes.
pn_default_originate:
description:
- Specify if you want announce default routes to the neighbor or not.
pn_keepalive:
description:
- Specify BGP neighbor keepalive interval in seconds.
pn_holdtime:
description:
- Specify BGP neighbor holdtime in seconds.
pn_route_mapin:
description:
- Specify inbound route map for neighbor.
pn_route_mapout:
description:
- Specify outbound route map for neighbor.
"""
EXAMPLES = """
- name: add vrouter-bgp
pn_vrouterbgp:
state: 'present'
pn_vrouter_name: 'ansible-vrouter'
pn_neighbor: 104.104.104.1
pn_remote_as: 1800
- name: remove vrouter-bgp
pn_vrouterbgp:
state: 'absent'
pn_name: 'ansible-vrouter'
"""
RETURN = """
command:
description: The CLI command run on the target node(s).
returned: always
type: str
stdout:
description: The set of responses from the vrouterbpg command.
returned: always
type: list
stderr:
description: The set of error responses from the vrouterbgp command.
returned: on error
type: list
changed:
description: Indicates whether the CLI caused changes on the target.
returned: always
type: bool
"""
import shlex
VROUTER_EXISTS = None
NEIGHBOR_EXISTS = None
def pn_cli(module):
    """Assemble the Netvisor CLI prefix string for subsequent commands.

    Reads the optional login credentials and the target switch from the
    module parameters.  When both username and password are provided the
    `--user` flag is included; otherwise the CLI is invoked as the current
    user.  The switch clause targets either the local switch or a named one.

    :param module: The Ansible module carrying pn_cliusername,
        pn_clipassword and pn_cliswitch parameters.
    :return: The CLI prefix string ready for command-specific suffixes.
    """
    creds = (module.params['pn_cliusername'], module.params['pn_clipassword'])
    target = module.params['pn_cliswitch']
    if all(creds):
        cli = '/usr/bin/cli --quiet --user %s:%s ' % creds
    else:
        cli = '/usr/bin/cli --quiet '
    # 'local' is the module default and maps to the switch-local clause.
    cli += ' switch-local ' if target == 'local' else ' switch ' + target
    return cli
def check_cli(module, cli):
    """
    This method checks if vRouter exists on the target node.
    This method also checks for idempotency using the vrouter-bgp-show command.
    If the given vRouter exists, return VROUTER_EXISTS as True else False.
    If a BGP neighbor with the given ip exists on the given vRouter,
    return NEIGHBOR_EXISTS as True else False.
    :param module: The Ansible module to fetch input parameters
    :param cli: The CLI string
    :return Global Booleans: VROUTER_EXISTS, NEIGHBOR_EXISTS
    """
    vrouter_name = module.params['pn_vrouter_name']
    neighbor = module.params['pn_neighbor']
    # Results are communicated through module-level globals, not a return
    # value; main() reads them right after calling this function.
    global VROUTER_EXISTS, NEIGHBOR_EXISTS
    # Check for vRouter: list all vRouter names and look for an exact
    # whitespace-delimited token match (not a substring match).
    check_vrouter = cli + ' vrouter-show format name no-show-headers '
    check_vrouter = shlex.split(check_vrouter)
    # run_command returns (rc, stdout, stderr); only stdout is used here.
    out = module.run_command(check_vrouter)[1]
    out = out.split()
    if vrouter_name in out:
        VROUTER_EXISTS = True
    else:
        VROUTER_EXISTS = False
    # Check for BGP neighbors configured on this vRouter the same way.
    show = cli + ' vrouter-bgp-show vrouter-name %s ' % vrouter_name
    show += 'format neighbor no-show-headers'
    show = shlex.split(show)
    out = module.run_command(show)[1]
    out = out.split()
    if neighbor in out:
        NEIGHBOR_EXISTS = True
    else:
        NEIGHBOR_EXISTS = False
def run_cli(module, cli):
    """
    This method executes the cli command on the target node(s) and returns the
    output. The module then exits based on the output.
    :param cli: the complete cli string to be executed on the target node(s).
    :param module: The Ansible module to fetch command
    """
    cliswitch = module.params['pn_cliswitch']
    state = module.params['state']
    command = get_command_from_state(state)
    cmd = shlex.split(cli)
    # 'out' contains the output
    # 'err' contains the error messages
    result, out, err = module.run_command(cmd)
    # Report only the part after the switch name so credentials in the CLI
    # prefix never appear in the task result.
    # NOTE(review): assumes cliswitch occurs exactly once in the cli string;
    # a switch name that collides with another substring would truncate oddly.
    print_cli = cli.split(cliswitch)[1]
    # Response in JSON format; every branch below terminates the module.
    if result != 0:
        module.exit_json(
            command=print_cli,
            stderr=err.strip(),
            msg="%s operation failed" % command,
            changed=False
        )
    if out:
        module.exit_json(
            command=print_cli,
            stdout=out.strip(),
            msg="%s operation completed" % command,
            changed=True
        )
    else:
        module.exit_json(
            command=print_cli,
            msg="%s operation completed" % command,
            changed=True
        )
def get_command_from_state(state):
    """Map an Ansible state to the matching vrouter-bgp CLI command.

    :param state: One of 'present', 'absent' or 'update'.
    :return: The Netvisor command name, or None for an unknown state.
    """
    state_to_command = {
        'present': 'vrouter-bgp-add',
        'absent': 'vrouter-bgp-remove',
        'update': 'vrouter-bgp-modify',
    }
    return state_to_command.get(state)
def main():
    """Entry point: parse arguments, build the vrouter-bgp CLI, execute it.

    Performs idempotency checks via check_cli() for add/remove, then hands
    the assembled command to run_cli(), which exits the module.
    """
    module = AnsibleModule(
        argument_spec=dict(
            pn_cliusername=dict(required=False, type='str'),
            pn_clipassword=dict(required=False, type='str', no_log=True),
            pn_cliswitch=dict(required=False, type='str', default='local'),
            state=dict(required=True, type='str',
                       choices=['present', 'absent', 'update']),
            pn_vrouter_name=dict(required=True, type='str'),
            pn_neighbor=dict(type='str'),
            pn_remote_as=dict(type='str'),
            pn_next_hop_self=dict(type='bool'),
            pn_password=dict(type='str', no_log=True),
            pn_ebgp=dict(type='int'),
            pn_prefix_listin=dict(type='str'),
            pn_prefix_listout=dict(type='str'),
            pn_route_reflector=dict(type='bool'),
            pn_override_capability=dict(type='bool'),
            pn_soft_reconfig=dict(type='bool'),
            pn_max_prefix=dict(type='int'),
            pn_max_prefix_warn=dict(type='bool'),
            pn_bfd=dict(type='bool'),
            pn_multiprotocol=dict(type='str',
                                  choices=['ipv4-unicast', 'ipv6-unicast']),
            pn_weight=dict(type='int'),
            pn_default_originate=dict(type='bool'),
            pn_keepalive=dict(type='str'),
            pn_holdtime=dict(type='str'),
            pn_route_mapin=dict(type='str'),
            pn_route_mapout=dict(type='str')
        ),
        required_if=(
            ["state", "present",
             ["pn_vrouter_name", "pn_neighbor", "pn_remote_as"]],
            ["state", "absent",
             ["pn_vrouter_name", "pn_neighbor"]],
            ["state", "update",
             ["pn_vrouter_name", "pn_neighbor"]]
        )
    )
    # Accessing the arguments
    state = module.params['state']
    vrouter_name = module.params['pn_vrouter_name']
    neighbor = module.params['pn_neighbor']
    remote_as = module.params['pn_remote_as']
    next_hop_self = module.params['pn_next_hop_self']
    password = module.params['pn_password']
    ebgp = module.params['pn_ebgp']
    prefix_listin = module.params['pn_prefix_listin']
    prefix_listout = module.params['pn_prefix_listout']
    route_reflector = module.params['pn_route_reflector']
    override_capability = module.params['pn_override_capability']
    soft_reconfig = module.params['pn_soft_reconfig']
    max_prefix = module.params['pn_max_prefix']
    max_prefix_warn = module.params['pn_max_prefix_warn']
    bfd = module.params['pn_bfd']
    multiprotocol = module.params['pn_multiprotocol']
    weight = module.params['pn_weight']
    default_originate = module.params['pn_default_originate']
    keepalive = module.params['pn_keepalive']
    holdtime = module.params['pn_holdtime']
    route_mapin = module.params['pn_route_mapin']
    route_mapout = module.params['pn_route_mapout']
    # Building the CLI command string
    cli = pn_cli(module)
    command = get_command_from_state(state)
    if command == 'vrouter-bgp-remove':
        # Removal requires both the vRouter and the neighbor to exist;
        # otherwise the task is skipped (idempotent behaviour).
        check_cli(module, cli)
        if VROUTER_EXISTS is False:
            module.exit_json(
                skipped=True,
                msg='vRouter %s does not exist' % vrouter_name
            )
        if NEIGHBOR_EXISTS is False:
            module.exit_json(
                skipped=True,
                msg=('BGP neighbor with IP %s does not exist on %s'
                     % (neighbor, vrouter_name))
            )
        cli += (' %s vrouter-name %s neighbor %s '
                % (command, vrouter_name, neighbor))
    else:
        # Shared path for vrouter-bgp-add and vrouter-bgp-modify.
        if command == 'vrouter-bgp-add':
            # Adding is idempotent: skip when the neighbor already exists.
            check_cli(module, cli)
            if VROUTER_EXISTS is False:
                module.exit_json(
                    skipped=True,
                    msg='vRouter %s does not exist' % vrouter_name
                )
            if NEIGHBOR_EXISTS is True:
                module.exit_json(
                    skipped=True,
                    msg=('BGP neighbor with IP %s already exists on %s'
                         % (neighbor, vrouter_name))
                )
        cli += (' %s vrouter-name %s neighbor %s '
                % (command, vrouter_name, neighbor))
        if remote_as:
            cli += ' remote-as ' + str(remote_as)
        # Tri-state booleans: None (unset) appends nothing, True/False map
        # to the positive/negative CLI flags respectively.
        if next_hop_self is True:
            cli += ' next-hop-self '
        if next_hop_self is False:
            cli += ' no-next-hop-self '
        if password:
            cli += ' password ' + password
        if ebgp:
            cli += ' ebgp-multihop ' + str(ebgp)
        if prefix_listin:
            cli += ' prefix-list-in ' + prefix_listin
        if prefix_listout:
            cli += ' prefix-list-out ' + prefix_listout
        if route_reflector is True:
            cli += ' route-reflector-client '
        if route_reflector is False:
            cli += ' no-route-reflector-client '
        if override_capability is True:
            cli += ' override-capability '
        if override_capability is False:
            cli += ' no-override-capability '
        if soft_reconfig is True:
            cli += ' soft-reconfig-inbound '
        if soft_reconfig is False:
            cli += ' no-soft-reconfig-inbound '
        if max_prefix:
            cli += ' max-prefix ' + str(max_prefix)
        if max_prefix_warn is True:
            cli += ' max-prefix-warn-only '
        if max_prefix_warn is False:
            cli += ' no-max-prefix-warn-only '
        if bfd is True:
            cli += ' bfd '
        if bfd is False:
            cli += ' no-bfd '
        if multiprotocol:
            cli += ' multi-protocol ' + multiprotocol
        if weight:
            cli += ' weight ' + str(weight)
        if default_originate is True:
            cli += ' default-originate '
        if default_originate is False:
            cli += ' no-default-originate '
        if keepalive:
            cli += ' neighbor-keepalive-interval ' + keepalive
        if holdtime:
            cli += ' neighbor-holdtime ' + holdtime
        if route_mapin:
            cli += ' route-map-in ' + route_mapin
        if route_mapout:
            cli += ' route-map-out ' + route_mapout
    # run_cli() executes the command and exits the module.
    run_cli(module, cli)
# Ansible boiler-plate
from ansible.module_utils.basic import AnsibleModule
if __name__ == '__main__':
main()
|
gpl-3.0
|
qiniu/logkit
|
deploy/python-release-tool/pack/compiled.py
|
1
|
1159
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import platform
import ConfigParser
from tools import models
class Compiled(object):
    # Builds the logkit binary with `go build` for a requested architecture.
    # NOTE: this is Python 2 code (print statements, ConfigParser module).
    def __init__(self, code_path, cf, version):
        # Work from the source tree; subsequent go build runs there.
        os.chdir(code_path)
        self.system = platform.system().lower()
        try:
            remote = cf.get('code', 'remote')
            self._prepare(remote, version)
        except ConfigParser.NoSectionError:
            # The [code] section / remote option is optional in the config.
            pass
        except ConfigParser.NoOptionError:
            pass
    def _prepare(self, remote, version):
        """Prepare the source tree, e.g. switch to the given release (stub)."""
        pass
    def _exec_shell(self, arch):
        # NOTE(review): os.popen interpolates arch into a shell string;
        # arch comes from internal constants here, but quoting would be safer.
        print 'compile %s %s' % (self.system, arch)
        if self.system == 'windows':
            # Windows shell cannot set env vars inline, so use os.environ.
            os.environ['GOARCH'] = arch
            os.popen('go build -o logkit.exe logkit.go')
        else:
            os.popen('GOARCH=%s go build -o logkit logkit.go' % (arch, ))
    def main(self, arch):
        # Translate the project's arch constants into GOARCH values.
        if arch == models.I386:
            self._exec_shell('386')
        elif arch == models.AMD64:
            self._exec_shell('amd64')
        else:
            print 'unknown arch type %s' % (arch, )
|
apache-2.0
|
KDE/twine2
|
kbindinggenerator/pplexer.py
|
1
|
8743
|
# -*- coding: utf-8 -*-
# Copyright 2007-8 Jim Bublitz <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import re
import ply.lex as lex
# handles the evaluation of conditionals
from .exprparser import ExpressionParser
newtext = []
macros = []
bitBucket = False
sentinel = False
preprocessor_tokens = ['cond', 'else', 'endif', 'include', 'define', 'undef', 'line', 'error', 'pragma', 'warning']
tokens = preprocessor_tokens + ['anyline']
values = {}
evaluate = ExpressionParser ().parse
# Completely ignored characters
t_ANY_ignore = ' \t\x0c'
def stripComment(s):
    """Split *s* into (code, comment) at the first '/*' or '//' marker.

    Returns the code part and the comment part, both stripped of
    surrounding whitespace.  If no comment marker is present, the whole
    string is returned as code with an empty comment.

    Bug fix: the original called s.find('\\/\\*') and s.find('\\/\\/') —
    regex-style escapes passed to str.find, which searches literally, so
    the backslash sequences never matched real C/C++ source and comments
    were never stripped.  Also handles a comment at column 0 (the original
    `pos > 0` test treated a leading comment as code).
    """
    markers = [p for p in (s.find('/*'), s.find('//')) if p >= 0]
    if not markers:
        return s, ''
    pos = min(markers)
    return s[:pos].strip(), s[pos:].strip()
def t_cond (t):
    r'\#\s*(?P<ifType>ifdef\s|ifndef\s|if\s|elif\s)\s*(?P<cond>.*?)\n'
    # NOTE: the raw string above is the token's regex (ply uses __doc__),
    # so this function must not be given a conventional docstring.
    # All conditionals that perform a test are handled here.
    global newtext
    ifType = t.lexer.lexmatch.group ('ifType').strip ()
    condition, comment = stripComment (t.lexer.lexmatch.group ('cond'))
    # #if/#elif evaluate the expression for truth ('calc' mode); the other
    # forms only test whether the symbol is defined ('def' mode).
    # '#if defined' works because 'defined' is handled as an operator by
    # the expression parser which evaluates the conditional.
    if ifType in ['if', 'elif']:
        mode = 'calc'
    else:
        mode = 'def'
    ifCondition = evaluate (condition, mode, values)
    global bitBucket, sentinel
    # bitBucket == True means subsequent lines are discarded; the sense is
    # inverted for #ifndef.
    bitBucket = ((not ifCondition) and (ifType != 'ifndef')) or (ifCondition and (ifType == 'ifndef'))
    # Heuristic: a surviving '#ifndef FOO_H' style guard marks the next
    # #define as an include-sentinel to be removed (see t_define).
    sentinel = not bitBucket and ('_h' in condition or '_H' in condition)
    # A multiline comment could begin on a preprocessor line
    # that's being eliminated here — keep the comment text in that case.
    if bitBucket and comment:
        newtext.append (comment + '\n')
    else:
        newtext.append ('\n')
    t.lexer.lineno += 1
def t_else (t):
    r'\#\s*else(.*?)\n' # comments?
    # #else flips whether lines are kept or discarded.
    global bitBucket, newtext
    bitBucket = not bitBucket
    t.lexer.lineno += 1
    # Replace the directive with a newline to preserve line numbering.
    newtext.append ('\n')
def t_endif (t):
    r'\#\s*endif(.*?)\n'
    # #endif always re-enables output.
    # NOTE(review): nested #if blocks share one flat bitBucket flag, so an
    # inner #endif re-enables output even inside a discarded outer block.
    global bitBucket, newtext
    bitBucket = False
    t.lexer.lineno += 1
    newtext.append ('\n')
def t_include (t):
    r'\#\s*include.*?\n'
    # Drop the #include line; emit a newline to keep line numbers aligned.
    global newtext
    t.lexer.lineno += 1
    newtext.append ('\n')
def t_line (t):
    r'\#\s*line.*?\n'
    # Drop #line directives; emit a newline to keep line numbers aligned.
    global newtext
    t.lexer.lineno += 1
    newtext.append ('\n')
def t_error (t):
    r'\#\s*error.*?\n'
    # Drop #error directives; emit a newline to keep line numbers aligned.
    # (Despite the name, ply treats this as a token rule because it has a
    # regex docstring; it is not ply's lexer error handler.)
    global newtext
    t.lexer.lineno += 1
    newtext.append ('\n')
def t_pragma (t):
    r'\#\s*pragma.*?\n'
    # Drop #pragma directives; emit a newline to keep line numbers aligned.
    global newtext
    t.lexer.lineno += 1
    newtext.append ('\n')
def t_warning (t):
    r'\#\s*warning.*?\n'
    # Drop #warning directives; emit a newline to keep line numbers aligned.
    global newtext
    t.lexer.lineno += 1
    newtext.append ('\n')
def t_undef (t):
    r'\#\s*undef\s*(?P<item>.*?)\n'
    global macros, values, newtext
    item = t.lexer.lexmatch.group ('item').strip ()
    if item in values:
        # Remove the macro from both the value table and the substitution
        # list.  Global macros are 2-tuples (no name at index 2) and are
        # therefore never removed by #undef.
        macros = [macro for macro in macros if len(macro)==2 or macro[2] != item]
        del values [item]
    t.lexer.lineno += 1
    newtext.append ('\n')
def t_define (t):
    r'\#\s*define\s*(?P<first>[\S]+)\s*?(?P<second>[^\n]*?)\n'
    # Records a macro definition: 'first' is the macro name (possibly the
    # start of a function-like macro), 'second' is the replacement text.
    global sentinel, values, macros, newtext
    a = t.lexer.lexmatch.group ('first')
    b = t.lexer.lexmatch.group ('second')
    # Append any continuation lines (replacement text ending in '\'),
    # advancing the lexer position past them manually.
    newlines = 1
    start = t.lexer.lexpos
    if b and b.endswith ('\\'):
        data = t.lexer.lexdata
        for i in range (start, len (data)):
            if data [i] == '\n':
                t.lexer.lineno += 1
                newlines += 1
            if data [i] == '\n' and data [i - 1] != '\\':
                # First newline not escaped by '\' ends the continuation.
                break
        t.lexer.lexpos = i + 1
        b += data [start:t.lexer.lexpos].replace ('\\\n', ' ')
    # Function-like macro whose parameter list was split between the name
    # and replacement groups: move the rest of the '(...)' back into 'a'.
    if '(' in a and not ')' in a:
        pos = b.find (')')
        if pos < 0:
            return
        a += b [:pos + 1]
        b = b [pos + 1:]
    # Remove the '#define FOO_H' include-guard sentinel flagged by t_cond:
    # valueless and looks like a header guard name.
    sentinel = sentinel and not b and ('_h' in a or '_H' in a)
    if not sentinel:
        if not b or '(' in a:
            # Valueless or function-like macros substitute to nothing.
            values [a] = ''
            macros.insert (0, (re.compile (a), '', a))
        else:
            values [a] = b
            macros.insert (0, (re.compile (a), b.strip (), a))
    sentinel = False
    # Emit one newline per consumed source line to keep numbering aligned.
    newtext.append (newlines *'\n')
    t.lexer.lineno += 1
def t_anyline (t):
    r'[^\n]*?\n(([^#\n][^\n]*\n)|\n)*'
    # NOTE: the raw string above is the token's regex (ply uses __doc__);
    # the triple-quoted string below is a no-op statement kept as in-code
    # documentation, not the function docstring.
    """
    Process anything that's not a preprocesor directive.
    Apply all #define macros to each line. Code that has
    been #if'd out (bitBucket == True) is replaced by
    a single newline for each line removed.
    """
    global sentinel, newtext
    sentinel = False
    if not bitBucket:
        line = t.value
        # Apply macros newest-first (they are inserted at index 0).
        for m in macros:
            line = m[0].sub(m[1], line)
        newtext.append (line)
        t.lexer.lineno += line.count('\n')
    else:
        # Discarded region: one newline per removed line keeps numbering.
        c = t.value.count('\n')
        for x in range(c):
            newtext.append('\n')
        t.lexer.lineno += c
# this needs to be HERE - not above token definitions
ppLexer = lex.lex (debug=0)
def preprocess (text, global_values={}, global_macros=[]):
    """
    Preprocess a C/C++ header file text
    Preprocesses h files - does #define substitutions and
    evaluates conditionals to include/exclude code. No
    substitutions are performed on preprocessor lines (any
    line beginning with '#'). Global #defines are applied
    LAST, so they override any local #defines.
    All C preprocessor code is stripped, and along with any
    lines eliminated conditionally, is replaced with newlines
    so that error messages still refer to the correct line in
    the original file.
    Arguments:
    text -- The text to process.
    global_values -- Dict mapping string variable names to values.
    global_macros -- List of tuples. The first value in a tuple is a
    regular expression object. The second is that
    replacement string which may contain re module
    back references.
    Returns the processed string.
    """
    # NOTE(review): mutable default arguments are safe here only because
    # neither dict nor list default is mutated (both are copied below).
    global newtext, bitBucket, macros, values
    newtext = []
    bitBucket = False
    macros = [] + global_macros
    values = {}
    values.update (global_values)
    # The token rules rely on input ending with a newline.
    if text[-1]!='\n':
        text = text + '\n'
    ppLexer.input (text)
    # A single token() call drives the whole input: none of the rules
    # return a token, so the lexer keeps consuming until EOF, building
    # the output in the module-level 'newtext' list as a side effect.
    token = ppLexer.token()
    #print(newtext)
    #return "".join (fixDoc (newtext))
    return "".join(newtext)
def fixDoc (textList):
    # Convert doxygen-style '///' and '///<' comment lines into
    # '/** ... */' blocks, in place, and blank out '/////' separator lines.
    #
    # NOTE(review): this helper appears to be unused -- its call in
    # preprocess() is commented out.  The indentation of the reviewed copy
    # was ambiguous; confirm the branch nesting against history before
    # re-enabling.
    #
    # doReplace / doBackReplace track whether we are inside a multi-line run
    # of '///' (resp. '///<') comments.
    doReplace = False
    doBackReplace = False
    # nLines is captured once; the insert() calls below grow the list, but
    # the loop keeps iterating over the original length -- presumably
    # intentional, TODO confirm.
    nLines = len (textList)
    for i in range (nLines):
        if i >= nLines - 1:
            # Never look past the end: every branch peeks at textList[i + 1].
            break
        if textList [i].startswith ('/////'):
            # Pure separator line: drop it (keep a newline for line counts).
            textList [i] = '\n'
            continue
        haveBackCmt = textList [i].find ('///<') >= 0
        haveCmt = textList [i].find ('///') >= 0 and not haveBackCmt
        if haveBackCmt:
            # Trailing ("back") comment: open a block if the next line
            # continues it, otherwise convert to a single '*' line.
            if not doBackReplace:
                doBackReplace = textList [i + 1].strip ().startswith ('///<')
            if doBackReplace:
                textList [i] = textList [i].replace ('///<', '/**<')
            else:
                textList [i] = textList [i].replace ('///<', '*')
        elif doBackReplace:
            # First line after a back-comment block: close the block.
            textList.insert (i, '*/\n')
            doBackReplace = False
        if not haveBackCmt and haveCmt:
            # Forward comment: same scheme as above for '///'.
            if not doReplace:
                doReplace = textList [i + 1].strip ().startswith ('///')
            if doReplace:
                textList [i] = textList [i].replace ('///', '/**')
            else:
                textList [i] = textList [i].replace ('///', '*')
        elif doReplace:
            textList.insert (i, '*/\n')
            doReplace = False
    return textList
if __name__ == '__main__':
text = """#define foo bar"""
|
lgpl-3.0
|
liuqr/edx-xiaodun
|
lms/djangoapps/instructor/tests/test_legacy_anon_csv.py
|
13
|
2237
|
"""
Unit tests for instructor dashboard
Based on (and depends on) unit tests for courseware.
Notes for running by hand:
./manage.py lms --settings test test lms/djangoapps/instructor
"""
from django.test.utils import override_settings
# Need access to internal func to put users in the right group
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from courseware.tests.helpers import LoginEnrollmentTestCase
from courseware.tests.modulestore_config import TEST_DATA_MIXED_MODULESTORE
from student.roles import CourseStaffRole
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.django import modulestore, clear_existing_modulestores
from mock import patch
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestInstructorDashboardAnonCSV(ModuleStoreTestCase, LoginEnrollmentTestCase):
    '''
    Check for download of csv
    '''

    # Note -- I copied this setUp from a similar test
    def setUp(self):
        # Order matters here: the modulestore must be reset before the toy
        # course can be loaded from the mixed test-data store.
        clear_existing_modulestores()
        self.toy = modulestore().get_course("edX/toy/2012_Fall")

        # Create two accounts: one plain student, one instructor.
        self.student = '[email protected]'
        self.instructor = '[email protected]'
        self.password = 'foo'
        self.create_account('u1', self.student, self.password)
        self.create_account('u2', self.instructor, self.password)
        self.activate_user(self.student)
        self.activate_user(self.instructor)

        # Grant the instructor staff access on the toy course.
        CourseStaffRole(self.toy.location).add_users(User.objects.get(email=self.instructor))

        # End the session created by account creation, then log in and
        # enroll as the instructor for the actual test.
        self.logout()
        self.login(self.instructor, self.password)
        self.enroll(self.toy)

    def test_download_anon_csv(self):
        # POSTing the dashboard's "download anonymized IDs" action must
        # return a CSV with one row per enrolled user.
        course = self.toy
        url = reverse('instructor_dashboard', kwargs={'course_id': course.id})
        with patch('instructor.views.legacy.unique_id_for_user') as mock_unique:
            # Pin the anonymized id so the CSV body is deterministic.
            mock_unique.return_value = 42
            response = self.client.post(url, {'action': 'Download CSV of all student anonymized IDs'})
            self.assertEqual(response['Content-Type'], 'text/csv')
            # Normalize line endings before comparing.
            body = response.content.replace('\r', '')
            self.assertEqual(body, '"User ID","Anonymized user ID"\n"2","42"\n')
|
agpl-3.0
|
sidartaoliveira/ansible
|
lib/ansible/modules/windows/win_package.py
|
29
|
5089
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Trond Hindenes <[email protected]>, and others
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
# Release/status metadata consumed by Ansible's documentation tooling.
ANSIBLE_METADATA = dict(
    metadata_version='1.0',
    status=['preview'],
    supported_by='core',
)
DOCUMENTATION = r'''
---
module: win_package
version_added: "1.7"
author: Trond Hindenes
short_description: Installs/Uninstalls an installable package, either from local file system or url
description:
- Installs or uninstalls a package.
- >
Use a product_id to check if the package needs installing. You can find product ids for installed programs in the windows registry
either in C(HKLM:Software\Microsoft\Windows\CurrentVersion\Uninstall) or for 32 bit programs
C(HKLM:Software\Wow6432Node\Microsoft\Windows\CurrentVersion\Uninstall)
options:
path:
description:
- Location of the package to be installed (either on file system, network share or url)
required: true
name:
description:
- Name of the package, if name isn't specified the path will be used for log messages
required: false
default: null
product_id:
description:
- Product id of the installed package (used for checking if already installed)
- >
You can find product ids for installed programs in the windows registry either in C(HKLM:Software\Microsoft\Windows\CurrentVersion\Uninstall)
or for 32 bit programs C(HKLM:Software\Wow6432Node\Microsoft\Windows\CurrentVersion\Uninstall)
required: true
aliases: [productid]
arguments:
description:
- Any arguments the installer needs
default: null
required: false
state:
description:
- Install or Uninstall
choices:
- present
- absent
default: present
required: false
aliases: [ensure]
user_name:
description:
- Username of an account with access to the package if it's located on a file share. Only needed if the winrm user doesn't have access to the package.
Also specify user_password for this to function properly.
default: null
required: false
user_password:
description:
- Password of an account with access to the package if it's located on a file share. Only needed if the winrm user doesn't have access to the package.
Also specify user_name for this to function properly.
default: null
required: false
expected_return_code:
description:
- One or more return codes from the package installation that indicates success.
- If not provided, defaults to 0
required: no
default: 0
'''
# Usage examples: YAML embedded in a raw string, shown by ansible-doc.
# Normalized to the canonical option names declared in DOCUMENTATION
# (product_id, state) instead of the deprecated aliases (productid, ensure)
# that the examples previously used inconsistently.
EXAMPLES = r'''
- name: Install the Visual C thingy
  win_package:
    name: Microsoft Visual C thingy
    path: http://download.microsoft.com/download/1/6/B/16B06F60-3B20-4FF2-B699-5E9B7962F9AE/VSU_4/vcredist_x64.exe
    product_id: '{CF2BEA3C-26EA-32F8-AA9B-331F7E34BA97}'
    arguments: /install /passive /norestart

- name: Install Remote Desktop Connection Manager from msi
  win_package:
    path: https://download.microsoft.com/download/A/F/0/AF0071F3-B198-4A35-AA90-C68D103BDCCF/rdcman.msi
    product_id: '{0240359E-6A4C-4884-9E94-B397A02D893C}'

- name: Uninstall Remote Desktop Connection Manager installed from msi
  win_package:
    path: https://download.microsoft.com/download/A/F/0/AF0071F3-B198-4A35-AA90-C68D103BDCCF/rdcman.msi
    product_id: '{0240359E-6A4C-4884-9E94-B397A02D893C}'
    state: absent

# Specify the expected non-zero return code when successful
# In this case 3010 indicates 'reboot required'
- name: 'Microsoft .NET Framework 4.5.1'
  win_package:
    path: https://download.microsoft.com/download/1/6/7/167F0D79-9317-48AE-AEDB-17120579F8E2/NDP451-KB2858728-x86-x64-AllOS-ENU.exe
    product_id: '{7DEBE4EB-6B40-3766-BB35-5CBBC385DA37}'
    arguments: '/q /norestart'
    state: present
    expected_return_code: 3010

# Specify multiple non-zero return codes when successful
# In this case we can say that both 0 (SUCCESSFUL) and 3010 (REBOOT REQUIRED) codes are acceptable
- name: 'Microsoft .NET Framework 4.5.1'
  win_package:
    path: https://download.microsoft.com/download/1/6/7/167F0D79-9317-48AE-AEDB-17120579F8E2/NDP451-KB2858728-x86-x64-AllOS-ENU.exe
    product_id: '{7DEBE4EB-6B40-3766-BB35-5CBBC385DA37}'
    arguments: '/q /norestart'
    state: present
    expected_return_code: [0,3010]
'''
|
gpl-3.0
|
newemailjdm/pybrain
|
pybrain/optimization/populationbased/multiobjective/nsga2.py
|
25
|
3323
|
__author__ = 'Justin Bayer, Tom Schaul, {justin,tom}@idsia.ch'
from scipy import array
from pybrain.optimization.populationbased.ga import GA
from pybrain.tools.nondominated import non_dominated_front, crowding_distance, non_dominated_sort
# TODO: not very elegant, because of the conversions between tuples and arrays all the time...
class MultiObjectiveGA(GA):
    """ Multi-objective Genetic Algorithm: the fitness is a vector with one entry per objective.
    By default we use NSGA-II selection. """
    topProportion = 0.5
    elitism = True
    populationSize = 100
    mutationStdDev = 1.
    allowEquality = True

    mustMaximize = True

    def _learnStep(self):
        """ do one generation step """
        # evaluate fitness
        """ added by JPQ """
        if isinstance(self.fitnesses,dict):
            # Re-use cached fitness values for individuals carried over from
            # the previous generation; only evaluate genuinely new ones.
            oldfitnesses = self.fitnesses
            self.fitnesses = dict()
            for indiv in self.currentpop:
                if tuple(indiv) in oldfitnesses:
                    self.fitnesses[tuple(indiv)] = oldfitnesses[tuple(indiv)]
                else:
                    self.fitnesses[tuple(indiv)] = self._oneEvaluation(indiv)
            del oldfitnesses
        else:
            # ---
            # First generation: evaluate everything.
            self.fitnesses = dict([(tuple(indiv), self._oneEvaluation(indiv)) for indiv in self.currentpop])

        if self.storeAllPopulations:
            self._allGenerations.append((self.currentpop, self.fitnesses))

        # NOTE(review): these two branches look swapped relative to the usual
        # elitist-archive scheme -- the elitism branch DISCARDS the previous
        # bestEvaluable archive while the non-elitism branch merges it in.
        # Also, in the merging branch the key lambda can raise KeyError for
        # archive members not present in currentpop, since self.fitnesses is
        # rebuilt above from currentpop only.  Verify against upstream pybrain.
        if self.elitism:
            self.bestEvaluable = list(non_dominated_front(list(map(tuple, self.currentpop)),
                                                          key=lambda x: self.fitnesses[x],
                                                          allowequality = self.allowEquality))
        else:
            self.bestEvaluable = list(non_dominated_front(list(map(tuple, self.currentpop))+self.bestEvaluable,
                                                          key=lambda x: self.fitnesses[x],
                                                          allowequality = self.allowEquality))
        self.bestEvaluation = [self.fitnesses[indiv] for indiv in self.bestEvaluable]
        self.produceOffspring()

    def select(self):
        # Individuals are converted tuple<->array because dict keys must be
        # hashable while the GA machinery works with scipy arrays.
        return list(map(array, nsga2select(list(map(tuple, self.currentpop)), self.fitnesses,
                                           self.selectionSize, self.allowEquality)))
def nsga2select(population, fitnesses, survivors, allowequality = True):
    """The NSGA-II selection strategy (Deb et al., 2002).

    Fill the surviving set front by front; when a front does not fit
    entirely, keep its members with the largest crowding distance.  The
    number of individuals that survive is given by `survivors`.
    """
    ranked_fronts = non_dominated_sort(population,
                                       key=lambda indiv: fitnesses[indiv],
                                       allowequality=allowequality)
    chosen = set()
    for front in ranked_fronts:
        free_slots = survivors - len(chosen)
        if free_slots <= 0:
            break
        if len(front) > free_slots:
            # The whole front does not fit: rank its members by crowding
            # distance (descending) and keep only as many as fit.
            distances = crowding_distance(front, fitnesses)
            by_distance = sorted(front, key=lambda indiv: distances[indiv], reverse=True)
            front = set(by_distance[:free_slots])
        chosen |= front
    return list(chosen)
|
bsd-3-clause
|
aquametalabs/django-telegram
|
telegram/handlers/email.py
|
1
|
1086
|
from django.core.mail import send_mail
from django.conf import settings
from telegram.handlers.base import BasePlatformHandler
from telegram.models import PlatformMeta
class EmailHandler(BasePlatformHandler):
    """Deliver a telegram as an email via Django's ``send_mail``."""

    def handle(self):
        """
        Will try to use settings.TELEGRAM_EMAIL_HANDLER_FROM,
        the platformmeta setting "subject_prepend", and the subscriptionmeta
        setting "email_address".
        """
        # Optional subject prefix from the platform's metadata.
        subject = self.telegram.subject
        try:
            prefix = self.platform.platformmeta_set.get(key='subject_prepend').value
        except PlatformMeta.DoesNotExist:
            pass
        else:
            subject = '%s: %s' % (prefix, subject)

        # Sender address: platform metadata first, settings as fallback.
        try:
            sender = self.platform.platformmeta_set.get(key='from_address').value
        except PlatformMeta.DoesNotExist:
            sender = settings.TELEGRAM_EMAIL_HANDLER_FROM

        # A missing 'email_address' subscription meta propagates as
        # SubscriptionMeta.DoesNotExist, matching the previous behaviour.
        recipient = self.subscription.subscriptionmeta_set.get(key='email_address').value
        send_mail(
            subject,
            self.telegram.content,
            sender,
            [recipient])
|
bsd-3-clause
|
obeattie/sqlalchemy
|
test/dialect/test_mssql.py
|
1
|
51284
|
# -*- encoding: utf-8
from sqlalchemy.test.testing import eq_
import datetime, os, re
from sqlalchemy import *
from sqlalchemy import types, exc, schema
from sqlalchemy.orm import *
from sqlalchemy.sql import table, column
from sqlalchemy.databases import mssql
from sqlalchemy.dialects.mssql import pyodbc
from sqlalchemy.engine import url
from sqlalchemy.test import *
from sqlalchemy.test.testing import eq_
class CompileTest(TestBase, AssertsCompiledSQL):
    # SQL-compilation tests: statements are rendered with the MSSQL dialect
    # and compared against expected SQL strings; no database is used.
    __dialect__ = mssql.dialect()

    def test_insert(self):
        t = table('sometable', column('somecolumn'))
        self.assert_compile(t.insert(), "INSERT INTO sometable (somecolumn) VALUES (:somecolumn)")

    def test_update(self):
        t = table('sometable', column('somecolumn'))
        self.assert_compile(t.update(t.c.somecolumn==7), "UPDATE sometable SET somecolumn=:somecolumn WHERE sometable.somecolumn = :somecolumn_1", dict(somecolumn=10))

    def test_in_with_subqueries(self):
        """Test that when using subqueries in a binary expression
        the == and != are changed to IN and NOT IN respectively.
        """
        t = table('sometable', column('somecolumn'))
        self.assert_compile(t.select().where(t.c.somecolumn==t.select()), "SELECT sometable.somecolumn FROM sometable WHERE sometable.somecolumn IN (SELECT sometable.somecolumn FROM sometable)")
        self.assert_compile(t.select().where(t.c.somecolumn!=t.select()), "SELECT sometable.somecolumn FROM sometable WHERE sometable.somecolumn NOT IN (SELECT sometable.somecolumn FROM sometable)")

    def test_count(self):
        t = table('sometable', column('somecolumn'))
        self.assert_compile(t.count(), "SELECT count(sometable.somecolumn) AS tbl_row_count FROM sometable")

    def test_noorderby_insubquery(self):
        """test that the ms-sql dialect removes ORDER BY clauses from subqueries"""
        table1 = table('mytable',
            column('myid', Integer),
            column('name', String),
            column('description', String),
        )
        q = select([table1.c.myid], order_by=[table1.c.myid]).alias('foo')
        crit = q.c.myid == table1.c.myid
        self.assert_compile(select(['*'], crit), """SELECT * FROM (SELECT mytable.myid AS myid FROM mytable) AS foo, mytable WHERE foo.myid = mytable.myid""")

    def test_aliases_schemas(self):
        # Tables in a remote schema must be aliased so labelled columns in
        # the result map stay addressable.
        metadata = MetaData()
        table1 = table('mytable',
            column('myid', Integer),
            column('name', String),
            column('description', String),
        )
        table4 = Table(
            'remotetable', metadata,
            Column('rem_id', Integer, primary_key=True),
            Column('datatype_id', Integer),
            Column('value', String(20)),
            schema = 'remote_owner'
        )
        s = table4.select()
        c = s.compile(dialect=self.__dialect__)
        assert table4.c.rem_id in set(c.result_map['rem_id'][1])
        s = table4.select(use_labels=True)
        c = s.compile(dialect=self.__dialect__)
        print c.result_map
        assert table4.c.rem_id in set(c.result_map['remote_owner_remotetable_rem_id'][1])
        self.assert_compile(table4.select(), "SELECT remotetable_1.rem_id, remotetable_1.datatype_id, remotetable_1.value FROM remote_owner.remotetable AS remotetable_1")
        self.assert_compile(table4.select(use_labels=True), "SELECT remotetable_1.rem_id AS remote_owner_remotetable_rem_id, remotetable_1.datatype_id AS remote_owner_remotetable_datatype_id, remotetable_1.value AS remote_owner_remotetable_value FROM remote_owner.remotetable AS remotetable_1")
        self.assert_compile(table1.join(table4, table1.c.myid==table4.c.rem_id).select(), "SELECT mytable.myid, mytable.name, mytable.description, remotetable_1.rem_id, remotetable_1.datatype_id, remotetable_1.value FROM mytable JOIN remote_owner.remotetable AS remotetable_1 ON remotetable_1.rem_id = mytable.myid")

    def test_delete_schema(self):
        metadata = MetaData()
        tbl = Table('test', metadata, Column('id', Integer, primary_key=True), schema='paj')
        self.assert_compile(tbl.delete(tbl.c.id == 1), "DELETE FROM paj.test WHERE paj.test.id = :id_1")
        s = select([tbl.c.id]).where(tbl.c.id==1)
        self.assert_compile(tbl.delete().where(tbl.c.id==(s)), "DELETE FROM paj.test WHERE paj.test.id IN (SELECT test_1.id FROM paj.test AS test_1 WHERE test_1.id = :id_1)")

    def test_delete_schema_multipart(self):
        # Dotted schema names ('database.owner') must render unquoted when
        # no part needs quoting.
        metadata = MetaData()
        tbl = Table('test', metadata, Column('id', Integer, primary_key=True), schema='banana.paj')
        self.assert_compile(tbl.delete(tbl.c.id == 1), "DELETE FROM banana.paj.test WHERE banana.paj.test.id = :id_1")
        s = select([tbl.c.id]).where(tbl.c.id==1)
        self.assert_compile(tbl.delete().where(tbl.c.id==(s)), "DELETE FROM banana.paj.test WHERE banana.paj.test.id IN (SELECT test_1.id FROM banana.paj.test AS test_1 WHERE test_1.id = :id_1)")

    def test_delete_schema_multipart_needs_quoting(self):
        # A space in the database part forces [bracket] quoting of that part.
        metadata = MetaData()
        tbl = Table('test', metadata, Column('id', Integer, primary_key=True), schema='banana split.paj')
        self.assert_compile(tbl.delete(tbl.c.id == 1), "DELETE FROM [banana split].paj.test WHERE [banana split].paj.test.id = :id_1")
        s = select([tbl.c.id]).where(tbl.c.id==1)
        self.assert_compile(tbl.delete().where(tbl.c.id==(s)), "DELETE FROM [banana split].paj.test WHERE [banana split].paj.test.id IN (SELECT test_1.id FROM [banana split].paj.test AS test_1 WHERE test_1.id = :id_1)")

    def test_delete_schema_multipart_both_need_quoting(self):
        metadata = MetaData()
        tbl = Table('test', metadata, Column('id', Integer, primary_key=True), schema='banana split.paj with a space')
        self.assert_compile(tbl.delete(tbl.c.id == 1), "DELETE FROM [banana split].[paj with a space].test WHERE [banana split].[paj with a space].test.id = :id_1")
        s = select([tbl.c.id]).where(tbl.c.id==1)
        self.assert_compile(tbl.delete().where(tbl.c.id==(s)), "DELETE FROM [banana split].[paj with a space].test WHERE [banana split].[paj with a space].test.id IN (SELECT test_1.id FROM [banana split].[paj with a space].test AS test_1 WHERE test_1.id = :id_1)")

    def test_union(self):
        t1 = table('t1',
            column('col1'),
            column('col2'),
            column('col3'),
            column('col4')
            )
        t2 = table('t2',
            column('col1'),
            column('col2'),
            column('col3'),
            column('col4'))
        (s1, s2) = (
            select([t1.c.col3.label('col3'), t1.c.col4.label('col4')], t1.c.col2.in_(["t1col2r1", "t1col2r2"])),
            select([t2.c.col3.label('col3'), t2.c.col4.label('col4')], t2.c.col2.in_(["t2col2r2", "t2col2r3"]))
        )
        u = union(s1, s2, order_by=['col3', 'col4'])
        self.assert_compile(u, "SELECT t1.col3 AS col3, t1.col4 AS col4 FROM t1 WHERE t1.col2 IN (:col2_1, :col2_2) "\
            "UNION SELECT t2.col3 AS col3, t2.col4 AS col4 FROM t2 WHERE t2.col2 IN (:col2_3, :col2_4) ORDER BY col3, col4")
        self.assert_compile(u.alias('bar').select(), "SELECT bar.col3, bar.col4 FROM (SELECT t1.col3 AS col3, t1.col4 AS col4 FROM t1 WHERE "\
            "t1.col2 IN (:col2_1, :col2_2) UNION SELECT t2.col3 AS col3, t2.col4 AS col4 FROM t2 WHERE t2.col2 IN (:col2_3, :col2_4)) AS bar")

    def test_function(self):
        self.assert_compile(func.foo(1, 2), "foo(:foo_1, :foo_2)")
        self.assert_compile(func.current_time(), "CURRENT_TIME")
        self.assert_compile(func.foo(), "foo()")
        m = MetaData()
        t = Table('sometable', m, Column('col1', Integer), Column('col2', Integer))
        self.assert_compile(select([func.max(t.c.col1)]), "SELECT max(sometable.col1) AS max_1 FROM sometable")

    def test_function_overrides(self):
        # MSSQL-specific renderings of generic functions.
        self.assert_compile(func.current_date(), "GETDATE()")
        self.assert_compile(func.length(3), "LEN(:length_1)")

    def test_extract(self):
        t = table('t', column('col1'))
        for field in 'day', 'month', 'year':
            self.assert_compile(
                select([extract(field, t.c.col1)]),
                'SELECT DATEPART("%s", t.col1) AS anon_1 FROM t' % field)

    def test_update_returning(self):
        # RETURNING is rendered as an OUTPUT clause on MSSQL.
        table1 = table('mytable',
            column('myid', Integer),
            column('name', String(128)),
            column('description', String(128)),
        )
        u = update(table1, values=dict(name='foo')).returning(table1.c.myid, table1.c.name)
        self.assert_compile(u, "UPDATE mytable SET name=:name OUTPUT inserted.myid, inserted.name")
        u = update(table1, values=dict(name='foo')).returning(table1)
        self.assert_compile(u, "UPDATE mytable SET name=:name OUTPUT inserted.myid, "
            "inserted.name, inserted.description")
        u = update(table1, values=dict(name='foo')).returning(table1).where(table1.c.name=='bar')
        self.assert_compile(u, "UPDATE mytable SET name=:name OUTPUT inserted.myid, "
            "inserted.name, inserted.description WHERE mytable.name = :name_1")
        u = update(table1, values=dict(name='foo')).returning(func.length(table1.c.name))
        self.assert_compile(u, "UPDATE mytable SET name=:name OUTPUT LEN(inserted.name) AS length_1")

    def test_delete_returning(self):
        table1 = table('mytable',
            column('myid', Integer),
            column('name', String(128)),
            column('description', String(128)),
        )
        d = delete(table1).returning(table1.c.myid, table1.c.name)
        self.assert_compile(d, "DELETE FROM mytable OUTPUT deleted.myid, deleted.name")
        d = delete(table1).where(table1.c.name=='bar').returning(table1.c.myid, table1.c.name)
        self.assert_compile(d, "DELETE FROM mytable OUTPUT deleted.myid, deleted.name WHERE mytable.name = :name_1")

    def test_insert_returning(self):
        table1 = table('mytable',
            column('myid', Integer),
            column('name', String(128)),
            column('description', String(128)),
        )
        i = insert(table1, values=dict(name='foo')).returning(table1.c.myid, table1.c.name)
        self.assert_compile(i, "INSERT INTO mytable (name) OUTPUT inserted.myid, inserted.name VALUES (:name)")
        i = insert(table1, values=dict(name='foo')).returning(table1)
        self.assert_compile(i, "INSERT INTO mytable (name) OUTPUT inserted.myid, "
            "inserted.name, inserted.description VALUES (:name)")
        i = insert(table1, values=dict(name='foo')).returning(func.length(table1.c.name))
        self.assert_compile(i, "INSERT INTO mytable (name) OUTPUT LEN(inserted.name) AS length_1 VALUES (:name)")
class IdentityInsertTest(TestBase, AssertsCompiledSQL):
    # Exercises SET IDENTITY_INSERT handling: inserting explicit values into
    # an IDENTITY primary key, then letting the server assign the next one.
    __only_on__ = 'mssql'
    __dialect__ = mssql.MSDialect()

    @classmethod
    def setup_class(cls):
        # module-level globals shared with the test methods below
        global metadata, cattable
        metadata = MetaData(testing.db)
        cattable = Table('cattable', metadata,
            Column('id', Integer),
            Column('description', String(50)),
            PrimaryKeyConstraint('id', name='PK_cattable'),
        )

    def setup(self):
        metadata.create_all()

    def teardown(self):
        metadata.drop_all()

    def test_compiled(self):
        self.assert_compile(cattable.insert().values(id=9, description='Python'), "INSERT INTO cattable (id, description) VALUES (:id, :description)")

    def test_execute(self):
        cattable.insert().values(id=9, description='Python').execute()
        cats = cattable.select().order_by(cattable.c.id).execute()
        eq_([(9, 'Python')], list(cats))
        # With no explicit id, the identity picks up after the explicit 9.
        result = cattable.insert().values(description='PHP').execute()
        eq_([10], result.inserted_primary_key)
        lastcat = cattable.select().order_by(desc(cattable.c.id)).execute()
        eq_((10, 'PHP'), lastcat.first())

    def test_executemany(self):
        # Explicit (unsorted) ids via executemany, then identity-assigned rows.
        cattable.insert().execute([
            {'id': 89, 'description': 'Python'},
            {'id': 8, 'description': 'Ruby'},
            {'id': 3, 'description': 'Perl'},
            {'id': 1, 'description': 'Java'},
        ])
        cats = cattable.select().order_by(cattable.c.id).execute()
        eq_([(1, 'Java'), (3, 'Perl'), (8, 'Ruby'), (89, 'Python')], list(cats))
        cattable.insert().execute([
            {'description': 'PHP'},
            {'description': 'Smalltalk'},
        ])
        lastcats = cattable.select().order_by(desc(cattable.c.id)).limit(2).execute()
        eq_([(91, 'Smalltalk'), (90, 'PHP')], list(lastcats))
class ReflectionTest(TestBase, ComparesTables):
    # Round-trip tests: create tables, reflect them back via autoload, and
    # compare reflected metadata to the originals.
    __only_on__ = 'mssql'

    def test_basic_reflection(self):
        meta = MetaData(testing.db)
        users = Table('engine_users', meta,
            Column('user_id', types.INT, primary_key=True),
            Column('user_name', types.VARCHAR(20), nullable=False),
            Column('test1', types.CHAR(5), nullable=False),
            Column('test2', types.Float(5), nullable=False),
            Column('test3', types.Text),
            Column('test4', types.Numeric, nullable = False),
            Column('test5', types.DateTime),
            Column('parent_user_id', types.Integer,
                   ForeignKey('engine_users.user_id')),
            Column('test6', types.DateTime, nullable=False),
            Column('test7', types.Text),
            Column('test8', types.LargeBinary),
            Column('test_passivedefault2', types.Integer, server_default='5'),
            Column('test9', types.BINARY(100)),
            Column('test_numeric', types.Numeric()),
            test_needs_fk=True,
        )
        addresses = Table('engine_email_addresses', meta,
            Column('address_id', types.Integer, primary_key = True),
            Column('remote_user_id', types.Integer, ForeignKey(users.c.user_id)),
            Column('email_address', types.String(20)),
            test_needs_fk=True,
        )
        meta.create_all()
        try:
            meta2 = MetaData()
            reflected_users = Table('engine_users', meta2, autoload=True,
                                    autoload_with=testing.db)
            reflected_addresses = Table('engine_email_addresses', meta2,
                                        autoload=True, autoload_with=testing.db)
            self.assert_tables_equal(users, reflected_users)
            self.assert_tables_equal(addresses, reflected_addresses)
        finally:
            meta.drop_all()

    def test_identity(self):
        # IDENTITY(start, increment) must survive reflection as a Sequence.
        meta = MetaData(testing.db)
        table = Table(
            'identity_test', meta,
            Column('col1', Integer, Sequence('fred', 2, 3), primary_key=True)
        )
        table.create()
        meta2 = MetaData(testing.db)
        try:
            table2 = Table('identity_test', meta2, autoload=True)
            # `sequence` is the default object itself when it is a Sequence,
            # else False (the isinstance-and idiom).
            sequence = isinstance(table2.c['col1'].default, schema.Sequence) \
                and table2.c['col1'].default
            assert sequence.start == 2
            assert sequence.increment == 3
        finally:
            table.drop()
class QueryUnicodeTest(TestBase):
    # Python 2 test: verifies convert_unicode returns `unicode` objects.
    __only_on__ = 'mssql'

    def test_convert_unicode(self):
        meta = MetaData(testing.db)
        t1 = Table('unitest_table', meta,
                   Column('id', Integer, primary_key=True),
                   Column('descr', mssql.MSText(convert_unicode=True)))
        meta.create_all()
        con = testing.db.connect()

        # encode in UTF-8 (string object) because this is the default dialect encoding
        con.execute(u"insert into unitest_table values ('bien mangé')".encode('UTF-8'))

        try:
            r = t1.select().execute().first()
            assert isinstance(r[1], unicode), '%s is %s instead of unicode, working on %s' % (
                r[1], type(r[1]), meta.bind)
        finally:
            meta.drop_all()
class QueryTest(TestBase):
    # Execution tests against a live MSSQL database.
    __only_on__ = 'mssql'

    def test_fetchid_trigger(self):
        # An INSERT trigger on t1 also inserts into identity table t2; the
        # dialect must still report t1's own primary key.  implicit_returning
        # is disabled on t1 to exercise the scope_identity path.
        meta = MetaData(testing.db)
        t1 = Table('t1', meta,
                Column('id', Integer, Sequence('fred', 100, 1), primary_key=True),
                Column('descr', String(200)),
                implicit_returning = False
                )
        t2 = Table('t2', meta,
                Column('id', Integer, Sequence('fred', 200, 1), primary_key=True),
                Column('descr', String(200)))
        meta.create_all()
        con = testing.db.connect()
        con.execute("""create trigger paj on t1 for insert as
            insert into t2 (descr) select descr from inserted""")

        try:
            tr = con.begin()
            r = con.execute(t2.insert(), descr='hello')
            self.assert_(r.inserted_primary_key == [200])
            r = con.execute(t1.insert(), descr='hello')
            self.assert_(r.inserted_primary_key == [100])
        finally:
            tr.commit()
            con.execute("""drop trigger paj""")
            meta.drop_all()

    def test_insertid_schema(self):
        meta = MetaData(testing.db)
        con = testing.db.connect()
        con.execute('create schema paj')
        tbl = Table('test', meta, Column('id', Integer, primary_key=True), schema='paj')
        tbl.create()
        try:
            tbl.insert().execute({'id':1})
        finally:
            tbl.drop()
            con.execute('drop schema paj')

    def test_returning_no_autoinc(self):
        meta = MetaData(testing.db)
        table = Table('t1', meta, Column('id', Integer, primary_key=True), Column('data', String(50)))
        table.create()
        try:
            result = table.insert().values(id=1, data=func.lower("SomeString")).returning(table.c.id, table.c.data).execute()
            eq_(result.fetchall(), [(1, 'somestring',)])
        finally:
            # this will hang if the "SET IDENTITY_INSERT t1 OFF" occurs before the
            # result is fetched
            table.drop()

    def test_delete_schema(self):
        meta = MetaData(testing.db)
        con = testing.db.connect()
        con.execute('create schema paj')
        tbl = Table('test', meta, Column('id', Integer, primary_key=True), schema='paj')
        tbl.create()
        try:
            tbl.insert().execute({'id':1})
            tbl.delete(tbl.c.id == 1).execute()
        finally:
            tbl.drop()
            con.execute('drop schema paj')

    def test_insertid_reserved(self):
        # 'select' is a reserved word: identity-insert must quote it.
        meta = MetaData(testing.db)
        table = Table(
            'select', meta,
            Column('col', Integer, primary_key=True)
        )
        table.create()
        meta2 = MetaData(testing.db)  # NOTE(review): unused local
        try:
            table.insert().execute(col=7)
        finally:
            table.drop()
class Foo(object):
    """Simple attribute bag: each keyword argument becomes an instance attribute."""

    def __init__(self, **kw):
        for name, value in kw.items():
            setattr(self, name, value)
class GenerativeQueryTest(TestBase):
    # ORM slicing tests: MSSQL has no native LIMIT/OFFSET, so Query slices
    # exercise the dialect's emulation.
    __only_on__ = 'mssql'

    @classmethod
    def setup_class(cls):
        # module-level globals shared with the test methods below
        global foo, metadata
        metadata = MetaData(testing.db)
        foo = Table('foo', metadata,
                    Column('id', Integer, Sequence('foo_id_seq'),
                           primary_key=True),
                    Column('bar', Integer),
                    Column('range', Integer))

        mapper(Foo, foo)
        metadata.create_all()

        sess = create_session(bind=testing.db)
        for i in range(100):
            sess.add(Foo(bar=i, range=i%10))
        sess.flush()

    @classmethod
    def teardown_class(cls):
        metadata.drop_all()
        clear_mappers()

    def test_slice_mssql(self):
        sess = create_session(bind=testing.db)
        query = sess.query(Foo)
        orig = query.all()
        assert list(query[:10]) == orig[:10]
        # Slicing twice checks the emulated LIMIT query is repeatable --
        # presumably intentional duplication.
        assert list(query[:10]) == orig[:10]
class SchemaTest(TestBase):
    # DDL nullability rendering: how column specs emit NULL / NOT NULL /
    # nothing, depending on Column.nullable.

    def setup(self):
        t = Table('sometable', MetaData(),
                  Column('pk_column', Integer),
                  Column('test_column', String)
                  )
        self.column = t.c.test_column
        dialect = mssql.dialect()
        self.ddl_compiler = dialect.ddl_compiler(dialect, schema.CreateTable(t))

    def _column_spec(self):
        # Render just the column portion of the CREATE TABLE statement.
        return self.ddl_compiler.get_column_specification(self.column)

    def test_that_mssql_default_nullability_emits_null(self):
        eq_("test_column VARCHAR NULL", self._column_spec())

    def test_that_mssql_none_nullability_does_not_emit_nullability(self):
        # nullable=None means "say nothing and take the server default".
        self.column.nullable = None
        eq_("test_column VARCHAR", self._column_spec())

    def test_that_mssql_specified_nullable_emits_null(self):
        self.column.nullable = True
        eq_("test_column VARCHAR NULL", self._column_spec())

    def test_that_mssql_specified_not_nullable_emits_not_null(self):
        self.column.nullable = False
        eq_("test_column VARCHAR NOT NULL", self._column_spec())
def full_text_search_missing():
    """Test if full text search is not implemented and return False if
    it is and True otherwise.

    Probes by attempting to create a default fulltext catalog; when the
    probe succeeds the catalog is left in place and dropped again by
    MatchTest.teardown_class.
    """
    # Connect outside the try so a failed connect propagates directly
    # instead of triggering a NameError on `connection` in the finally.
    connection = testing.db.connect()
    try:
        try:
            connection.execute("CREATE FULLTEXT CATALOG Catalog AS DEFAULT")
            return False
        except Exception:
            # Any database error here (e.g. FTS components not installed)
            # is taken to mean full-text search is unavailable.  Narrowed
            # from a bare `except:` so KeyboardInterrupt/SystemExit are no
            # longer swallowed.
            return True
    finally:
        connection.close()
class MatchTest(TestBase, AssertsCompiledSQL):
__only_on__ = 'mssql'
__skip_if__ = (full_text_search_missing, )
@classmethod
def setup_class(cls):
global metadata, cattable, matchtable
metadata = MetaData(testing.db)
cattable = Table('cattable', metadata,
Column('id', Integer),
Column('description', String(50)),
PrimaryKeyConstraint('id', name='PK_cattable'),
)
matchtable = Table('matchtable', metadata,
Column('id', Integer),
Column('title', String(200)),
Column('category_id', Integer, ForeignKey('cattable.id')),
PrimaryKeyConstraint('id', name='PK_matchtable'),
)
DDL("""CREATE FULLTEXT INDEX
ON cattable (description)
KEY INDEX PK_cattable"""
).execute_at('after-create', matchtable)
DDL("""CREATE FULLTEXT INDEX
ON matchtable (title)
KEY INDEX PK_matchtable"""
).execute_at('after-create', matchtable)
metadata.create_all()
cattable.insert().execute([
{'id': 1, 'description': 'Python'},
{'id': 2, 'description': 'Ruby'},
])
matchtable.insert().execute([
{'id': 1, 'title': 'Agile Web Development with Rails', 'category_id': 2},
{'id': 2, 'title': 'Dive Into Python', 'category_id': 1},
{'id': 3, 'title': "Programming Matz's Ruby", 'category_id': 2},
{'id': 4, 'title': 'The Definitive Guide to Django', 'category_id': 1},
{'id': 5, 'title': 'Python in a Nutshell', 'category_id': 1}
])
DDL("WAITFOR DELAY '00:00:05'").execute(bind=engines.testing_engine())
@classmethod
def teardown_class(cls):
metadata.drop_all()
connection = testing.db.connect()
connection.execute("DROP FULLTEXT CATALOG Catalog")
connection.close()
def test_expression(self):
self.assert_compile(matchtable.c.title.match('somstr'), "CONTAINS (matchtable.title, ?)")
def test_simple_match(self):
results = matchtable.select().where(matchtable.c.title.match('python')).order_by(matchtable.c.id).execute().fetchall()
eq_([2, 5], [r.id for r in results])
def test_simple_match_with_apostrophe(self):
results = matchtable.select().where(matchtable.c.title.match("Matz's")).execute().fetchall()
eq_([3], [r.id for r in results])
def test_simple_prefix_match(self):
results = matchtable.select().where(matchtable.c.title.match('"nut*"')).execute().fetchall()
eq_([5], [r.id for r in results])
def test_simple_inflectional_match(self):
results = matchtable.select().where(matchtable.c.title.match('FORMSOF(INFLECTIONAL, "dives")')).execute().fetchall()
eq_([2], [r.id for r in results])
def test_or_match(self):
results1 = matchtable.select().where(or_(matchtable.c.title.match('nutshell'),
matchtable.c.title.match('ruby'))
).order_by(matchtable.c.id).execute().fetchall()
eq_([3, 5], [r.id for r in results1])
results2 = matchtable.select().where(matchtable.c.title.match('nutshell OR ruby'),
).order_by(matchtable.c.id).execute().fetchall()
eq_([3, 5], [r.id for r in results2])
def test_and_match(self):
    """AND via two match() clauses and via a single 'AND' string agree."""
    query = matchtable.select().where(
        and_(matchtable.c.title.match('python'),
             matchtable.c.title.match('nutshell')))
    eq_([5], [row.id for row in query.execute().fetchall()])
    query = matchtable.select().where(
        matchtable.c.title.match('python AND nutshell'))
    eq_([5], [row.id for row in query.execute().fetchall()])
def test_match_across_joins(self):
    """Full-text predicates on two joined tables can be combined."""
    query = (matchtable.select()
             .where(and_(cattable.c.id == matchtable.c.category_id,
                         or_(cattable.c.description.match('Ruby'),
                             matchtable.c.title.match('nutshell'))))
             .order_by(matchtable.c.id))
    eq_([1, 3, 5], [row.id for row in query.execute().fetchall()])
class ParseConnectTest(TestBase, AssertsCompiledSQL):
    """Verify pyodbc ODBC connection-string generation from SQLAlchemy URLs.

    No database connection is made; only the output of
    ``create_connect_args`` is inspected.
    """
    __only_on__ = 'mssql'

    @classmethod
    def setup_class(cls):
        # A bare dialect instance suffices; nothing connects.
        global dialect
        dialect = pyodbc.MSDialect_pyodbc()

    def test_pyodbc_connect_dsn_trusted(self):
        # DSN host with no credentials -> Windows integrated authentication.
        u = url.make_url('mssql://mydsn')
        connection = dialect.create_connect_args(u)
        eq_([['dsn=mydsn;Trusted_Connection=Yes'], {}], connection)

    def test_pyodbc_connect_old_style_dsn_trusted(self):
        # DSN supplied as a query argument instead of the host portion.
        u = url.make_url('mssql:///?dsn=mydsn')
        connection = dialect.create_connect_args(u)
        eq_([['dsn=mydsn;Trusted_Connection=Yes'], {}], connection)

    def test_pyodbc_connect_dsn_non_trusted(self):
        # Credentials present -> SQL Server authentication (UID/PWD).
        u = url.make_url('mssql://username:password@mydsn')
        connection = dialect.create_connect_args(u)
        eq_([['dsn=mydsn;UID=username;PWD=password'], {}], connection)

    def test_pyodbc_connect_dsn_extra(self):
        # Extra query arguments are appended to the ODBC string verbatim.
        u = url.make_url('mssql://username:password@mydsn/?LANGUAGE=us_english&foo=bar')
        connection = dialect.create_connect_args(u)
        eq_([['dsn=mydsn;UID=username;PWD=password;LANGUAGE=us_english;foo=bar'], {}], connection)

    def test_pyodbc_connect(self):
        # Host + database without a DSN -> explicit DRIVER= string.
        u = url.make_url('mssql://username:password@hostspec/database')
        connection = dialect.create_connect_args(u)
        eq_([['DRIVER={SQL Server};Server=hostspec;Database=database;UID=username;PWD=password'], {}], connection)

    def test_pyodbc_connect_comma_port(self):
        # An explicit URL port renders MSSQL-style as "Server=host,port".
        u = url.make_url('mssql://username:password@hostspec:12345/database')
        connection = dialect.create_connect_args(u)
        eq_([['DRIVER={SQL Server};Server=hostspec,12345;Database=database;UID=username;PWD=password'], {}], connection)

    def test_pyodbc_connect_config_port(self):
        # A 'port' query argument is passed through, not folded into Server=.
        u = url.make_url('mssql://username:password@hostspec/database?port=12345')
        connection = dialect.create_connect_args(u)
        eq_([['DRIVER={SQL Server};Server=hostspec;Database=database;UID=username;PWD=password;port=12345'], {}], connection)

    def test_pyodbc_extra_connect(self):
        # Query-argument ordering is dict-dependent; accept either rendering.
        u = url.make_url('mssql://username:password@hostspec/database?LANGUAGE=us_english&foo=bar')
        connection = dialect.create_connect_args(u)
        eq_(connection[1], {})
        eq_(connection[0][0] in
            ('DRIVER={SQL Server};Server=hostspec;Database=database;UID=username;PWD=password;foo=bar;LANGUAGE=us_english',
             'DRIVER={SQL Server};Server=hostspec;Database=database;UID=username;PWD=password;LANGUAGE=us_english;foo=bar'), True)

    def test_pyodbc_odbc_connect(self):
        # A raw URL-encoded 'odbc_connect' argument is decoded and used as-is.
        u = url.make_url('mssql:///?odbc_connect=DRIVER%3D%7BSQL+Server%7D%3BServer%3Dhostspec%3BDatabase%3Ddatabase%3BUID%3Dusername%3BPWD%3Dpassword')
        connection = dialect.create_connect_args(u)
        eq_([['DRIVER={SQL Server};Server=hostspec;Database=database;UID=username;PWD=password'], {}], connection)

    def test_pyodbc_odbc_connect_with_dsn(self):
        u = url.make_url('mssql:///?odbc_connect=dsn%3Dmydsn%3BDatabase%3Ddatabase%3BUID%3Dusername%3BPWD%3Dpassword')
        connection = dialect.create_connect_args(u)
        eq_([['dsn=mydsn;Database=database;UID=username;PWD=password'], {}], connection)

    def test_pyodbc_odbc_connect_ignores_other_values(self):
        # When 'odbc_connect' is given, URL host/db/credentials are ignored.
        u = url.make_url('mssql://userdiff:passdiff@localhost/dbdiff?odbc_connect=DRIVER%3D%7BSQL+Server%7D%3BServer%3Dhostspec%3BDatabase%3Ddatabase%3BUID%3Dusername%3BPWD%3Dpassword')
        connection = dialect.create_connect_args(u)
        eq_([['DRIVER={SQL Server};Server=hostspec;Database=database;UID=username;PWD=password'], {}], connection)
class TypesTest(TestBase, AssertsExecutionResults, ComparesTables):
__only_on__ = 'mssql'
@classmethod
def setup_class(cls):
    """Bind a fresh MetaData to the testing engine for this class's tests."""
    global metadata
    metadata = MetaData(testing.db)
def teardown(self):
    # Each test creates its own tables; drop everything between tests.
    metadata.drop_all()
@testing.fails_on_everything_except('mssql+pyodbc', 'this is some pyodbc-specific feature')
def test_decimal_notation(self):
    """Round-trip a wide range of Decimal literals through NUMERIC(38, 20)."""
    import decimal
    numeric_table = Table('numeric_table', metadata,
        Column('id', Integer, Sequence('numeric_id_seq', optional=True), primary_key=True),
        Column('numericcol', Numeric(precision=38, scale=20, asdecimal=True))
    )
    metadata.create_all()
    # Literals deliberately cover scientific notation, negative zero,
    # leading zeros and extreme exponents.
    test_items = [decimal.Decimal(d) for d in '1500000.00000000000000000000',
        '-1500000.00000000000000000000', '1500000',
        '0.0000000000000000002', '0.2', '-0.0000000000000000002', '-2E-2',
        '156666.458923543', '-156666.458923543', '1', '-1', '-1234', '1234',
        '2E-12', '4E8', '3E-6', '3E-7', '4.1', '1E-1', '1E-2', '1E-3',
        '1E-4', '1E-5', '1E-6', '1E-7', '1E-1', '1E-8', '0.2732E2',
        '-0.2432E2', '4.35656E2',
        '-02452E-2', '45125E-2',
        '1234.58965E-2', '1.521E+15', '-1E-25', '1E-25', '1254E-25', '-1203E-25',
        '0', '-0.00', '-0', '4585E12', '000000000000000000012', '000000000000.32E12',
        '00000000000000.1E+12', '000000000000.2E-32']
    for value in test_items:
        numeric_table.insert().execute(numericcol=value)
    # Every stored value must read back as one of the inserted Decimals.
    for value in select([numeric_table.c.numericcol]).execute():
        assert value[0] in test_items, "%r not in test_items" % value[0]
def test_float(self):
float_table = Table('float_table', metadata,
Column('id', Integer, Sequence('numeric_id_seq', optional=True), primary_key=True),
Column('floatcol', Float())
)
metadata.create_all()
try:
test_items = [float(d) for d in '1500000.00000000000000000000',
'-1500000.00000000000000000000', '1500000',
'0.0000000000000000002', '0.2', '-0.0000000000000000002',
'156666.458923543', '-156666.458923543', '1', '-1', '1234',
'2E-12', '4E8', '3E-6', '3E-7', '4.1', '1E-1', '1E-2', '1E-3',
'1E-4', '1E-5', '1E-6', '1E-7', '1E-8']
for value in test_items:
float_table.insert().execute(floatcol=value)
except Exception, e:
raise e
def test_money(self):
    """Exercise type specification for money types.

    MONEY and SMALLMONEY must render with those exact DDL keywords and
    the table must be creatable.  The original wrapped ``create()`` in a
    useless ``try: ...; assert True; except: raise`` block; that dead
    scaffolding is removed — exceptions propagate naturally.
    """
    columns = [
        # column type, args, kwargs, expected ddl
        (mssql.MSMoney, [], {},
         'MONEY'),
        (mssql.MSSmallMoney, [], {},
         'SMALLMONEY'),
    ]
    table_args = ['test_mssql_money', metadata]
    for index, spec in enumerate(columns):
        type_, args, kw, res = spec
        table_args.append(Column('c%s' % index, type_(*args, **kw), nullable=None))
    money_table = Table(*table_args)
    dialect = mssql.dialect()
    gen = dialect.ddl_compiler(dialect, schema.CreateTable(money_table))
    for col in money_table.c:
        index = int(col.name[1:])
        testing.eq_(gen.get_column_specification(col),
                    "%s %s" % (col.name, columns[index][3]))
        self.assert_(repr(col))
    money_table.create(checkfirst=True)
    money_table.drop()
def test_dates(self):
    "Exercise type specification for date types."
    columns = [
        # column type, args, kwargs, expected ddl, server-version requirement
        (mssql.MSDateTime, [], {},
         'DATETIME', []),
        (types.DATE, [], {},
         'DATE', ['>=', (10,)]),
        (types.Date, [], {},
         'DATE', ['>=', (10,)]),
        (types.Date, [], {},
         'DATETIME', ['<', (10,)], mssql.MSDateTime),
        (mssql.MSDate, [], {},
         'DATE', ['>=', (10,)]),
        (mssql.MSDate, [], {},
         'DATETIME', ['<', (10,)], mssql.MSDateTime),
        (types.TIME, [], {},
         'TIME', ['>=', (10,)]),
        (types.Time, [], {},
         'TIME', ['>=', (10,)]),
        (mssql.MSTime, [], {},
         'TIME', ['>=', (10,)]),
        (mssql.MSTime, [1], {},
         'TIME(1)', ['>=', (10,)]),
        (types.Time, [], {},
         'DATETIME', ['<', (10,)], mssql.MSDateTime),
        (mssql.MSTime, [], {},
         'TIME', ['>=', (10,)]),
        (mssql.MSSmallDateTime, [], {},
         'SMALLDATETIME', []),
        (mssql.MSDateTimeOffset, [], {},
         'DATETIMEOFFSET', ['>=', (10,)]),
        (mssql.MSDateTimeOffset, [1], {},
         'DATETIMEOFFSET(1)', ['>=', (10,)]),
        (mssql.MSDateTime2, [], {},
         'DATETIME2', ['>=', (10,)]),
        (mssql.MSDateTime2, [1], {},
         'DATETIME2(1)', ['>=', (10,)]),
    ]
    table_args = ['test_mssql_dates', metadata]
    for index, spec in enumerate(columns):
        type_, args, kw, res, requires = spec[0:5]
        # NOTE(review): a column is included when it has no version
        # requirement, or when its requirement is excluded for the current
        # server -- presumably so version-specific specs are only tested
        # where applicable; confirm against testing._is_excluded.
        if (requires and testing._is_excluded('mssql', *requires)) or not requires:
            table_args.append(Column('c%s' % index, type_(*args, **kw), nullable=None))
    dates_table = Table(*table_args)
    gen = testing.db.dialect.ddl_compiler(testing.db.dialect, schema.CreateTable(dates_table))
    for col in dates_table.c:
        index = int(col.name[1:])
        testing.eq_(gen.get_column_specification(col),
                    "%s %s" % (col.name, columns[index][3]))
        self.assert_(repr(col))
    dates_table.create(checkfirst=True)
    # Reflect the table and verify the round-tripped column types.
    reflected_dates = Table('test_mssql_dates', MetaData(testing.db), autoload=True)
    for col in reflected_dates.c:
        self.assert_types_base(col, dates_table.c[col.key])
def test_date_roundtrip(self):
    """Insert and fetch date/time/datetime values, checking Python types."""
    t = Table('test_dates', metadata,
              Column('id', Integer,
                     Sequence('datetest_id_seq', optional=True),
                     primary_key=True),
              Column('adate', Date),
              Column('atime', Time),
              Column('adatetime', DateTime))
    metadata.create_all()
    d1 = datetime.date(2007, 10, 30)
    t1 = datetime.time(11, 2, 32)
    d2 = datetime.datetime(2007, 10, 30, 11, 2, 32)
    t.insert().execute(adate=d1, adatetime=d2, atime=t1)
    # A full datetime is accepted for the narrower date/time columns too.
    t.insert().execute(adate=d2, adatetime=d2, atime=d2)
    x = t.select().execute().fetchall()[0]
    self.assert_(x.adate.__class__ == datetime.date)
    self.assert_(x.atime.__class__ == datetime.time)
    self.assert_(x.adatetime.__class__ == datetime.datetime)
    t.delete().execute()
    t.insert().execute(adate=d1, adatetime=d2, atime=t1)
    eq_(select([t.c.adate, t.c.atime, t.c.adatetime], t.c.adate == d1).execute().fetchall(), [(d1, t1, d2)])
def test_binary(self):
    "Exercise type specification for binary types."
    columns = [
        # column type, args, kwargs, expected ddl
        (mssql.MSBinary, [], {},
         'BINARY'),
        (mssql.MSBinary, [10], {},
         'BINARY(10)'),
        (types.BINARY, [], {},
         'BINARY'),
        (types.BINARY, [10], {},
         'BINARY(10)'),
        (mssql.MSVarBinary, [], {},
         'VARBINARY'),
        (mssql.MSVarBinary, [10], {},
         'VARBINARY(10)'),
        (types.VARBINARY, [], {},
         'VARBINARY'),
        (types.VARBINARY, [10], {},
         'VARBINARY(10)'),
        (mssql.MSImage, [], {},
         'IMAGE'),
        (mssql.IMAGE, [], {},
         'IMAGE'),
        (types.LargeBinary, [], {},
         'IMAGE'),
    ]
    table_args = ['test_mssql_binary', metadata]
    for index, spec in enumerate(columns):
        type_, args, kw, res = spec
        table_args.append(Column('c%s' % index, type_(*args, **kw), nullable=None))
    binary_table = Table(*table_args)
    dialect = mssql.dialect()
    gen = dialect.ddl_compiler(dialect, schema.CreateTable(binary_table))
    # Generated DDL for each column must match the expected string.
    for col in binary_table.c:
        index = int(col.name[1:])
        testing.eq_(gen.get_column_specification(col),
                    "%s %s" % (col.name, columns[index][3]))
        self.assert_(repr(col))
    metadata.create_all()
    # Reflect the table back; type classes and lengths must survive.
    reflected_binary = Table('test_mssql_binary', MetaData(testing.db), autoload=True)
    for col in reflected_binary.c:
        c1 = testing.db.dialect.type_descriptor(col.type).__class__
        c2 = testing.db.dialect.type_descriptor(binary_table.c[col.name].type).__class__
        assert issubclass(c1, c2), "%r is not a subclass of %r" % (c1, c2)
        if binary_table.c[col.name].type.length:
            testing.eq_(col.type.length, binary_table.c[col.name].type.length)
def test_boolean(self):
    "Exercise type specification for boolean type."
    columns = [
        # column type, args, kwargs, expected ddl
        (Boolean, [], {},
         'BIT'),
    ]
    table_args = ['test_mssql_boolean', metadata]
    for index, spec in enumerate(columns):
        type_, args, kw, res = spec
        table_args.append(Column('c%s' % index, type_(*args, **kw), nullable=None))
    boolean_table = Table(*table_args)
    dialect = mssql.dialect()
    gen = dialect.ddl_compiler(dialect, schema.CreateTable(boolean_table))
    # Boolean renders as BIT on MSSQL.
    for col in boolean_table.c:
        index = int(col.name[1:])
        testing.eq_(gen.get_column_specification(col),
                    "%s %s" % (col.name, columns[index][3]))
        self.assert_(repr(col))
    metadata.create_all()
def test_numeric(self):
    "Exercise type specification and options for numeric types."
    columns = [
        # column type, args, kwargs, expected ddl
        (mssql.MSNumeric, [], {},
         'NUMERIC'),
        (mssql.MSNumeric, [None], {},
         'NUMERIC'),
        (mssql.MSNumeric, [12, 4], {},
         'NUMERIC(12, 4)'),
        (types.Float, [], {},
         'FLOAT'),
        (types.Float, [None], {},
         'FLOAT'),
        (types.Float, [12], {},
         'FLOAT(12)'),
        (mssql.MSReal, [], {},
         'REAL'),
        (types.Integer, [], {},
         'INTEGER'),
        (types.BigInteger, [], {},
         'BIGINT'),
        (mssql.MSTinyInteger, [], {},
         'TINYINT'),
        (types.SmallInteger, [], {},
         'SMALLINT'),
    ]
    table_args = ['test_mssql_numeric', metadata]
    for index, spec in enumerate(columns):
        type_, args, kw, res = spec
        table_args.append(Column('c%s' % index, type_(*args, **kw), nullable=None))
    numeric_table = Table(*table_args)
    dialect = mssql.dialect()
    gen = dialect.ddl_compiler(dialect, schema.CreateTable(numeric_table))
    for col in numeric_table.c:
        index = int(col.name[1:])
        testing.eq_(gen.get_column_specification(col),
                    "%s %s" % (col.name, columns[index][3]))
        self.assert_(repr(col))
    metadata.create_all()
def test_char(self):
    """Exercise COLLATE-ish options on string types."""
    columns = [
        # column type, args, kwargs, expected ddl
        (mssql.MSChar, [], {},
         'CHAR'),
        (mssql.MSChar, [1], {},
         'CHAR(1)'),
        (mssql.MSChar, [1], {'collation': 'Latin1_General_CI_AS'},
         'CHAR(1) COLLATE Latin1_General_CI_AS'),
        (mssql.MSNChar, [], {},
         'NCHAR'),
        (mssql.MSNChar, [1], {},
         'NCHAR(1)'),
        (mssql.MSNChar, [1], {'collation': 'Latin1_General_CI_AS'},
         'NCHAR(1) COLLATE Latin1_General_CI_AS'),
        (mssql.MSString, [], {},
         'VARCHAR'),
        (mssql.MSString, [1], {},
         'VARCHAR(1)'),
        (mssql.MSString, [1], {'collation': 'Latin1_General_CI_AS'},
         'VARCHAR(1) COLLATE Latin1_General_CI_AS'),
        (mssql.MSNVarchar, [], {},
         'NVARCHAR'),
        (mssql.MSNVarchar, [1], {},
         'NVARCHAR(1)'),
        (mssql.MSNVarchar, [1], {'collation': 'Latin1_General_CI_AS'},
         'NVARCHAR(1) COLLATE Latin1_General_CI_AS'),
        (mssql.MSText, [], {},
         'TEXT'),
        (mssql.MSText, [], {'collation': 'Latin1_General_CI_AS'},
         'TEXT COLLATE Latin1_General_CI_AS'),
        (mssql.MSNText, [], {},
         'NTEXT'),
        (mssql.MSNText, [], {'collation': 'Latin1_General_CI_AS'},
         'NTEXT COLLATE Latin1_General_CI_AS'),
    ]
    table_args = ['test_mssql_charset', metadata]
    for index, spec in enumerate(columns):
        type_, args, kw, res = spec
        table_args.append(Column('c%s' % index, type_(*args, **kw), nullable=None))
    charset_table = Table(*table_args)
    dialect = mssql.dialect()
    gen = dialect.ddl_compiler(dialect, schema.CreateTable(charset_table))
    for col in charset_table.c:
        index = int(col.name[1:])
        testing.eq_(gen.get_column_specification(col),
                    "%s %s" % (col.name, columns[index][3]))
        self.assert_(repr(col))
    metadata.create_all()
def test_timestamp(self):
    """Exercise TIMESTAMP column."""
    dialect = mssql.dialect()
    spec, expected = (TIMESTAMP, 'TIMESTAMP')
    t = Table('mssql_ts', metadata,
              Column('id', Integer, primary_key=True),
              Column('t', spec, nullable=None))
    gen = dialect.ddl_compiler(dialect, schema.CreateTable(t))
    testing.eq_(gen.get_column_specification(t.c.t), "t %s" % expected)
    self.assert_(repr(t.c.t))
    t.create(checkfirst=True)
def test_autoincrement(self):
    """IDENTITY behavior: only 'int_y' integer PK columns auto-increment."""
    Table('ai_1', metadata,
          Column('int_y', Integer, primary_key=True),
          Column('int_n', Integer, DefaultClause('0'),
                 primary_key=True, autoincrement=False))
    Table('ai_2', metadata,
          Column('int_y', Integer, primary_key=True),
          Column('int_n', Integer, DefaultClause('0'),
                 primary_key=True, autoincrement=False))
    Table('ai_3', metadata,
          Column('int_n', Integer, DefaultClause('0'),
                 primary_key=True, autoincrement=False),
          Column('int_y', Integer, primary_key=True))
    Table('ai_4', metadata,
          Column('int_n', Integer, DefaultClause('0'),
                 primary_key=True, autoincrement=False),
          Column('int_n2', Integer, DefaultClause('0'),
                 primary_key=True, autoincrement=False))
    Table('ai_5', metadata,
          Column('int_y', Integer, primary_key=True),
          Column('int_n', Integer, DefaultClause('0'),
                 primary_key=True, autoincrement=False))
    Table('ai_6', metadata,
          Column('o1', String(1), DefaultClause('x'),
                 primary_key=True),
          Column('int_y', Integer, primary_key=True))
    Table('ai_7', metadata,
          Column('o1', String(1), DefaultClause('x'),
                 primary_key=True),
          Column('o2', String(1), DefaultClause('x'),
                 primary_key=True),
          Column('int_y', Integer, primary_key=True))
    Table('ai_8', metadata,
          Column('o1', String(1), DefaultClause('x'),
                 primary_key=True),
          Column('o2', String(1), DefaultClause('x'),
                 primary_key=True))
    metadata.create_all()
    table_names = ['ai_1', 'ai_2', 'ai_3', 'ai_4',
                   'ai_5', 'ai_6', 'ai_7', 'ai_8']
    mr = MetaData(testing.db)
    # NOTE(review): indentation was reconstructed -- the per-engine insert
    # loop below is nested inside the per-table loop, matching its use of
    # ``tbl``; confirm against upstream.
    for name in table_names:
        tbl = Table(name, mr, autoload=True)
        tbl = metadata.tables[name]
        for c in tbl.c:
            # By convention 'int_y*' columns should be the IDENTITY column,
            # 'int_n*' columns must not be.
            if c.name.startswith('int_y'):
                assert c.autoincrement, name
                assert tbl._autoincrement_column is c, name
            elif c.name.startswith('int_n'):
                assert not c.autoincrement, name
                assert tbl._autoincrement_column is not c, name
        # mxodbc can't handle scope_identity() with DEFAULT VALUES
        if testing.db.driver == 'mxodbc':
            eng = [engines.testing_engine(options={'implicit_returning':True})]
        else:
            eng = [
                engines.testing_engine(options={'implicit_returning':False}),
                engines.testing_engine(options={'implicit_returning':True}),
            ]
        for counter, engine in enumerate(eng):
            engine.execute(tbl.insert())
            if 'int_y' in tbl.c:
                assert engine.scalar(select([tbl.c.int_y])) == counter + 1
                assert list(engine.execute(tbl.select()).first()).count(counter + 1) == 1
            else:
                assert 1 not in list(engine.execute(tbl.select()).first())
            engine.execute(tbl.delete())
class BinaryTest(TestBase, AssertsExecutionResults):
    """Test the Binary and VarBinary types"""
    __only_on__ = 'mssql'

    @classmethod
    def setup_class(cls):
        global binary_table, MyPickleType

        # TypeDecorator that tags values on the way in and on the way out,
        # so the test can verify which conversion hooks actually ran.
        class MyPickleType(types.TypeDecorator):
            impl = PickleType

            def process_bind_param(self, value, dialect):
                if value:
                    value.stuff = 'this is modified stuff'
                return value

            def process_result_value(self, value, dialect):
                if value:
                    value.stuff = 'this is the right stuff'
                return value

        binary_table = Table('binary_table', MetaData(testing.db),
            Column('primary_id', Integer, Sequence('binary_id_seq', optional=True), primary_key=True),
            Column('data', mssql.MSVarBinary(8000)),
            Column('data_image', mssql.MSImage),
            Column('data_slice', types.BINARY(100)),
            Column('misc', String(30)),
            # construct PickleType with non-native pickle module, since cPickle uses relative module
            # loading and confuses this test's parent package 'sql' with the 'sqlalchemy.sql' package relative
            # to the 'types' module
            Column('pickled', PickleType),
            Column('mypickle', MyPickleType)
        )
        binary_table.create()

    def teardown(self):
        # Empty the table between tests; the table itself survives.
        binary_table.delete().execute()

    @classmethod
    def teardown_class(cls):
        binary_table.drop()

    def test_binary(self):
        """Round-trip raw and pickled binary data and verify contents."""
        testobj1 = pickleable.Foo('im foo 1')
        testobj2 = pickleable.Foo('im foo 2')
        testobj3 = pickleable.Foo('im foo 3')
        stream1 = self.load_stream('binary_data_one.dat')
        stream2 = self.load_stream('binary_data_two.dat')
        binary_table.insert().execute(primary_id=1, misc='binary_data_one.dat', data=stream1, data_image=stream1, data_slice=stream1[0:100], pickled=testobj1, mypickle=testobj3)
        binary_table.insert().execute(primary_id=2, misc='binary_data_two.dat', data=stream2, data_image=stream2, data_slice=stream2[0:99], pickled=testobj2)
        # TODO: pyodbc does not seem to accept "None" for a VARBINARY column (data=None).
        # error: [Microsoft][ODBC SQL Server Driver][SQL Server]Implicit conversion from
        # data type varchar to varbinary is not allowed. Use the CONVERT function to run this query. (257)
        #binary_table.insert().execute(primary_id=3, misc='binary_data_two.dat', data=None, data_image=None, data_slice=stream2[0:99], pickled=None)
        binary_table.insert().execute(primary_id=3, misc='binary_data_two.dat', data_image=None, data_slice=stream2[0:99], pickled=None)
        # Run the same assertions through both a Core select and a textual
        # query with an explicit typemap.
        for stmt in (
            binary_table.select(order_by=binary_table.c.primary_id),
            text("select * from binary_table order by binary_table.primary_id",
                 typemap=dict(data=mssql.MSVarBinary(8000), data_image=mssql.MSImage,
                              data_slice=types.BINARY(100), pickled=PickleType, mypickle=MyPickleType),
                 bind=testing.db)
        ):
            l = stmt.execute().fetchall()
            eq_(list(stream1), list(l[0]['data']))
            # BINARY(100) right-pads short values with NUL bytes.
            paddedstream = list(stream1[0:100])
            paddedstream.extend(['\x00'] * (100 - len(paddedstream)))
            eq_(paddedstream, list(l[0]['data_slice']))
            eq_(list(stream2), list(l[1]['data']))
            eq_(list(stream2), list(l[1]['data_image']))
            eq_(testobj1, l[0]['pickled'])
            eq_(testobj2, l[1]['pickled'])
            eq_(testobj3.moredata, l[0]['mypickle'].moredata)
            eq_(l[0]['mypickle'].stuff, 'this is the right stuff')

    def load_stream(self, name, len=3000):
        # ``len`` shadows the builtin; kept as-is for API compatibility.
        fp = open(os.path.join(os.path.dirname(__file__), "..", name), 'rb')
        stream = fp.read(len)
        fp.close()
        return stream
|
mit
|
Tranzystorek/servo
|
etc/servo_gdb.py
|
233
|
4284
|
# Copyright 2013 The Servo Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
"""
A set of simple pretty printers for gdb to make debugging Servo a bit easier.
To load these, you need to add something like the following to your .gdbinit file:
python
import sys
sys.path.insert(0, '/home/<path to git checkout>/servo/src/etc')
import servo_gdb
servo_gdb.register_printers(None)
end
"""
import gdb
# Print Au in both raw value and CSS pixels
class AuPrinter:
    """Pretty-print an Au (app unit) value as CSS pixels (60 Au per px)."""

    def __init__(self, val):
        self.val = val

    def to_string(self):
        raw = self.val.cast(gdb.lookup_type("i32"))
        return "{0}px".format(raw / 60.0)
# Print a U8 bitfield as binary
class BitFieldU8Printer:
    """Pretty-print a u8 bit-field in zero-padded binary notation."""

    def __init__(self, val):
        self.val = val

    def to_string(self):
        byte = self.val.cast(gdb.lookup_type("u8"))
        return "[{0:#010b}]".format(int(byte))
# Print a struct with fields as children
class ChildPrinter:
    """Expose each field of a struct value as a named gdb child."""

    def __init__(self, val):
        self.val = val

    def children(self):
        return [(field.name, self.val[field.name])
                for field in self.val.type.fields()]

    def to_string(self):
        # No scalar summary; the children carry all the information.
        return None
# Allow a trusted node to be dereferenced in the debugger
class TrustedNodeAddressPrinter:
    """Present a TrustedNodeAddress as a dereferenceable Node pointer."""

    def __init__(self, val):
        self.val = val

    def children(self):
        node_ptr = gdb.lookup_type("struct script::dom::node::Node").pointer()
        return [('Node', self.val.cast(node_ptr))]

    def to_string(self):
        return self.val.address
# Extract a node type ID from enum
class NodeTypeIdPrinter:
    """Pretty-print a NodeTypeId enum by naming its active variant."""

    def __init__(self, val):
        self.val = val

    def to_string(self):
        u8_ptr_type = gdb.lookup_type("u8").pointer()
        # The first byte of the enum's storage is its discriminant.
        enum_0 = self.val.address.cast(u8_ptr_type).dereference()
        enum_type = self.val.type.fields()[int(enum_0)].type
        name = str(enum_type)
        # BUG FIX: the original used name.lstrip('struct '), which strips a
        # *character set* {s,t,r,u,c,' '}, not the prefix -- e.g.
        # "struct script::..." would be mangled to "ipt::...".  Remove the
        # exact "struct " prefix instead.
        prefix = 'struct '
        if name.startswith(prefix):
            name = name[len(prefix):]
        return name
# Printer for std::Option<>
class OptionPrinter:
    """Pretty-print a Rust std::Option<T> as a Some/None child."""

    def __init__(self, val):
        self.val = val

    def is_some(self):
        """Return True when the Option's discriminant is non-zero."""
        # Get size of discriminator
        d_size = self.val.type.fields()[0].type.sizeof
        if d_size > 0 and d_size <= 8:
            # Read first byte to check if None or Some
            ptr = self.val.address.cast(gdb.lookup_type("unsigned char").pointer())
            discriminator = int(ptr.dereference())
            return discriminator != 0
        # BUG FIX: the original did ``raise "unhandled discriminator size"``;
        # raising a string is itself a TypeError on Python >= 2.6.  Raise a
        # real exception carrying the same message.
        raise ValueError("unhandled discriminator size")

    def children(self):
        if self.is_some():
            option_type = self.val.type
            # Get total size and size of value
            ptr = self.val.address.cast(gdb.lookup_type("unsigned char").pointer())
            t_size = option_type.sizeof
            value_type = option_type.fields()[1].type.fields()[1].type
            v_size = value_type.sizeof
            # The payload sits at the end of the Option's storage.
            data_ptr = (ptr + t_size - v_size).cast(value_type.pointer()).dereference()
            return [('Some', data_ptr)]
        return [('None', None)]

    def to_string(self):
        # No scalar summary; Some/None is reported via children().
        return None
# Useful for debugging when type is unknown
class TestPrinter:
    """Fallback printer: reports the type of a value it doesn't understand."""

    def __init__(self, val):
        self.val = val

    def to_string(self):
        type_name = str(self.val.type)
        return "[UNKNOWN - type = {0}]".format(type_name)
# Map from a type-name suffix to the printer class that handles it; matched
# by exact name or by "::<name>" suffix in lookup_servo_type().
type_map = [
    ('struct Au', AuPrinter),
    ('FlowFlags', BitFieldU8Printer),
    ('IntrinsicWidths', ChildPrinter),
    ('PlacementInfo', ChildPrinter),
    ('TrustedNodeAddress', TrustedNodeAddressPrinter),
    ('NodeTypeId', NodeTypeIdPrinter),
    ('Option', OptionPrinter),
]
def lookup_servo_type(val):
    """Return a pretty-printer instance for *val*, or None if unhandled."""
    name = str(val.type)
    for candidate, printer_cls in type_map:
        if name == candidate or name.endswith("::" + candidate):
            return printer_cls(val)
    # Swap this for ``TestPrinter(val)`` to debug unrecognized types.
    return None
def register_printers(obj):
    """Register Servo's pretty-printers with gdb (the argument is unused)."""
    gdb.pretty_printers.append(lookup_servo_type)
|
mpl-2.0
|
ccrook/Quantum-GIS
|
python/plugins/processing/algs/grass7/ext/i_cca.py
|
5
|
1756
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
i_cca.py
--------
Date : March 2016
Copyright : (C) 2016 by Médéric Ribreux
Email : medspx at medspx dot fr
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Médéric Ribreux'
__date__ = 'March 2016'
__copyright__ = '(C) 2016, Médéric Ribreux'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from .i import verifyRasterNum, regroupRasters, importSigFile
def checkParameterValuesBeforeExecuting(alg, parameters, context):
    """Validate that between 2 and 8 input rasters were supplied for i.cca."""
    return verifyRasterNum(alg, parameters, context, 'input', 2, 8)
def processCommand(alg, parameters, context):
    """Prepare the GRASS i.cca command: group rasters, import the signature
    file into the imagery group, then delegate to the default handler."""
    # Regroup rasters
    group, subgroup = regroupRasters(alg, parameters, context,
                                     'input', 'group', 'subgroup')
    # The signature file must be copied under the GRASS group/subgroup
    # directory; the command then refers to it by its short name.
    signatureFile = alg.parameterAsString(parameters, 'signature', context)
    shortSigFile = importSigFile(alg, group, subgroup, signatureFile)
    parameters['signature'] = shortSigFile
    # Handle other parameters
    alg.processCommand(parameters, context)
|
gpl-2.0
|
AnimeshSinha1309/WebsiteEdunet
|
WebsiteEdunet/env/Lib/site-packages/django/conf/locale/fi/formats.py
|
504
|
1390
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# Date, time and number formatting for the Finnish (fi) locale.
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. E Y'
TIME_FORMAT = 'G.i'
DATETIME_FORMAT = r'j. E Y \k\e\l\l\o G.i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'j.n.Y'
SHORT_DATETIME_FORMAT = 'j.n.Y G.i'
FIRST_DAY_OF_WEEK = 1  # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
    '%d.%m.%Y',  # '20.3.2014'
    '%d.%m.%y',  # '20.3.14'
]
DATETIME_INPUT_FORMATS = [
    '%d.%m.%Y %H.%M.%S',  # '20.3.2014 14.30.59'
    '%d.%m.%Y %H.%M.%S.%f',  # '20.3.2014 14.30.59.000200'
    '%d.%m.%Y %H.%M',  # '20.3.2014 14.30'
    '%d.%m.%Y',  # '20.3.2014'
    '%d.%m.%y %H.%M.%S',  # '20.3.14 14.30.59'
    '%d.%m.%y %H.%M.%S.%f',  # '20.3.14 14.30.59.000200'
    '%d.%m.%y %H.%M',  # '20.3.14 14.30'
    '%d.%m.%y',  # '20.3.14'
]
TIME_INPUT_FORMATS = [
    '%H.%M.%S',  # '14.30.59'
    '%H.%M.%S.%f',  # '14.30.59.000200'
    '%H.%M',  # '14.30'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0'  # Non-breaking space
NUMBER_GROUPING = 3
|
mit
|
tseaver/gcloud-python
|
datastore/google/cloud/datastore/key.py
|
3
|
21641
|
# Copyright 2014 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create / interact with Google Cloud Datastore keys."""
import base64
import copy
import six
from google.cloud.datastore_v1.proto import entity_pb2 as _entity_pb2
from google.cloud._helpers import _to_bytes
from google.cloud.datastore import _app_engine_key_pb2
# Message used when a legacy urlsafe key unexpectedly carries a database ID.
_DATABASE_ID_TEMPLATE = (
    'Received non-empty database ID: {!r}.\n'
    'urlsafe strings are not expected to encode a Reference that '
    'contains a database ID.')
# Message used when a path element sets both its ID and its name.
_BAD_ELEMENT_TEMPLATE = (
    'At most one of ID and name can be set on an element. Received '
    'id = {!r} and name = {!r}.')
# Message used when a non-terminal path element has neither ID nor name.
_EMPTY_ELEMENT = (
    'Exactly one of ID and name must be set on an element. '
    'Encountered an element with neither set that was not the last '
    'element of a path.')
class Key(object):
"""An immutable representation of a datastore Key.
.. testsetup:: key-ctor
from google.cloud import datastore
project = 'my-special-pony'
client = datastore.Client(project=project)
Key = datastore.Key
parent_key = client.key('Parent', 'foo')
To create a basic key directly:
.. doctest:: key-ctor
>>> Key('EntityKind', 1234, project=project)
<Key('EntityKind', 1234), project=...>
>>> Key('EntityKind', 'foo', project=project)
<Key('EntityKind', 'foo'), project=...>
Though typical usage comes via the
:meth:`~google.cloud.datastore.client.Client.key` factory:
.. doctest:: key-ctor
>>> client.key('EntityKind', 1234)
<Key('EntityKind', 1234), project=...>
>>> client.key('EntityKind', 'foo')
<Key('EntityKind', 'foo'), project=...>
To create a key with a parent:
.. doctest:: key-ctor
>>> client.key('Parent', 'foo', 'Child', 1234)
<Key('Parent', 'foo', 'Child', 1234), project=...>
>>> client.key('Child', 1234, parent=parent_key)
<Key('Parent', 'foo', 'Child', 1234), project=...>
To create a partial key:
.. doctest:: key-ctor
>>> client.key('Parent', 'foo', 'Child')
<Key('Parent', 'foo', 'Child'), project=...>
:type path_args: tuple of string and integer
:param path_args: May represent a partial (odd length) or full (even
length) key path.
:type kwargs: dict
:param kwargs: Keyword arguments to be passed in.
Accepted keyword arguments are
* namespace (string): A namespace identifier for the key.
* project (string): The project associated with the key.
* parent (:class:`~google.cloud.datastore.key.Key`): The parent of the key.
The project argument is required unless it has been set implicitly.
"""
def __init__(self, *path_args, **kwargs):
    """Build the key from positional path parts and keyword options.

    :type path_args: tuple
    :param path_args: Alternating kind (str) and ID/name parts; an odd
                      length produces a partial key.
    :type kwargs: dict
    :param kwargs: ``namespace``, ``project`` and ``parent`` (see class
                   docstring).
    """
    self._flat_path = path_args
    parent = self._parent = kwargs.get('parent')
    self._namespace = kwargs.get('namespace')
    project = kwargs.get('project')
    self._project = _validate_project(project, parent)
    # _flat_path, _parent, _namespace and _project must be set before
    # _combine_args() is called.
    self._path = self._combine_args()
def __eq__(self, other):
    """Compare two keys for equality.

    Incomplete keys never compare equal to any other key.  Completed
    keys compare equal when their path, project and namespace all match.

    :rtype: bool
    :returns: True if the keys compare equal, else False.
    """
    if not isinstance(other, Key):
        return NotImplemented
    if self.is_partial or other.is_partial:
        return False
    self_state = (self.flat_path, self.project, self.namespace)
    other_state = (other.flat_path, other.project, other.namespace)
    return self_state == other_state
def __ne__(self, other):
    """Compare two keys for inequality.

    Incomplete keys never compare equal to any other key.

    Completed keys compare equal if they have the same path, project,
    and namespace.

    :rtype: bool
    :returns: False if the keys compare equal, else True.
    """
    # Delegates to __eq__; Python 2 does not derive __ne__ automatically.
    return not self == other
def __hash__(self):
    """Hash the key for use in dictionary lookups.

    :rtype: int
    :returns: a hash of the key's state.
    """
    # Sum of the component hashes -- identical to the original chained
    # addition of hash(flat_path) + hash(project) + hash(namespace).
    parts = (self.flat_path, self.project, self.namespace)
    return sum(hash(part) for part in parts)
@staticmethod
def _parse_path(path_args):
    """Parse positional arguments into a key path of kinds and IDs.

    :type path_args: tuple
    :param path_args: Alternating kind (str) and ID/name (int or str)
                      parts; an odd length denotes a partial key.

    :rtype: :class:`list` of :class:`dict`
    :returns: Key parts, each with ``kind`` plus ``id`` or ``name``.
    :raises: :class:`ValueError` for an empty path, a non-string kind,
             or an ID/name that is neither string nor integer.
    """
    if not path_args:
        raise ValueError('Key path must not be empty.')

    kinds = path_args[::2]
    ids_or_names = path_args[1::2]
    # Sentinel used to pad a partial (odd-length) path to even length.
    partial_ending = object()
    if len(path_args) % 2 == 1:
        ids_or_names += (partial_ending,)

    result = []
    for kind, id_or_name in zip(kinds, ids_or_names):
        if not isinstance(kind, six.string_types):
            raise ValueError(kind, 'Kind was not a string.')
        element = {'kind': kind}
        if isinstance(id_or_name, six.string_types):
            element['name'] = id_or_name
        elif isinstance(id_or_name, six.integer_types):
            element['id'] = id_or_name
        elif id_or_name is not partial_ending:
            raise ValueError(id_or_name,
                             'ID/name was not a string or integer.')
        result.append(element)
    return result
    def _combine_args(self):
        """Sets protected data by combining raw data set from the constructor.

        If a ``_parent`` is set, updates the ``_flat_path`` and sets the
        ``_namespace`` and ``_project`` if not already set.

        :rtype: :class:`list` of :class:`dict`
        :returns: A list of key parts with kind and ID or name set.
        :raises: :class:`ValueError` if the parent key is not complete.
        """
        child_path = self._parse_path(self._flat_path)
        if self._parent is not None:
            if self._parent.is_partial:
                raise ValueError('Parent key must be complete.')
            # We know that _parent.path() will return a copy.
            child_path = self._parent.path + child_path
            self._flat_path = self._parent.flat_path + self._flat_path
            # Inherit namespace/project from the parent; an explicit value
            # that disagrees with the parent's is an error.
            if (self._namespace is not None and
                    self._namespace != self._parent.namespace):
                raise ValueError('Child namespace must agree with parent\'s.')
            self._namespace = self._parent.namespace
            if (self._project is not None and
                    self._project != self._parent.project):
                raise ValueError('Child project must agree with parent\'s.')
            self._project = self._parent.project
        return child_path
def _clone(self):
"""Duplicates the Key.
Most attributes are simple types, so don't require copying. Other
attributes like ``parent`` are long-lived and so we re-use them.
:rtype: :class:`google.cloud.datastore.key.Key`
:returns: A new ``Key`` instance with the same data as the current one.
"""
cloned_self = self.__class__(*self.flat_path,
project=self.project,
namespace=self.namespace)
# If the current parent has already been set, we re-use
# the same instance
cloned_self._parent = self._parent
return cloned_self
    def completed_key(self, id_or_name):
        """Creates new key from existing partial key by adding final ID/name.

        :type id_or_name: str or integer
        :param id_or_name: ID or name to be added to the key.

        :rtype: :class:`google.cloud.datastore.key.Key`
        :returns: A new ``Key`` instance with the same data as the current one
                  and an extra ID or name added.
        :raises: :class:`ValueError` if the current key is not partial or if
                 ``id_or_name`` is not a string or integer.
        """
        if not self.is_partial:
            raise ValueError('Only a partial key can be completed.')
        if isinstance(id_or_name, six.string_types):
            id_or_name_key = 'name'
        elif isinstance(id_or_name, six.integer_types):
            id_or_name_key = 'id'
        else:
            raise ValueError(id_or_name,
                             'ID/name was not a string or integer.')
        # Mutate the clone, not self: fill in the trailing path element
        # and extend the flat path with the new ID/name.
        new_key = self._clone()
        new_key._path[-1][id_or_name_key] = id_or_name
        new_key._flat_path += (id_or_name,)
        return new_key
    def to_protobuf(self):
        """Return a protobuf corresponding to the key.

        :rtype: :class:`.entity_pb2.Key`
        :returns: The protobuf representing the key.
        """
        key = _entity_pb2.Key()
        key.partition_id.project_id = self.project
        if self.namespace:
            key.partition_id.namespace_id = self.namespace
        for item in self.path:
            element = key.path.add()
            # A partial key's final element has neither 'id' nor 'name',
            # so all three membership checks are needed.
            if 'kind' in item:
                element.kind = item['kind']
            if 'id' in item:
                element.id = item['id']
            if 'name' in item:
                element.name = item['name']
        return key
    def to_legacy_urlsafe(self, location_prefix=None):
        """Convert to a base64 encode urlsafe string for App Engine.

        This is intended to work with the "legacy" representation of a
        datastore "Key" used within Google App Engine (a so-called
        "Reference"). The returned string can be used as the ``urlsafe``
        argument to ``ndb.Key(urlsafe=...)``. The base64 encoded values
        will have padding removed.

        .. note::

            The string returned by ``to_legacy_urlsafe`` is equivalent, but
            not identical, to the string returned by ``ndb``. The location
            prefix may need to be specified to obtain identical urlsafe
            keys.

        :type location_prefix: str
        :param location_prefix: The location prefix of an App Engine project
                                ID. Often this value is 's~', but may also be
                                'e~', or other location prefixes currently
                                unknown.

        :rtype: bytes
        :returns: A bytestring containing the key encoded as URL-safe base64.
        """
        if location_prefix is None:
            project_id = self.project
        else:
            project_id = location_prefix + self.project
        reference = _app_engine_key_pb2.Reference(
            app=project_id,
            path=_to_legacy_path(self._path),  # Avoid the copy.
            name_space=self.namespace,
        )
        raw_bytes = reference.SerializeToString()
        # Strip '=' padding to match App Engine's urlsafe convention;
        # from_legacy_urlsafe re-adds it before decoding.
        return base64.urlsafe_b64encode(raw_bytes).strip(b'=')
    @classmethod
    def from_legacy_urlsafe(cls, urlsafe):
        """Convert urlsafe string to :class:`~google.cloud.datastore.key.Key`.

        This is intended to work with the "legacy" representation of a
        datastore "Key" used within Google App Engine (a so-called
        "Reference"). This assumes that ``urlsafe`` was created within an App
        Engine app via something like ``ndb.Key(...).urlsafe()``.

        :type urlsafe: bytes or unicode
        :param urlsafe: The base64 encoded (ASCII) string corresponding to a
                        datastore "Key" / "Reference".

        :rtype: :class:`~google.cloud.datastore.key.Key`.
        :returns: The key corresponding to ``urlsafe``.
        """
        urlsafe = _to_bytes(urlsafe, encoding='ascii')
        # Restore the '=' padding stripped by to_legacy_urlsafe so the
        # base64 decoder accepts the input.
        padding = b'=' * (-len(urlsafe) % 4)
        urlsafe += padding
        raw_bytes = base64.urlsafe_b64decode(urlsafe)
        reference = _app_engine_key_pb2.Reference()
        reference.ParseFromString(raw_bytes)
        project = _clean_app(reference.app)
        # An empty namespace field means "no namespace" (None).
        namespace = _get_empty(reference.name_space, u'')
        _check_database_id(reference.database_id)
        flat_path = _get_flat_path(reference.path)
        return cls(*flat_path, project=project, namespace=namespace)
    @property
    def is_partial(self):
        """Boolean indicating if the key has an ID (or name).

        :rtype: bool
        :returns: ``True`` if the last element of the key's path does not have
                  an ``id`` or a ``name``.
        """
        return self.id_or_name is None
    @property
    def namespace(self):
        """Namespace getter.

        :rtype: str
        :returns: The namespace of the current key (may be ``None``).
        """
        return self._namespace
    @property
    def path(self):
        """Path getter.

        Returns a copy so that the key remains immutable.

        :rtype: :class:`list` of :class:`dict`
        :returns: The (key) path of the current key.
        """
        # Deep copy: callers may mutate the returned dicts freely.
        return copy.deepcopy(self._path)
    @property
    def flat_path(self):
        """Getter for the key path as a tuple.

        :rtype: tuple of string and integer
        :returns: The tuple of elements in the path.
        """
        # Tuples are immutable, so no defensive copy is needed here.
        return self._flat_path
    @property
    def kind(self):
        """Kind getter. Based on the last element of path.

        :rtype: str
        :returns: The kind of the current key.
        """
        return self.path[-1]['kind']
    @property
    def id(self):
        """ID getter. Based on the last element of path.

        :rtype: int
        :returns: The (integer) ID of the key, or ``None`` if the key is
                  named or partial.
        """
        return self.path[-1].get('id')
    @property
    def name(self):
        """Name getter. Based on the last element of path.

        :rtype: str
        :returns: The (string) name of the key, or ``None`` if the key has
                  an integer ID or is partial.
        """
        return self.path[-1].get('name')
    @property
    def id_or_name(self):
        """Getter. Based on the last element of path.

        :rtype: int (if ``id``) or string (if ``name``)
        :returns: The last element of the key's path if it is either an ``id``
                  or a ``name``; ``None`` when neither is set (partial key).
        """
        return self.id or self.name
    @property
    def project(self):
        """Project getter.

        :rtype: str
        :returns: The key's project.
        """
        return self._project
def _make_parent(self):
"""Creates a parent key for the current path.
Extracts all but the last element in the key path and creates a new
key, while still matching the namespace and the project.
:rtype: :class:`google.cloud.datastore.key.Key` or :class:`NoneType`
:returns: A new ``Key`` instance, whose path consists of all but the
last element of current path. If the current key has only
one path element, returns ``None``.
"""
if self.is_partial:
parent_args = self.flat_path[:-1]
else:
parent_args = self.flat_path[:-2]
if parent_args:
return self.__class__(*parent_args, project=self.project,
namespace=self.namespace)
    @property
    def parent(self):
        """The parent of the current key.

        :rtype: :class:`google.cloud.datastore.key.Key` or :class:`NoneType`
        :returns: A new ``Key`` instance, whose path consists of all but the
                  last element of current path. If the current key has only
                  one path element, returns ``None``.
        """
        # Lazily computed and cached on first access.
        if self._parent is None:
            self._parent = self._make_parent()
        return self._parent
def __repr__(self):
return '<Key%s, project=%s>' % (self._flat_path, self.project)
def _validate_project(project, parent):
"""Ensure the project is set appropriately.
If ``parent`` is passed, skip the test (it will be checked / fixed up
later).
If ``project`` is unset, attempt to infer the project from the environment.
:type project: str
:param project: A project.
:type parent: :class:`google.cloud.datastore.key.Key`
:param parent: (Optional) The parent of the key or ``None``.
:rtype: str
:returns: The ``project`` passed in, or implied from the environment.
:raises: :class:`ValueError` if ``project`` is ``None`` and no project
can be inferred from the parent.
"""
if parent is None:
if project is None:
raise ValueError("A Key must have a project set.")
return project
def _clean_app(app_str):
"""Clean a legacy (i.e. from App Engine) app string.
:type app_str: str
:param app_str: The ``app`` value stored in a "Reference" pb.
:rtype: str
:returns: The cleaned value.
"""
parts = app_str.split('~', 1)
return parts[-1]
def _get_empty(value, empty_value):
"""Check if a protobuf field is "empty".
:type value: object
:param value: A basic field from a protobuf.
:type empty_value: object
:param empty_value: The "empty" value for the same type as
``value``.
"""
if value == empty_value:
return None
else:
return value
def _check_database_id(database_id):
"""Make sure a "Reference" database ID is empty.
:type database_id: unicode
:param database_id: The ``database_id`` field from a "Reference" protobuf.
:raises: :exc:`ValueError` if the ``database_id`` is not empty.
"""
if database_id != u'':
msg = _DATABASE_ID_TEMPLATE.format(database_id)
raise ValueError(msg)
def _add_id_or_name(flat_path, element_pb, empty_allowed):
"""Add the ID or name from an element to a list.
:type flat_path: list
:param flat_path: List of accumulated path parts.
:type element_pb: :class:`._app_engine_key_pb2.Path.Element`
:param element_pb: The element containing ID or name.
:type empty_allowed: bool
:param empty_allowed: Indicates if neither ID or name need be set. If
:data:`False`, then **exactly** one of them must be.
:raises: :exc:`ValueError` if 0 or 2 of ID/name are set (unless
``empty_allowed=True`` and 0 are set).
"""
id_ = element_pb.id
name = element_pb.name
# NOTE: Below 0 and the empty string are the "null" values for their
# respective types, indicating that the value is unset.
if id_ == 0:
if name == u'':
if not empty_allowed:
raise ValueError(_EMPTY_ELEMENT)
else:
flat_path.append(name)
else:
if name == u'':
flat_path.append(id_)
else:
msg = _BAD_ELEMENT_TEMPLATE.format(id_, name)
raise ValueError(msg)
def _get_flat_path(path_pb):
    """Convert a legacy "Path" protobuf to a flat path.

    For example::

        Element {
          type: "parent"
          id: 59
        }
        Element {
          type: "child"
          name: "naem"
        }

    would convert to ``('parent', 59, 'child', 'naem')``.

    :type path_pb: :class:`._app_engine_key_pb2.Path`
    :param path_pb: Legacy protobuf "Path" object (from a "Reference").

    :rtype: tuple
    :returns: The path parts from ``path_pb``.
    """
    flat = []
    final_index = len(path_pb.element) - 1
    for position, element in enumerate(path_pb.element):
        flat.append(element.type)
        # Only the trailing element may omit its ID/name (a partial key).
        _add_id_or_name(flat, element, position == final_index)
    return tuple(flat)
def _to_legacy_path(dict_path):
    """Convert a tuple of ints and strings in a legacy "Path".

    .. note:

        This assumes, but does not verify, that each entry in
        ``dict_path`` is valid (i.e. doesn't have more than one
        key out of "name" / "id").

    :type dict_path: list
    :param dict_path: The "structured" path for a key, i.e. it
                      is a list of dictionaries, each of which has
                      "kind" and one of "name" / "id" as keys.

    :rtype: :class:`._app_engine_key_pb2.Path`
    :returns: The legacy path corresponding to ``dict_path``.
    """
    elements = []
    for part in dict_path:
        element_kwargs = {'type': part['kind']}
        # At most one of 'id' / 'name' is set; a bare kind (partial key)
        # produces an element with neither.
        if 'id' in part:
            element_kwargs['id'] = part['id']
        elif 'name' in part:
            element_kwargs['name'] = part['name']
        element = _app_engine_key_pb2.Path.Element(**element_kwargs)
        elements.append(element)
    return _app_engine_key_pb2.Path(element=elements)
|
apache-2.0
|
jjhelmus/scipy
|
scipy/fftpack/pseudo_diffs.py
|
36
|
14318
|
"""
Differential and pseudo-differential operators.
"""
# Created by Pearu Peterson, September 2002
from __future__ import division, print_function, absolute_import
__all__ = ['diff',
'tilbert','itilbert','hilbert','ihilbert',
'cs_diff','cc_diff','sc_diff','ss_diff',
'shift']
from numpy import pi, asarray, sin, cos, sinh, cosh, tanh, iscomplexobj
from . import convolve
from scipy.fftpack.basic import _datacopied
import atexit
atexit.register(convolve.destroy_convolve_cache)
del atexit
# Per-function kernel cache; captured as a default argument below so it
# survives the "del _cache" that hides the module-level name.
_cache = {}
def diff(x,order=1,period=None, _cache=_cache):
    """
    Return k-th derivative (or integral) of a periodic sequence x.

    If x_j and y_j are Fourier coefficients of periodic functions x
    and y, respectively, then::

        y_j = pow(sqrt(-1)*j*2*pi/period, order) * x_j
        y_0 = 0 if order is not 0.

    Parameters
    ----------
    x : array_like
        Input array.
    order : int, optional
        The order of differentiation. Default order is 1. If order is
        negative, then integration is carried out under the assumption
        that ``x_0 == 0``.
    period : float, optional
        The assumed period of the sequence. Default is ``2*pi``.

    Notes
    -----
    If ``sum(x, axis=0) = 0`` then ``diff(diff(x, k), -k) == x`` (within
    numerical accuracy).

    For odd order and even ``len(x)``, the Nyquist mode is taken zero.
    """
    tmp = asarray(x)
    if order == 0:
        return tmp
    if iscomplexobj(tmp):
        # Differentiate real and imaginary parts independently.
        return diff(tmp.real,order,period)+1j*diff(tmp.imag,order,period)
    if period is not None:
        c = 2*pi/period
    else:
        c = 1.0
    n = len(x)
    omega = _cache.get((n,order,c))
    if omega is None:
        # Crude eviction: empty the cache once it exceeds 20 kernels.
        if len(_cache) > 20:
            while _cache:
                _cache.popitem()
        def kernel(k,order=order,c=c):
            # Fourier multiplier (c*k)**order; zero at the k == 0 mode.
            if k:
                return pow(c*k,order)
            return 0
        omega = convolve.init_convolution_kernel(n,kernel,d=order,
                                                 zero_nyquist=1)
        _cache[(n,order,c)] = omega
    overwrite_x = _datacopied(tmp, x)
    # Odd-order multipliers are purely imaginary -> swap real/imag parts.
    return convolve.convolve(tmp,omega,swap_real_imag=order % 2,
                             overwrite_x=overwrite_x)
del _cache
# Kernel cache for tilbert; kept alive via the default argument.
_cache = {}
def tilbert(x, h, period=None, _cache=_cache):
    """
    Return h-Tilbert transform of a periodic sequence x.

    If x_j and y_j are Fourier coefficients of periodic functions x
    and y, respectively, then::

        y_j = sqrt(-1)*coth(j*h*2*pi/period) * x_j
        y_0 = 0

    Parameters
    ----------
    x : array_like
        The input array to transform.
    h : float
        Defines the parameter of the Tilbert transform.
    period : float, optional
        The assumed period of the sequence.  Default period is ``2*pi``.

    Returns
    -------
    tilbert : ndarray
        The result of the transform.

    Notes
    -----
    If ``sum(x, axis=0) == 0`` and ``n = len(x)`` is odd then
    ``tilbert(itilbert(x)) == x``.

    If ``2 * pi * h / period`` is approximately 10 or larger, then
    numerically ``tilbert == hilbert``
    (theoretically oo-Tilbert == Hilbert).

    For even ``len(x)``, the Nyquist mode of ``x`` is taken zero.
    """
    tmp = asarray(x)
    if iscomplexobj(tmp):
        # Transform real and imaginary parts independently.
        return tilbert(tmp.real, h, period) + \
               1j * tilbert(tmp.imag, h, period)
    if period is not None:
        h = h * 2 * pi / period
    n = len(x)
    omega = _cache.get((n, h))
    if omega is None:
        # Crude eviction: empty the cache once it exceeds 20 kernels.
        if len(_cache) > 20:
            while _cache:
                _cache.popitem()
        def kernel(k, h=h):
            # Fourier multiplier coth(h*k); zero at the k == 0 mode.
            if k:
                return 1.0/tanh(h*k)
            return 0
        omega = convolve.init_convolution_kernel(n, kernel, d=1)
        _cache[(n,h)] = omega
    overwrite_x = _datacopied(tmp, x)
    return convolve.convolve(tmp,omega,swap_real_imag=1,overwrite_x=overwrite_x)
del _cache
# Kernel cache for itilbert; kept alive via the default argument.
_cache = {}
def itilbert(x,h,period=None, _cache=_cache):
    """
    Return inverse h-Tilbert transform of a periodic sequence x.

    If ``x_j`` and ``y_j`` are Fourier coefficients of periodic functions x
    and y, respectively, then::

        y_j = -sqrt(-1)*tanh(j*h*2*pi/period) * x_j
        y_0 = 0

    For more details, see `tilbert`.
    """
    tmp = asarray(x)
    if iscomplexobj(tmp):
        # Transform real and imaginary parts independently.
        return itilbert(tmp.real,h,period) + \
               1j*itilbert(tmp.imag,h,period)
    if period is not None:
        h = h*2*pi/period
    n = len(x)
    omega = _cache.get((n,h))
    if omega is None:
        # Crude eviction: empty the cache once it exceeds 20 kernels.
        if len(_cache) > 20:
            while _cache:
                _cache.popitem()
        def kernel(k,h=h):
            # Fourier multiplier -tanh(h*k); zero at the k == 0 mode.
            if k:
                return -tanh(h*k)
            return 0
        omega = convolve.init_convolution_kernel(n,kernel,d=1)
        _cache[(n,h)] = omega
    overwrite_x = _datacopied(tmp, x)
    return convolve.convolve(tmp,omega,swap_real_imag=1,overwrite_x=overwrite_x)
del _cache
# Kernel cache for hilbert; kept alive via the default argument.
_cache = {}
def hilbert(x, _cache=_cache):
    """
    Return Hilbert transform of a periodic sequence x.

    If x_j and y_j are Fourier coefficients of periodic functions x
    and y, respectively, then::

        y_j = sqrt(-1)*sign(j) * x_j
        y_0 = 0

    Parameters
    ----------
    x : array_like
        The input array, should be periodic.
    _cache : dict, optional
        Dictionary that contains the kernel used to do a convolution with.

    Returns
    -------
    y : ndarray
        The transformed input.

    See Also
    --------
    scipy.signal.hilbert : Compute the analytic signal, using the Hilbert
                           transform.

    Notes
    -----
    If ``sum(x, axis=0) == 0`` then ``hilbert(ihilbert(x)) == x``.

    For even len(x), the Nyquist mode of x is taken zero.

    The sign of the returned transform does not have a factor -1 that is more
    often than not found in the definition of the Hilbert transform. Note also
    that `scipy.signal.hilbert` does have an extra -1 factor compared to this
    function.
    """
    tmp = asarray(x)
    if iscomplexobj(tmp):
        # Transform real and imaginary parts independently.
        return hilbert(tmp.real)+1j*hilbert(tmp.imag)
    n = len(x)
    omega = _cache.get(n)
    if omega is None:
        # Crude eviction: empty the cache once it exceeds 20 kernels.
        if len(_cache) > 20:
            while _cache:
                _cache.popitem()
        def kernel(k):
            # Fourier multiplier sign(k).
            if k > 0:
                return 1.0
            elif k < 0:
                return -1.0
            return 0.0
        omega = convolve.init_convolution_kernel(n,kernel,d=1)
        _cache[n] = omega
    overwrite_x = _datacopied(tmp, x)
    return convolve.convolve(tmp,omega,swap_real_imag=1,overwrite_x=overwrite_x)
del _cache
def ihilbert(x):
    """
    Return inverse Hilbert transform of a periodic sequence x.

    If ``x_j`` and ``y_j`` are Fourier coefficients of periodic functions x
    and y, respectively, then::

        y_j = -sqrt(-1)*sign(j) * x_j
        y_0 = 0
    """
    # The inverse is simply the negated forward transform.
    return -hilbert(x)
# Kernel cache for cs_diff; kept alive via the default argument.
_cache = {}
def cs_diff(x, a, b, period=None, _cache=_cache):
    """
    Return (a,b)-cosh/sinh pseudo-derivative of a periodic sequence.

    If ``x_j`` and ``y_j`` are Fourier coefficients of periodic functions x
    and y, respectively, then::

        y_j = -sqrt(-1)*cosh(j*a*2*pi/period)/sinh(j*b*2*pi/period) * x_j
        y_0 = 0

    Parameters
    ----------
    x : array_like
        The array to take the pseudo-derivative from.
    a, b : float
        Defines the parameters of the cosh/sinh pseudo-differential
        operator.
    period : float, optional
        The period of the sequence. Default period is ``2*pi``.

    Returns
    -------
    cs_diff : ndarray
        Pseudo-derivative of periodic sequence `x`.

    Notes
    -----
    For even len(`x`), the Nyquist mode of `x` is taken as zero.
    """
    tmp = asarray(x)
    if iscomplexobj(tmp):
        # Transform real and imaginary parts independently.
        return cs_diff(tmp.real,a,b,period) + \
               1j*cs_diff(tmp.imag,a,b,period)
    if period is not None:
        a = a*2*pi/period
        b = b*2*pi/period
    n = len(x)
    omega = _cache.get((n,a,b))
    if omega is None:
        # Crude eviction: empty the cache once it exceeds 20 kernels.
        if len(_cache) > 20:
            while _cache:
                _cache.popitem()
        def kernel(k,a=a,b=b):
            # Multiplier -cosh(a*k)/sinh(b*k); zero at the k == 0 mode.
            if k:
                return -cosh(a*k)/sinh(b*k)
            return 0
        omega = convolve.init_convolution_kernel(n,kernel,d=1)
        _cache[(n,a,b)] = omega
    overwrite_x = _datacopied(tmp, x)
    return convolve.convolve(tmp,omega,swap_real_imag=1,overwrite_x=overwrite_x)
del _cache
# Kernel cache for sc_diff; kept alive via the default argument.
_cache = {}
def sc_diff(x, a, b, period=None, _cache=_cache):
    """
    Return (a,b)-sinh/cosh pseudo-derivative of a periodic sequence x.

    If x_j and y_j are Fourier coefficients of periodic functions x
    and y, respectively, then::

        y_j = sqrt(-1)*sinh(j*a*2*pi/period)/cosh(j*b*2*pi/period) * x_j
        y_0 = 0

    Parameters
    ----------
    x : array_like
        Input array.
    a,b : float
        Defines the parameters of the sinh/cosh pseudo-differential
        operator.
    period : float, optional
        The period of the sequence x. Default is 2*pi.

    Notes
    -----
    ``sc_diff(cs_diff(x,a,b),b,a) == x``

    For even ``len(x)``, the Nyquist mode of x is taken as zero.
    """
    tmp = asarray(x)
    if iscomplexobj(tmp):
        # Transform real and imaginary parts independently.
        return sc_diff(tmp.real,a,b,period) + \
               1j*sc_diff(tmp.imag,a,b,period)
    if period is not None:
        a = a*2*pi/period
        b = b*2*pi/period
    n = len(x)
    omega = _cache.get((n,a,b))
    if omega is None:
        # Crude eviction: empty the cache once it exceeds 20 kernels.
        if len(_cache) > 20:
            while _cache:
                _cache.popitem()
        def kernel(k,a=a,b=b):
            # Multiplier sinh(a*k)/cosh(b*k); zero at the k == 0 mode.
            if k:
                return sinh(a*k)/cosh(b*k)
            return 0
        omega = convolve.init_convolution_kernel(n,kernel,d=1)
        _cache[(n,a,b)] = omega
    overwrite_x = _datacopied(tmp, x)
    return convolve.convolve(tmp,omega,swap_real_imag=1,overwrite_x=overwrite_x)
del _cache
# Kernel cache for ss_diff; kept alive via the default argument.
_cache = {}
def ss_diff(x, a, b, period=None, _cache=_cache):
    """
    Return (a,b)-sinh/sinh pseudo-derivative of a periodic sequence x.

    If x_j and y_j are Fourier coefficients of periodic functions x
    and y, respectively, then::

        y_j = sinh(j*a*2*pi/period)/sinh(j*b*2*pi/period) * x_j
        y_0 = a/b * x_0

    Parameters
    ----------
    x : array_like
        The array to take the pseudo-derivative from.
    a,b
        Defines the parameters of the sinh/sinh pseudo-differential
        operator.
    period : float, optional
        The period of the sequence x. Default is ``2*pi``.

    Notes
    -----
    ``ss_diff(ss_diff(x,a,b),b,a) == x``
    """
    tmp = asarray(x)
    if iscomplexobj(tmp):
        # Transform real and imaginary parts independently.
        return ss_diff(tmp.real,a,b,period) + \
               1j*ss_diff(tmp.imag,a,b,period)
    if period is not None:
        a = a*2*pi/period
        b = b*2*pi/period
    n = len(x)
    omega = _cache.get((n,a,b))
    if omega is None:
        # Crude eviction: empty the cache once it exceeds 20 kernels.
        if len(_cache) > 20:
            while _cache:
                _cache.popitem()
        def kernel(k,a=a,b=b):
            # Multiplier sinh(a*k)/sinh(b*k); the k == 0 mode is a/b.
            if k:
                return sinh(a*k)/sinh(b*k)
            return float(a)/b
        omega = convolve.init_convolution_kernel(n,kernel)
        _cache[(n,a,b)] = omega
    overwrite_x = _datacopied(tmp, x)
    return convolve.convolve(tmp,omega,overwrite_x=overwrite_x)
del _cache
# Kernel cache for cc_diff; kept alive via the default argument.
_cache = {}
def cc_diff(x, a, b, period=None, _cache=_cache):
    """
    Return (a,b)-cosh/cosh pseudo-derivative of a periodic sequence.

    If x_j and y_j are Fourier coefficients of periodic functions x
    and y, respectively, then::

        y_j = cosh(j*a*2*pi/period)/cosh(j*b*2*pi/period) * x_j

    Parameters
    ----------
    x : array_like
        The array to take the pseudo-derivative from.
    a,b : float
        Defines the parameters of the sinh/sinh pseudo-differential
        operator.
    period : float, optional
        The period of the sequence x. Default is ``2*pi``.

    Returns
    -------
    cc_diff : ndarray
        Pseudo-derivative of periodic sequence `x`.

    Notes
    -----
    ``cc_diff(cc_diff(x,a,b),b,a) == x``
    """
    tmp = asarray(x)
    if iscomplexobj(tmp):
        # Transform real and imaginary parts independently.
        return cc_diff(tmp.real,a,b,period) + \
               1j*cc_diff(tmp.imag,a,b,period)
    if period is not None:
        a = a*2*pi/period
        b = b*2*pi/period
    n = len(x)
    omega = _cache.get((n,a,b))
    if omega is None:
        # Crude eviction: empty the cache once it exceeds 20 kernels.
        if len(_cache) > 20:
            while _cache:
                _cache.popitem()
        def kernel(k,a=a,b=b):
            # Multiplier cosh(a*k)/cosh(b*k); nonzero at k == 0 (== 1).
            return cosh(a*k)/cosh(b*k)
        omega = convolve.init_convolution_kernel(n,kernel)
        _cache[(n,a,b)] = omega
    overwrite_x = _datacopied(tmp, x)
    return convolve.convolve(tmp,omega,overwrite_x=overwrite_x)
del _cache
# Kernel cache for shift; kept alive via the default argument.
_cache = {}
def shift(x, a, period=None, _cache=_cache):
    """
    Shift periodic sequence x by a: y(u) = x(u+a).

    If x_j and y_j are Fourier coefficients of periodic functions x
    and y, respectively, then::

        y_j = exp(j*a*2*pi/period*sqrt(-1)) * x_f

    Parameters
    ----------
    x : array_like
        The array to take the pseudo-derivative from.
    a : float
        Defines the parameters of the sinh/sinh pseudo-differential
    period : float, optional
        The period of the sequences x and y. Default period is ``2*pi``.
    """
    tmp = asarray(x)
    if iscomplexobj(tmp):
        # Shift real and imaginary parts independently.
        return shift(tmp.real,a,period)+1j*shift(tmp.imag,a,period)
    if period is not None:
        a = a*2*pi/period
    n = len(x)
    omega = _cache.get((n,a))
    if omega is None:
        # Crude eviction: empty the cache once it exceeds 20 kernels.
        if len(_cache) > 20:
            while _cache:
                _cache.popitem()
        # exp(i*a*k) is split into its real (cos) and imaginary (sin)
        # multipliers for the complex convolution below.
        def kernel_real(k,a=a):
            return cos(a*k)
        def kernel_imag(k,a=a):
            return sin(a*k)
        omega_real = convolve.init_convolution_kernel(n,kernel_real,d=0,
                                                      zero_nyquist=0)
        omega_imag = convolve.init_convolution_kernel(n,kernel_imag,d=1,
                                                      zero_nyquist=0)
        _cache[(n,a)] = omega_real,omega_imag
    else:
        omega_real,omega_imag = omega
    overwrite_x = _datacopied(tmp, x)
    return convolve.convolve_z(tmp,omega_real,omega_imag,
                               overwrite_x=overwrite_x)
del _cache
|
bsd-3-clause
|
mdunschen/AirQualityTweeter
|
aqliverpool.py
|
1
|
24825
|
#!/usr/bin/python3
# coding: utf-8
from optparse import OptionParser
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.patches as patches
import matplotlib.patheffects as patheffects
from matplotlib.ticker import FormatStrFormatter
from matplotlib.ticker import AutoMinorLocator
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.animation import FuncAnimation
from matplotlib.text import Annotation, Text
import numpy as np
import urllib
from urllib import request
import re
import html
import sys, os
import pickle
from datetime import datetime, timedelta
import tweepy
import sqlite3
import math
from collections import deque
apatch = None
urlstr = "https://uk-air.defra.gov.uk/latest/currentlevels?view=site#L"
shorturlstr = "https://goo.gl/ZpELjS"
urlWHO = "http://apps.who.int/iris/bitstream/10665/69477/1/WHO_SDE_PHE_OEH_06.02_eng.pdf"
sitename = b'Liverpool'
mgm3 = '\u03BCgm\u207B\u00B3'
O3, NO2, SO2, PM25, PM100 = "O\u2083", "NO\u2082", "SO\u2082", "PM\u2082\u2085", "PM\u2081\u2080\u2080"
guides = {O3:100, NO2:200, SO2:20, PM25:25, PM100:50} # source: http://apps.who.int/iris/bitstream/10665/69477/1/WHO_SDE_PHE_OEH_06.02_eng.pdf
meansWHO = {O3:'8h', NO2:'1h', SO2:'10m', PM25:'24h', PM100:'24h'}
meansDEFRA = {O3:'8h', NO2:'1h', SO2:'max 15m', PM25:'24h', PM100:'24h'}
consumer_key, consumer_secret, access_token, access_token_secret = None, None, None, None
def loadAPIKeys():
    """Load Twitter API credentials into the module-level globals.

    Reads them from ``apikeys.bin`` when that file exists; otherwise
    prompts for each value on stdin and pickles the answers to
    ``apikeys.bin`` for subsequent runs.
    """
    global consumer_key, consumer_secret, access_token, access_token_secret
    if os.path.isfile("apikeys.bin"):
        consumer_key, consumer_secret, access_token, access_token_secret = pickle.load(open("apikeys.bin", "rb"))
    else:
        consumer_key = input("consumer_key: ")
        consumer_secret = input("consumer_secret: ")
        access_token = input("access_token: ")
        access_token_secret = input("access_token_secret: ")
        # Cache the interactively-entered keys for next time.
        pickle.dump((consumer_key, consumer_secret, access_token, access_token_secret), open("apikeys.bin", "wb"))
def twitterAPI():
    """Build an authenticated tweepy API client from the loaded credentials.

    Requires :func:`loadAPIKeys` to have populated the module globals.
    """
    handler = tweepy.OAuthHandler(consumer_key, consumer_secret)
    handler.set_access_token(access_token, access_token_secret)
    return tweepy.API(handler)
def tweet(status, replyto=None, imgfilename=None):
    """Post *status* (optionally with an image) to Twitter.

    :param status: tweet text; each URL counts as 23 characters.
    :param replyto: optional tweepy status being replied to.
    :param imgfilename: optional path of an image to attach.
    :returns: the posted tweepy status, or ``None`` on failure or when
              both ``status`` and ``imgfilename`` are empty.
    """
    if not (status or imgfilename):
        return
    urls = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', status)
    # take out all url texts from status for count, all urls count as 23
    rstat = status
    for u in urls:
        rstat = rstat.replace(u, '')
    nchars = len(rstat) + 23 * len(urls)
    if nchars > 140:
        # NOTE(review): over-length tweets are only warned about here --
        # the post is still attempted below.
        print("Tweet too long")
    #print(status)
    api = twitterAPI()
    if (imgfilename and os.path.isfile(imgfilename)):
        try:
            stat = api.update_with_media(imgfilename, status=status, in_reply_to_status_id=(replyto and replyto.id))
        except Exception as e:
            print(e)
            stat = None
    else:
        try:
            stat = api.update_status(status=status, in_reply_to_status_id=(replyto and replyto.id))
        except Exception as e:
            print(e)
            stat = None
    return stat
def compose(day, clock, reading):
    """Format one air-quality reading as tweet text.

    :param day: date string, e.g. ``"27/01/2017"``.
    :param clock: time string, e.g. ``"10:00"``.
    :param reading: dict mapping pollutant label -> (value, qualifier),
                    where value is a float or the string ``"n/a"``.
    :returns: newline-joined tweet body ending with the short URL.
    """
    lines = ["%s, %s (%s)" % (day, clock, mgm3)]
    for pollutant in sorted(reading.keys()):
        value, qualifier = reading[pollutant]
        if value == "n/a":
            lines.append("%s: %s" % (pollutant, value))
        else:
            lines.append("%s: %.0f %s" % (pollutant, value, qualifier))
    lines.append("%s" % shorturlstr)
    return '\n'.join(lines)
def toDT(day, clock):
    """Convert a DEFRA date/time pair into a :class:`datetime.datetime`.

    DEFRA encodes midnight as hour ``24:00`` of the *preceding* day
    (e.g. 27/01/2017 24:00 means 28/01/2017 00:00), so roll the date
    forward by a day in that case. Seconds, if present, are ignored.
    """
    if clock.startswith("24:00"):
        rolled = datetime.strptime(day, "%d/%m/%Y") + timedelta(hours=24)
        day = rolled.strftime("%d/%m/%Y")
        clock = "00:00"
    return datetime.strptime("%s %s" % (day, clock[:5]), "%d/%m/%Y %H:%M")
def composeAboveTweet(day, clock, above, origtweetstat):
    """Build alert tweets for pollutants currently above WHO guidelines.

    :param day: date string of the latest reading.
    :param clock: time string of the latest reading.
    :param above: dict pollutant -> list of (day, clock, value) tuples,
                  newest first, as produced by :func:`compareWHO`.
    :param origtweetstat: original tweet status (currently unused here).
    :returns: list of tweet-body strings, one per pollutant whose newest
              exceedance matches the current day/clock.
    """
    status = []
    dtnow = toDT(day, clock)
    for k in above:
        # count hours above
        #print("In composeAboveTweet", k, above[k])
        lday, lclock, lvalue = above[k][0]
        # Only alert when the most recent exceedance is the current reading.
        if lday == day and lclock == clock:
            stat = []
            # count hours above
            dtlast = dtnow
            nhours = 1
            # Walk backwards while readings are consecutive hours apart.
            for lday, lclock, lvalue in above[k][1:]:
                if lday == day and lclock == clock:
                    continue  # skip duplicate entries
                dt = toDT(lday, lclock)
                if (dtlast - dt) == timedelta(hours=1):
                    nhours += 1
                else:
                    break
                dtlast = dt
            stat.append("@lpoolcouncil @DefraUKAir @LiverpoolFoE: %s %dh above @WHO guide (%.0f%s %s-mean %s) #airpollution #liverpool" %
                        (k, nhours, guides[k], mgm3, meansWHO[k], urlWHO))
            if meansWHO[k] != meansDEFRA[k]:
                stat.append("(Note #DEFRA data is %s mean)" % meansDEFRA[k])
            status.append('\n'.join(stat))
    return status
def scrape():
    """Scrape the latest Liverpool reading from the DEFRA current-levels page.

    :returns: ``(day, clock, reading)`` where ``reading`` maps pollutant
              label -> (value, qualifier); value is a float or ``"n/a"``.

    NOTE(review): tightly coupled to the page's HTML layout -- any DEFRA
    markup change breaks the regexes below.
    """
    f = request.urlopen(urlstr)
    r = f.read()
    # Grab the table row for the configured site (bytes regex on raw HTML).
    g = re.search(b".*<tr>.*(%s.*?)</tr>" % sitename, r, re.DOTALL)
    #print(g.group(1))
    # split into <td></td>
    row = g.group(1)
    #print("row = %s\n" % row)
    # date and time
    dategroups = re.search(b".*<td>(.*?)<br.*?>(.*?)</td>", row, re.DOTALL)
    day = dategroups.group(1).decode("utf-8")
    clock = dategroups.group(2).decode("utf-8")
    # data
    cols = re.findall(b"<span.*?>(.*?)</span>", row, re.DOTALL)
    assert len(cols) == 5
    units = [O3, NO2, SO2, PM25, PM100]
    datanums = []
    for v in cols:
        value = 'not_set'
        # Values look like "12 (Low)" or "n/a"; &nbsp; may replace the space.
        if b' ' in v:
            try:
                value = float(v[:v.index(b' ')])
            except ValueError:
                pass
        if value == 'not_set' and b'n/a' in v:
            value = "n/a"
        else:
            value = float(v[:v.index(b'&')])
        nv = v.replace(b'&nbsp;', b' ')
        ix = b''
        # Optional "(...)" qualifier, e.g. the DEFRA index band.
        m = re.match(b".*?(\(.*?\))", nv)
        if m:
            ix = re.match(b".*?(\(.*?\))", nv).group(1)
        datanums.append((value, ix.decode("utf-8")))
    reading = dict(zip(units, datanums))
    return day, clock, reading
def convert(r):
    # converts result from sqlite query to format we scrape of day, time, readings
    # where readings is a dict with keys "units" and tuples as values
    #
    # :param r: rows as returned by loadAllReadings(); each row is
    #           (id, date_time, O3, NO2, SO2, PM25, PM100) with the
    #           pollutant columns formatted "value (qualifier)" or "n/a ".
    # :returns: deque of (date, clock, reading) tuples, newest first
    #           (appendleft reverses the ascending-id row order).
    units = [O3, NO2, SO2, PM25, PM100]
    converted = deque()
    for e in r:
        dt = datetime.strptime(e[1], "%Y-%m-%d %H:%M:%S.%f")
        date = dt.strftime("%d/%m/%Y")
        clock = dt.strftime("%H:%M:%S")
        tpls = []
        for v in e[2:]:
            if v[:3] == "n/a":
                tpls.append(("n/a", ''))
            else:
                # Split "value (qualifier)" back into its two parts.
                m = re.match("(.*?)(\(.*?\))", v)
                tpls.append((float(m.group(1)), m.group(2)))
        assert len(tpls) == 5
        converted.appendleft((date, clock, dict(zip(units, tpls))))
    return converted
def loadAllReadings(dbname):
    """Return every row of the ``readings`` table in *dbname*.

    :param dbname: path of the sqlite database file.
    :returns: list of row tuples, in ascending id order.
    """
    db = sqlite3.connect(dbname)
    try:
        c = db.cursor()
        c.execute("SELECT * FROM readings")
        return c.fetchall()
    finally:
        # Fix: the connection previously leaked (was never closed).
        db.close()
def loadLastReading(dbname):
    """Return the most recent (highest-id) row of the ``readings`` table.

    :param dbname: path of the sqlite database file.
    :returns: list containing at most one row tuple.
    """
    db = sqlite3.connect(dbname)
    try:
        c = db.cursor()
        c.execute("SELECT * FROM readings WHERE id in ( SELECT max(id) FROM readings)")
        return c.fetchall()
    finally:
        # Fix: the connection previously leaked (was never closed).
        db.close()
def loadReadings():
    """Load the pickled readings from ``allreadings.bin``.

    :returns: the unpickled collection, or an empty deque when the
              pickle file does not exist.
    """
    fall = "allreadings.bin"
    if not os.path.isfile(fall):
        return deque()
    with open(fall, "rb") as f:
        return pickle.load(f)
def saveLastReading(dbname, date, time, reading, overwrt=False):
    """Insert one reading into the ``readings`` table of *dbname*.

    Creates the table when missing and skips rows whose timestamp is
    already stored (printing "Already exists").

    :param dbname: path of the sqlite database file.
    :param date: date string ``dd/mm/yyyy``.
    :param time: time string ``HH:MM:SS``.
    :param reading: dict mapping pollutant label -> (value, qualifier).
    :param overwrt: when True, drop and recreate the table first.
    """
    units = [O3, NO2, SO2, PM25, PM100]
    db = sqlite3.connect(dbname)
    try:
        c = db.cursor()
        if overwrt:
            c.execute(''' DROP TABLE IF EXISTS readings''')
        e = '''
            CREATE TABLE IF NOT EXISTS readings(id INTEGER PRIMARY KEY, date_time TEXT, %s TEXT, %s TEXT, %s TEXT, %s TEXT, %s TEXT)
        ''' % tuple(units)
        c.execute(e)
        dt = datetime.strptime("%s %s" % (date, time), "%d/%m/%Y %H:%M:%S")
        dts = dt.strftime("%Y-%m-%d %H:%M:%S.000")
        c.execute("SELECT * FROM readings WHERE date_time=?", (dts,))
        if c.fetchall():
            # Duplicate timestamp -- keep the stored row.
            print("Already exists")
            return
        e = '''INSERT INTO readings(date_time, %s, %s, %s, %s, %s) VALUES(?,?,?,?,?,?)''' % tuple(units)
        t = (dts, "%s %s" % reading[O3], "%s %s" % reading[NO2], "%s %s" % reading[SO2], "%s %s" % reading[PM25], "%s %s" % reading[PM100])
        c.execute(e, t)
        db.commit()
    finally:
        # Fix: the connection previously leaked on the duplicate-row
        # early return; close it on every path.
        db.close()
def pickleReadings(allreading):
    """Persist the readings collection to ``allreadings.bin``.

    Fix: the body previously referenced the undefined global name
    ``allreadings`` instead of the ``allreading`` parameter, raising
    NameError on every call; it also leaked the file handle.
    """
    fall = "allreadings.bin"
    with open(fall, "wb") as f:
        pickle.dump(allreading, f)
def compareWHO(allreadings):
    """Collect readings that exceed the WHO guideline values.

    :param allreadings: iterable of (day, clock, reading) tuples as
                        produced by :func:`scrape` / :func:`convert`.
    :returns: dict mapping pollutant -> list of (day, clock, value)
              tuples for every reading above ``guides[pollutant]``.
    """
    above = {}
    for (day, clock, reading) in allreadings:
        for k in guides:
            value = reading[k][0]
            # "n/a" readings are strings; compare only real numbers.
            # (isinstance replaces the fragile ``type(x) == type(1.0)``.)
            if isinstance(value, float) and value > guides[k]:
                above.setdefault(k, []).append((day, clock, value))
    return above
def weatherTweetToDict(t):
    """Parse a @livuniwx weather tweet into a dict.

    :param t: a tweepy status whose text looks like
              "... AirTemp 12.3C, RH 80%, wind speed 4.2 m/s,
              wind dir 180 deg, Time 10:00UTC".
    :returns: dict with keys temp/rh/windspeed/winddir/time/datetime/tweet,
              or ``None`` (implicitly) when the text does not match.
    """
    m = re.match(".*AirTemp ([\+\-0-9.]*).*?, RH ([0-9]*?)\%, wind speed ([0-9.]*) m\/s, wind dir ([0-9.]*?) deg, Time ([0-9:]*?)UTC", t.text)
    if m:
        try:
            d = {"temp": float(m.group(1)), "rh": int(m.group(2)), "windspeed": float(m.group(3)), "winddir": float(m.group(4)), "time": m.group(5)}
            d["datetime"] = t.created_at
            d["tweet"] = t
            return d
        except Exception as e:
            # Log the offending tweet text before propagating.
            print(t.text)
            raise e
def getAndPickleWeather(fn, readings):
    """Fetch @livuniwx weather tweets back to the oldest reading and pickle them.

    Pages backwards through the user timeline (via ``max_id``) until the
    tweets are older than the oldest stored reading, then dumps the parsed
    dicts to *fn*.

    :param fn: pickle output filename.
    :param readings: readings collection, newest first; the last entry
                     bounds how far back to fetch.
    """
    api = twitterAPI()
    oldestReading = toDT(readings[-1][0], readings[-1][1])
    idlast = None
    alltweets = []
    while True:
        # NOTE(review): dead branch -- ``if 0`` never runs; the max_id=None
        # first call behaves like a plain user_timeline() request anyway.
        if 0:#idlast == None:
            r = api.user_timeline("@livuniwx")
        else:
            r = api.user_timeline("@livuniwx", max_id=idlast)
        for i, t in enumerate(r[:-1]):
            d = weatherTweetToDict(t)
            if d:
                alltweets.append(d)
        if r[-1].created_at < oldestReading:
            break
        idlast = r[-1].id
    pickle.dump(alltweets, open(fn, "wb"))
    print("Pickled ", len(alltweets), " tweets")
def loadWeatherTweets(fn):
    """Load pickled weather-tweet dicts from ``fn``.

    Sanity-checks that the entries are in strictly descending datetime
    order (newest first) before returning them.
    """
    tweets = pickle.load(open(fn, "rb"))
    previous = tweets[0]["datetime"]
    for entry in tweets[1:]:
        assert entry["datetime"] < previous
        previous = entry["datetime"]
    return tweets
def testCMap():
    """Display a horizontal gradient of the green->yellow->red colourmap
    used for pollution severity, for visual inspection."""
    # colourmap from green over yellow to red
    cdict = {
        'red' : ((0.00, 0.00, 0.00),
                 (0.50, 1.00, 1.00),
                 (1.00, 1.00, 1.00)),
        'green': ((0.00, 1.00, 1.00),
                  (0.50, 1.00, 1.00),
                  (1.00, 0.00, 0.00)),
        'blue' : ((0.00, 0.00, 0.00),
                  (0.50, 0.00, 0.00),
                  (1.00, 0.00, 0.00)),
    }
    cm = LinearSegmentedColormap("mymap", cdict, 256)
    # Two identical rows covering the full 0..1 colour range.
    gradient = np.linspace(0, 1, 256)
    gradient = np.vstack((gradient, gradient))
    fig, axes = plt.subplots(nrows=1)
    fig.subplots_adjust(top=0.95, bottom=0.01, left=0.2, right=0.99)
    #axes[0].set_title(cmap_category + ' colormaps', fontsize=14)
    axes.imshow(gradient, aspect='auto', cmap=cm)
    # Text-placement scaffolding kept from the matplotlib colormap demo;
    # the fig.text call itself is disabled below.
    pos = list(axes.get_position().bounds)
    x_text = pos[0] - 0.01
    y_text = pos[1] + pos[3]/2.
    #fig.text(x_text, y_text, name, va='center', ha='right', fontsize=10)
    axes.set_axis_off()
    plt.show()
def Along(lam, a, b):
    """Linear interpolation: returns ``a`` at lam == 0.0 and ``b`` at lam == 1.0."""
    return (1.0 - lam) * a + lam * b
class Gauge:
    """Animated semicircular dial showing one pollutant over time.

    The dial sweeps 0..180 degrees from ``valmin`` to ``valmax`` (120% of
    the WHO guideline), with a green/red background split at the limit.
    ``drawGauge`` is intended as a matplotlib FuncAnimation callback.
    """
    def __init__(self, dates, data, C):
        # dates: per-frame timestamps; data: per-frame values (may be 'n/a');
        # C: pollutant key into the module-level ``guides`` dict.
        self.dates = dates
        self.data = data
        self.C = C
        self.titles = {O3: r"$O_3$", NO2: r"$NO_2$", SO2: r"$SO_2$", PM25: r"$PM_{2.5}$", PM100: r"$PM_{10}$"}
        self.mgpqm = "${\mu gm^{-3}}$"
        self.maxValue = None
        self.fig = plt.figure()
        self.ax = self.fig.add_subplot(1,1,1)
        self.ax.set_xlim([-1.2, 1.2])
        self.ax.set_ylim([-0.2, 1.2])
        self.ax.set_aspect("equal")
        plt.axis('off')
        # Hub of the dial needle.
        circle = patches.Circle((0, 0), 0.06, color="orange", path_effects=[patheffects.SimplePatchShadow(), patheffects.Normal()])
        circle.zorder = 200
        self.ax.add_artist(circle)
        # 50% available for valmin to valmax, where is limit
        self.valmin = 0
        self.valmax = 1.2 * guides[C]
        self.wholimit = guides[C]
        self.rad = 0.9
        # Angle (degrees) of the WHO limit on the dial, used to split the
        # background into a green (below) and red (above) wedge.
        lim = 180.0*(1.0 - self.toDialPos(self.wholimit)[2] / math.pi)
        wedgeBelow = patches.Wedge((0, 0), 1.0, lim, 180.0, color=(0.8, 1, 0.8))
        wedgeAbove = patches.Wedge((0, 0), 1.0, 0.0, lim, color=(1, 0.8, 0.8))
        self.ax.add_patch(wedgeBelow)
        self.ax.add_patch(wedgeAbove)
        self.apatch = None
        self.maxArtist = None
        self.lastValue = 0.0
        self.addLabels()
    def toDialPos(self, value):
        """Map a value to the needle tip (x, y) and angle theta in radians;
        theta runs 0 (left, valmin) to pi (right, valmax)."""
        theta = ((value - self.valmin) / (self.valmax - self.valmin)) * math.pi
        sx, sy = -self.rad * math.cos(theta), self.rad * math.sin(theta)
        return sx, sy, theta
    def drawGauge(self, frame):
        """FuncAnimation callback: draw the needle (and max marker) for frame."""
        # transform value to angle between 0=valmin and 180=valmax
        value = self.data[frame]
        dialColor = "orange"
        if value == "n/a":
            # Missing reading: keep the needle at its previous position and
            # grey it out.
            value = self.lastValue
            dialColor = "grey"
        self.lastValue = value
        sx, sy, theta = self.toDialPos(value)
        if self.apatch:
            self.apatch.remove()
        arrow = patches.FancyArrow(0, 0, sx, sy, color=dialColor, width=0.05, length_includes_head=True, head_width=0.07, path_effects=[patheffects.SimplePatchShadow(), patheffects.Normal()])
        self.apatch = self.ax.add_patch(arrow)
        self.apatch.zorder = 100
        # draw the max value
        if self.maxValue == None or value > self.maxValue:
            # Small red marker just outside the dial rim at the max position.
            rx, ry = -(self.rad+0.07) * math.cos(theta), (self.rad+0.07) * math.sin(theta)
            tx, ty = 0.07 * math.cos(theta), -0.07 * math.sin(theta)
            arrow = patches.FancyArrow(rx, ry, tx, ty, color="red", width=0.0, length_includes_head=True, head_width=0.07, path_effects=[patheffects.SimplePatchShadow(), patheffects.Normal()])
            if self.maxValue != None:
                self.aMaxPatch.remove()
            self.aMaxPatch = self.ax.add_patch(arrow)
            self.maxValue = value
            self.maximTitle = "\n Maximum: %.1f%s, %s" % (self.maxValue, self.mgpqm, self.dates[frame].strftime("%d/%m/%Y %H:%M"))
        if self.maxArtist:
            self.maxArtist.remove()
        if dialColor == "grey":
            self.ax.set_title(self.titles[self.C] + " %s" % self.dates[frame].strftime("%d/%m/%Y %H:%M"), fontsize=12)
            self.maxArtist = self.ax.add_artist(Text(0, 1.25 * self.rad, text="No readings recorded!", verticalalignment='baseline', horizontalalignment='center'))
        else:
            self.ax.set_title(self.titles[self.C] + " %s" % self.dates[frame].strftime("%d/%m/%Y %H:%M"), fontsize=12)
            self.maxArtist = self.ax.add_artist(Text(0, 1.25 * self.rad, text="%s" % (self.maximTitle), verticalalignment='baseline', horizontalalignment='center'))
    def addLabels(self):
        """Draw the static dial furniture: tick numbers, pollutant label
        and the WHO limit annotation."""
        # numbers around the top
        for i in range(11):
            value = Along(i/10.0, self.valmin, self.valmax)
            sx, sy, theta = self.toDialPos(value)
            self.ax.add_artist(Text(sx, sy, text="%.0f" % value, verticalalignment='baseline', horizontalalignment='center', rotation=90.0 - math.degrees(theta)))
        # label what we are showing
        self.ax.add_artist(Text(0, self.rad/2, text="%s\n[%s]" % (self.titles[self.C], self.mgpqm), verticalalignment='baseline', horizontalalignment='center'))
        # WHO guide information
        self.ax.add_artist(Text(0, -0.2 * self.rad, text="WHO Limit: %s%s" % (guides[self.C], self.mgpqm), verticalalignment='baseline', horizontalalignment='center', color=(1, 0.8, 0.8)))
def plotRadial(readings, C):
    """Render an animated gauge GIF for pollutant ``C`` and return its filename."""
    dates = [toDT(d, c) for d, c, r in readings]
    data = [r[C][0] for d, c, r in readings]  # data
    d0, d1 = dates[0], dates[-1]  # date range
    gauge = Gauge(dates, data, C)
    # Repeat frame 0 so the GIF pauses briefly before animating.
    framlist = [0, 0, 0, 0, 0, 0, 0, 0]
    framlist.extend(range(len(data)))
    anim = FuncAnimation(gauge.fig, gauge.drawGauge, frames=framlist, interval=200)
    # Filename encodes the newest timestamp; used elsewhere to find the
    # last-posted GIF.
    fn = "gauge_%s.gif" % d1.strftime("%Y%m%d%H%M")
    anim.save(fn, dpi=100, writer='imagemagick')
    plt.close(gauge.fig)
    return fn
def plotLinear(readings, C):
    """Scatter-plot pollutant ``C`` over time with a green/red WHO-limit
    background; saves a PNG and returns its filename."""
    titles = {O3: r"$O_3$", NO2: r"$NO_2$", SO2: r"$SO_2$", PM25: r"$PM_{2.5}$", PM100: r"$PM_{10}$"}
    dates = [toDT(d, c) for d, c, r in readings]
    data = [r[C][0] for d, c, r in readings]  # data
    # Drop 'n/a' entries (sensor outages) before plotting.
    # NOTE(review): if every reading is 'n/a' this raises on max(data) /
    # dates[0] below — confirm upstream guarantees some valid data.
    newdates, newdata = [], []
    for date, val in zip(dates, data):
        if val != 'n/a':
            newdates.append(date)
            newdata.append(val)
    data = newdata
    dates = newdates
    d0, d1 = dates[0], dates[-1]  # date range
    fig = plt.figure()
    ax = fig.add_subplot(1,1,1)
    # format x axis
    ax.xaxis_date()
    ax.set_xlim(d0, d1)
    ax.xaxis.set_minor_formatter(mdates.DateFormatter('%Hh'))
    ax.xaxis.set_minor_locator(AutoMinorLocator(2))
    # format y axis
    ax.xaxis.set_major_formatter(mdates.DateFormatter('%a %d/%m'))
    ax.yaxis.set_major_formatter(FormatStrFormatter(r'%.0f$\frac{\mu g}{m^3}$'))
    ax.set_ylim(0, max(data) + 5)
    # green / red background division above and below WHO guide
    guide = guides[C]
    ax.fill_between([d0, d1, d1, d0], [0, 0, guide, guide], facecolor=(0.8, 1, 0.8), edgecolor="none")
    ax.fill_between([d0, d1, d1, d0], [guide, guide, max(data) + 5, max(data) + 5], facecolor=(1, 0.8, 0.8), edgecolor="none")
    ax.scatter(dates, data)
    ax.set_title(titles[C] + " for %s to %s,\nLiverpool Speke (%s)" % (d0.strftime("%d/%m/%Y"), d1.strftime("%d/%m/%Y"), urlstr), fontsize=10)
    ax.tick_params(axis='both', which='both', labelsize=10)
    fig.autofmt_xdate()
    plt.grid(which='major')
    # Filename encodes the newest date; used elsewhere to find the
    # last-posted figure.
    fn = "figure_%s.png" % d1.strftime("%Y%m%d")
    plt.savefig(fn, dpi=600)
    return fn
def plotPolar(readings, weathertweets):
    """Polar scatter of PM2.5 coloured by concentration, positioned by
    wind direction (angle) and wind speed (radius).

    Args:
        readings: list of (day, clock, reading) tuples, newest first.
        weathertweets: parsed weather tweets (see loadWeatherTweets),
            sorted by descending datetime.
    """
    def findInWT(dt, wt):
        # Find the weather tweet paired with a reading's timestamp.
        # NOTE(review): the difference is signed, so any tweet *older* than
        # dt also satisfies the test; should this be abs(...)? Confirm
        # intended pairing before changing.
        for t in wt:
            if t["datetime"] - dt < timedelta(minutes=10):
                return t
        assert 0
    # pair pollution readings with weather data
    pm25 = []
    pm100 = []
    windspeed = []
    winddir = []
    dates = []
    for r in readings:
        d, c, rr = r
        dt = toDT(d, c)
        # find dt in wt
        w = findInWT(dt, weathertweets)
        dates.append(dt)
        if type(rr[PM25][0]) != type(''):  # skip 'n/a' readings
            pm25.append(rr[PM25][0])
            windspeed.append(w["windspeed"])
            winddir.append(w["winddir"])
        #if type(rr[PM100][0]) != type(''):
        #    pm100.append(rr[PM100][0])
    theta = np.radians(winddir)
    # colourmap from green over yellow to red
    cdict = {
        'red' : ((0.00, 0.00, 0.00),
                 (0.50, 1.00, 1.00),
                 (1.00, 1.00, 1.00)),
        'green': ((0.00, 1.00, 1.00),
                  (0.50, 1.00, 1.00),
                  (1.00, 0.00, 0.00)),
        'blue' : ((0.00, 0.00, 0.00),
                  (0.50, 0.00, 0.00),
                  (1.00, 0.00, 0.00)),
    }
    cm = LinearSegmentedColormap("greentored", cdict, 256)
    ax = plt.subplot(111, projection='polar')
    ax.scatter(theta, windspeed, c=pm25, s=100, cmap=cm, edgecolors='none')
    ax.set_rmax(max(windspeed) + 1)
    ax.set_rticks(np.arange(0, max(windspeed), 1))  # less radial ticks
    ax.set_rlabel_position(300)  # get radial labels away from plotted line
    ax.set_theta_zero_location("S")
    ax.set_theta_direction(-1)
    ax.grid(True)
    # tick locations
    thetaticks = np.arange(0, 360, 90)
    ax.set_thetagrids(thetaticks, frac=1.01)
    #img = plt.imread("speke.png")
    #plt.imshow(img, extent=[0,10,0,10])
    # Bug fix: the title previously referenced the module-level
    # ``allreadings`` global instead of the ``readings`` parameter.
    ax.set_title("PM25 %s to %s" % (readings[-1][0], readings[0][0]))
    plt.show()
if __name__ == "__main__":
    # Entry point: parse the run mode and dispatch to one of the
    # plotting / tweeting workflows. Default ('regular') scrapes the
    # latest reading and tweets it if it is new.
    parser = OptionParser()
    # NOTE(review): this second OptionParser discards the first — intentional?
    parser = OptionParser(usage='usage: %prog [options] ')
    parser.add_option("-f", "--file", dest="filename",
                      help="", metavar="FILE")
    parser.add_option('-m', '--mode',
                      type='choice',
                      action='store',
                      dest='mode',
                      choices=['plotpollution', 'debug', 'saveweather', 'plotpollutionLinear', 'plotRadial', 'regular'],
                      default='regular',
                      help='Choose mode',)
    (options, args) = parser.parse_args()
    mode = options.mode
    loadAPIKeys()
    #allreadings = loadReadings()
    # remove duplicate entries (could have come in while debugging)
    #ic = 0
    #while ic < len(allreadings):
    #    r = allreadings[ic]
    #    while allreadings.count(r) > 1:
    #        allreadings.remove(r)
    #    ic += 1
    if mode == 'debug':
        #day, clock, reading = scrape()
        #saveLastReading("readings.db", day, clock, reading)
        #r = loadLastReading("readings.db")
        #c = convert(r)
        #print(c)
        #print(scrape())
        # find when we last posted an image
        files = [f for f in os.listdir('.') if re.match("gauge_[0-9]*.gif", f)]
        if files:
            datelast = max([datetime.strptime(f, "gauge_%Y%m%d%H%M.gif") for f in files])
        else:
            # No previous GIF: pretend the last one is long ago so we plot.
            datelast = datetime.today() - timedelta(days=100)
        sincelastplot = (datetime.today() - datelast)
        if (sincelastplot > timedelta(hours=24 * 2)):
            allreadings = convert(loadAllReadings("readings.db"))
            allreadings.reverse()
            # Only the readings since the last posted image.
            readings = [(d, h, r) for (d, h, r) in allreadings if toDT(d, h) >= datelast]
            d0, d1 = toDT(readings[0][0], readings[0][1]), toDT(readings[-1][0], readings[-1][1])
            fn = plotRadial(readings, PM25)
            #tweet(PM25 + "\n%s - %s" % (d0.strftime("%d/%m/%Y"), d1.strftime("%d/%m/%Y")), None, fn)
    elif mode == 'saveweather':
        # Download and pickle the matching weather tweets.
        allreadings = convert(loadAllReadings("readings.db"))
        getAndPickleWeather("weathertweets.bin", allreadings)
    elif mode == 'plotpollution':
        # NOTE(review): relies on a module-level ``allreadings`` that is
        # only defined by other branches / commented-out code — confirm.
        weathertweets = loadWeatherTweets("weathertweets.bin")
        plotPolar(allreadings, weathertweets)
    elif mode == 'plotRadial':
        # Same as the debug branch, but actually tweets the GIF.
        files = [f for f in os.listdir('.') if re.match("gauge_[0-9]*.gif", f)]
        if files:
            datelast = max([datetime.strptime(f, "gauge_%Y%m%d%H%M.gif") for f in files])
        else:
            datelast = datetime.today() - timedelta(days=100)
        sincelastplot = (datetime.today() - datelast)
        if (sincelastplot > timedelta(hours=24 * 2)):
            allreadings = convert(loadAllReadings("readings.db"))
            allreadings.reverse()
            readings = [(d, h, r) for (d, h, r) in allreadings if toDT(d, h) >= datelast]
            d0, d1 = toDT(readings[0][0], readings[0][1]), toDT(readings[-1][0], readings[-1][1])
            fn = plotRadial(readings, PM25)
            tweet(PM25 + "\n%s - %s" % (d0.strftime("%d/%m/%Y"), d1.strftime("%d/%m/%Y")), None, fn)
    elif mode == "plotpollutionLinear":
        # find when we last posted an image
        files = [f for f in os.listdir('.') if re.match("figure_[0-9]*.png", f)]
        if files:
            datelast = max([datetime.strptime(f, "figure_%Y%m%d.png") for f in files])
            datelast += timedelta(hours=12)
        else:
            datelast = datetime.today() - timedelta(days=100)
        sincelastplot = (datetime.today() - datelast)
        if (sincelastplot > timedelta(hours=24 * 3)):
            allreadings = convert(loadAllReadings("readings.db"))
            allreadings.reverse()
            readings = [(d, h, r) for (d, h, r) in allreadings if toDT(d, h) >= datelast]
            figure = plotLinear(readings, PM25)
            d0, d1 = toDT(readings[0][0], readings[0][1]), toDT(readings[-1][0], readings[-1][1])
            #tweet(PM25 + "\n%s - %s" % (d0.strftime("%d/%m/%Y"), d1.strftime("%d/%m/%Y")), None, figure)
    else:
        # Regular mode: scrape the current reading and tweet it if new.
        day, clock, reading = scrape()
        r = loadLastReading("readings.db")
        converted = convert(r)
        assert(len(converted) == 1)
        lastday, lastclock, lastreading = converted[-1]
        if ((day, clock) != (lastday, lastclock)):
            status = compose(day, clock, reading)
            rtweet = tweet(status)
            saveLastReading("readings.db", day, clock, reading)
            allreadings = convert(loadAllReadings("readings.db"))
            # compare with WHO recommendations
            r = allreadings and compareWHO(allreadings)
            if r:
                # Post WHO-exceedance details as replies to the reading tweet.
                stats = composeAboveTweet(day, clock, r, rtweet)
                for s in stats:
                    tweet(s, replyto=rtweet)
        else:
            print("Reading already known")
|
apache-2.0
|
msingh172/youtube-dl
|
youtube_dl/downloader/rtmp.py
|
95
|
8353
|
from __future__ import unicode_literals
import os
import re
import subprocess
import time
from .common import FileDownloader
from ..compat import compat_str
from ..utils import (
check_executable,
encodeFilename,
encodeArgument,
get_exe_version,
)
def rtmpdump_version():
    """Return the installed rtmpdump version string, or None if unavailable."""
    version_pattern = r'(?i)RTMPDump\s*v?([0-9a-zA-Z._-]+)'
    return get_exe_version('rtmpdump', ['--help'], version_pattern)
class RtmpFD(FileDownloader):
    """Downloader for RTMP streams.

    Delegates the actual transfer to the external ``rtmpdump`` binary and
    parses its stderr output to report download progress.
    """
    def real_download(self, filename, info_dict):
        def run_rtmpdump(args):
            # Run rtmpdump once, streaming progress from its stderr.
            # Returns the process exit code (see RD_* constants below).
            start = time.time()
            resume_percent = None
            resume_downloaded_data_len = None
            proc = subprocess.Popen(args, stderr=subprocess.PIPE)
            cursor_in_new_line = True
            proc_stderr_closed = False
            while not proc_stderr_closed:
                # read line from stderr
                # rtmpdump redraws its progress line with '\r', so read
                # char-by-char and treat both '\r' and '\n' as line ends.
                line = ''
                while True:
                    char = proc.stderr.read(1)
                    if not char:
                        proc_stderr_closed = True
                        break
                    if char in [b'\r', b'\n']:
                        break
                    line += char.decode('ascii', 'replace')
                if not line:
                    # proc_stderr_closed is True
                    continue
                # Progress with percentage (VOD downloads).
                mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec \(([0-9]{1,2}\.[0-9])%\)', line)
                if mobj:
                    downloaded_data_len = int(float(mobj.group(1)) * 1024)
                    percent = float(mobj.group(2))
                    if not resume_percent:
                        # First progress line after a resume: remember the
                        # baseline so ETA/speed reflect only this session.
                        resume_percent = percent
                        resume_downloaded_data_len = downloaded_data_len
                    time_now = time.time()
                    eta = self.calc_eta(start, time_now, 100 - resume_percent, percent - resume_percent)
                    speed = self.calc_speed(start, time_now, downloaded_data_len - resume_downloaded_data_len)
                    data_len = None
                    if percent > 0:
                        data_len = int(downloaded_data_len * 100 / percent)
                    self._hook_progress({
                        'status': 'downloading',
                        'downloaded_bytes': downloaded_data_len,
                        'total_bytes_estimate': data_len,
                        'tmpfilename': tmpfilename,
                        'filename': filename,
                        'eta': eta,
                        'elapsed': time_now - start,
                        'speed': speed,
                    })
                    cursor_in_new_line = False
                else:
                    # no percent for live streams
                    mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec', line)
                    if mobj:
                        downloaded_data_len = int(float(mobj.group(1)) * 1024)
                        time_now = time.time()
                        speed = self.calc_speed(start, time_now, downloaded_data_len)
                        self._hook_progress({
                            'downloaded_bytes': downloaded_data_len,
                            'tmpfilename': tmpfilename,
                            'filename': filename,
                            'status': 'downloading',
                            'elapsed': time_now - start,
                            'speed': speed,
                        })
                        cursor_in_new_line = False
                    elif self.params.get('verbose', False):
                        # Non-progress output: echo it in verbose mode.
                        if not cursor_in_new_line:
                            self.to_screen('')
                            cursor_in_new_line = True
                        self.to_screen('[rtmpdump] ' + line)
            proc.wait()
            if not cursor_in_new_line:
                self.to_screen('')
            return proc.returncode

        url = info_dict['url']
        player_url = info_dict.get('player_url', None)
        page_url = info_dict.get('page_url', None)
        app = info_dict.get('app', None)
        play_path = info_dict.get('play_path', None)
        tc_url = info_dict.get('tc_url', None)
        flash_version = info_dict.get('flash_version', None)
        live = info_dict.get('rtmp_live', False)
        conn = info_dict.get('rtmp_conn', None)
        protocol = info_dict.get('rtmp_protocol', None)
        real_time = info_dict.get('rtmp_real_time', False)
        no_resume = info_dict.get('no_resume', False)
        continue_dl = info_dict.get('continuedl', True)

        self.report_destination(filename)
        tmpfilename = self.temp_name(filename)
        test = self.params.get('test', False)

        # Check for rtmpdump first
        if not check_executable('rtmpdump', ['-h']):
            self.report_error('RTMP download detected but "rtmpdump" could not be run. Please install it.')
            return False

        # Download using rtmpdump. rtmpdump returns exit code 2 when
        # the connection was interrumpted and resuming appears to be
        # possible. This is part of rtmpdump's normal usage, AFAIK.
        basic_args = [
            'rtmpdump', '--verbose', '-r', url,
            '-o', tmpfilename]
        if player_url is not None:
            basic_args += ['--swfVfy', player_url]
        if page_url is not None:
            basic_args += ['--pageUrl', page_url]
        if app is not None:
            basic_args += ['--app', app]
        if play_path is not None:
            basic_args += ['--playpath', play_path]
        if tc_url is not None:
            basic_args += ['--tcUrl', tc_url]
        if test:
            # Test mode: stop after one second of stream.
            basic_args += ['--stop', '1']
        if flash_version is not None:
            basic_args += ['--flashVer', flash_version]
        if live:
            basic_args += ['--live']
        if isinstance(conn, list):
            for entry in conn:
                basic_args += ['--conn', entry]
        elif isinstance(conn, compat_str):
            basic_args += ['--conn', conn]
        if protocol is not None:
            basic_args += ['--protocol', protocol]
        if real_time:
            basic_args += ['--realtime']

        args = basic_args
        if not no_resume and continue_dl and not live:
            args += ['--resume']
        if not live and continue_dl:
            args += ['--skip', '1']

        args = [encodeArgument(a) for a in args]

        self._debug_cmd(args, exe='rtmpdump')

        # rtmpdump exit codes.
        RD_SUCCESS = 0
        RD_FAILED = 1
        RD_INCOMPLETE = 2
        RD_NO_CONNECT = 3

        retval = run_rtmpdump(args)

        if retval == RD_NO_CONNECT:
            self.report_error('[rtmpdump] Could not connect to RTMP server.')
            return False

        # Retry/resume loop: keep re-invoking rtmpdump while it reports an
        # incomplete or failed (but resumable) download. Bail out when the
        # file stops growing.
        while (retval == RD_INCOMPLETE or retval == RD_FAILED) and not test and not live:
            prevsize = os.path.getsize(encodeFilename(tmpfilename))
            self.to_screen('[rtmpdump] %s bytes' % prevsize)
            time.sleep(5.0)  # This seems to be needed
            args = basic_args + ['--resume']
            if retval == RD_FAILED:
                args += ['--skip', '1']
            args = [encodeArgument(a) for a in args]
            retval = run_rtmpdump(args)
            cursize = os.path.getsize(encodeFilename(tmpfilename))
            if prevsize == cursize and retval == RD_FAILED:
                break
            # Some rtmp streams seem abort after ~ 99.8%. Don't complain for those
            if prevsize == cursize and retval == RD_INCOMPLETE and cursize > 1024:
                self.to_screen('[rtmpdump] Could not download the whole video. This can happen for some advertisements.')
                retval = RD_SUCCESS
                break
        if retval == RD_SUCCESS or (test and retval == RD_INCOMPLETE):
            fsize = os.path.getsize(encodeFilename(tmpfilename))
            self.to_screen('[rtmpdump] %s bytes' % fsize)
            self.try_rename(tmpfilename, filename)
            self._hook_progress({
                'downloaded_bytes': fsize,
                'total_bytes': fsize,
                'filename': filename,
                'status': 'finished',
            })
            return True
        else:
            self.to_stderr('\n')
            self.report_error('rtmpdump exited with code %d' % retval)
            return False
|
unlicense
|
appcelerator/entourage
|
components/services/appengine/stub/simplejson/__init__.py
|
3
|
10786
|
r"""
A simple, fast, extensible JSON encoder and decoder
JSON (JavaScript Object Notation) <http://json.org> is a subset of
JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
interchange format.
simplejson exposes an API familiar to uses of the standard library
marshal and pickle modules.
Encoding basic Python object hierarchies::
>>> import simplejson
>>> simplejson.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
'["foo", {"bar": ["baz", null, 1.0, 2]}]'
>>> print simplejson.dumps("\"foo\bar")
"\"foo\bar"
>>> print simplejson.dumps(u'\u1234')
"\u1234"
>>> print simplejson.dumps('\\')
"\\"
>>> print simplejson.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)
{"a": 0, "b": 0, "c": 0}
>>> from StringIO import StringIO
>>> io = StringIO()
>>> simplejson.dump(['streaming API'], io)
>>> io.getvalue()
'["streaming API"]'
Compact encoding::
>>> import simplejson
>>> simplejson.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':'))
'[1,2,3,{"4":5,"6":7}]'
Pretty printing::
>>> import simplejson
>>> print simplejson.dumps({'4': 5, '6': 7}, sort_keys=True, indent=4)
{
"4": 5,
"6": 7
}
Decoding JSON::
>>> import simplejson
>>> simplejson.loads('["foo", {"bar":["baz", null, 1.0, 2]}]')
[u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
>>> simplejson.loads('"\\"foo\\bar"')
u'"foo\x08ar'
>>> from StringIO import StringIO
>>> io = StringIO('["streaming API"]')
>>> simplejson.load(io)
[u'streaming API']
Specializing JSON object decoding::
>>> import simplejson
>>> def as_complex(dct):
... if '__complex__' in dct:
... return complex(dct['real'], dct['imag'])
... return dct
...
>>> simplejson.loads('{"__complex__": true, "real": 1, "imag": 2}',
... object_hook=as_complex)
(1+2j)
Extending JSONEncoder::
>>> import simplejson
>>> class ComplexEncoder(simplejson.JSONEncoder):
... def default(self, obj):
... if isinstance(obj, complex):
... return [obj.real, obj.imag]
... return simplejson.JSONEncoder.default(self, obj)
...
>>> dumps(2 + 1j, cls=ComplexEncoder)
'[2.0, 1.0]'
>>> ComplexEncoder().encode(2 + 1j)
'[2.0, 1.0]'
>>> list(ComplexEncoder().iterencode(2 + 1j))
['[', '2.0', ', ', '1.0', ']']
Note that the JSON produced by this module's default settings
is a subset of YAML, so it may be used as a serializer for that as well.
"""
# Package version and the explicitly exported public API.
__version__ = '1.7.1'
__all__ = [
    'dump', 'dumps', 'load', 'loads',
    'JSONDecoder', 'JSONEncoder',
]
from decoder import JSONDecoder
from encoder import JSONEncoder
# Shared encoder instance reused by dump()/dumps() whenever the caller
# passes only default options (fast path avoiding re-instantiation).
_default_encoder = JSONEncoder(
    skipkeys=False,
    ensure_ascii=True,
    check_circular=True,
    allow_nan=True,
    indent=None,
    separators=None,
    encoding='utf-8'
)
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
        allow_nan=True, cls=None, indent=None, separators=None,
        encoding='utf-8', **kw):
    """
    Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
    ``.write()``-supporting file-like object).

    If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types
    (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
    will be skipped instead of raising a ``TypeError``.

    If ``ensure_ascii`` is ``False``, then the some chunks written to ``fp``
    may be ``unicode`` instances, subject to normal Python ``str`` to
    ``unicode`` coercion rules. Unless ``fp.write()`` explicitly
    understands ``unicode`` (as in ``codecs.getwriter()``) this is likely
    to cause an error.

    If ``check_circular`` is ``False``, then the circular reference check
    for container types will be skipped and a circular reference will
    result in an ``OverflowError`` (or worse).

    If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to
    serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
    in strict compliance of the JSON specification, instead of using the
    JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).

    If ``indent`` is a non-negative integer, then JSON array elements and object
    members will be pretty-printed with that indent level. An indent level
    of 0 will only insert newlines. ``None`` is the most compact representation.

    If ``separators`` is an ``(item_separator, dict_separator)`` tuple
    then it will be used instead of the default ``(', ', ': ')`` separators.
    ``(',', ':')`` is the most compact JSON representation.

    ``encoding`` is the character encoding for str instances, default is UTF-8.

    To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
    ``.default()`` method to serialize additional types), specify it with
    the ``cls`` kwarg.
    """
    # cached encoder: all defaults -> reuse the module-level instance.
    if (skipkeys is False and ensure_ascii is True and
        check_circular is True and allow_nan is True and
        cls is None and indent is None and separators is None and
        encoding == 'utf-8' and not kw):
        iterable = _default_encoder.iterencode(obj)
    else:
        if cls is None:
            cls = JSONEncoder
        iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
            check_circular=check_circular, allow_nan=allow_nan, indent=indent,
            separators=separators, encoding=encoding, **kw).iterencode(obj)
    # could accelerate with writelines in some versions of Python, at
    # a debuggability cost
    for chunk in iterable:
        fp.write(chunk)
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
        allow_nan=True, cls=None, indent=None, separators=None,
        encoding='utf-8', **kw):
    """
    Serialize ``obj`` to a JSON formatted ``str``.

    If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types
    (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
    will be skipped instead of raising a ``TypeError``.

    If ``ensure_ascii`` is ``False``, then the return value will be a
    ``unicode`` instance subject to normal Python ``str`` to ``unicode``
    coercion rules instead of being escaped to an ASCII ``str``.

    If ``check_circular`` is ``False``, then the circular reference check
    for container types will be skipped and a circular reference will
    result in an ``OverflowError`` (or worse).

    If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to
    serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
    strict compliance of the JSON specification, instead of using the
    JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).

    If ``indent`` is a non-negative integer, then JSON array elements and
    object members will be pretty-printed with that indent level. An indent
    level of 0 will only insert newlines. ``None`` is the most compact
    representation.

    If ``separators`` is an ``(item_separator, dict_separator)`` tuple
    then it will be used instead of the default ``(', ', ': ')`` separators.
    ``(',', ':')`` is the most compact JSON representation.

    ``encoding`` is the character encoding for str instances, default is UTF-8.

    To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
    ``.default()`` method to serialize additional types), specify it with
    the ``cls`` kwarg.
    """
    # cached encoder: all defaults -> reuse the module-level instance.
    if (skipkeys is False and ensure_ascii is True and
        check_circular is True and allow_nan is True and
        cls is None and indent is None and separators is None and
        encoding == 'utf-8' and not kw):
        return _default_encoder.encode(obj)
    if cls is None:
        cls = JSONEncoder
    return cls(
        skipkeys=skipkeys, ensure_ascii=ensure_ascii,
        check_circular=check_circular, allow_nan=allow_nan, indent=indent,
        separators=separators, encoding=encoding,
        **kw).encode(obj)
# Shared decoder instance reused by load()/loads() when no custom options
# are supplied (fast path).
_default_decoder = JSONDecoder(encoding=None, object_hook=None)
def load(fp, encoding=None, cls=None, object_hook=None, **kw):
    """Deserialize a JSON document read from ``fp`` (a ``.read()``-supporting
    file-like object) to a Python object.

    ``encoding`` names an ASCII-based encoding when the content is not
    utf-8; non-ASCII-based encodings (such as UCS-2) are not allowed and
    should be wrapped with ``codecs.getreader(fp)(encoding)`` or decoded to
    ``unicode`` and passed to ``loads()``.  ``object_hook``, if given, is
    called with every decoded object literal (``dict``) and its return
    value is used in place of the ``dict`` — useful for custom decoders
    such as JSON-RPC class hinting.  A custom ``JSONDecoder`` subclass may
    be supplied via ``cls``.
    """
    document = fp.read()
    return loads(document, encoding=encoding, cls=cls,
                 object_hook=object_hook, **kw)
def loads(s, encoding=None, cls=None, object_hook=None, **kw):
    """Deserialize a JSON document ``s`` (a ``str`` or ``unicode`` instance)
    to a Python object.

    ``encoding`` names an ASCII-based encoding when ``s`` is a non-utf-8
    ``str``; encodings that are not ASCII based (such as UCS-2) are not
    allowed and should be decoded to ``unicode`` first.  ``object_hook``,
    if given, is called with every decoded object literal (``dict``) and
    its return value is used in place of the ``dict`` — useful for custom
    decoders such as JSON-RPC class hinting.  A custom ``JSONDecoder``
    subclass may be supplied via ``cls``.
    """
    use_default = (cls is None and encoding is None
                   and object_hook is None and not kw)
    if use_default:
        # Fast path: reuse the shared module-level decoder.
        return _default_decoder.decode(s)
    decoder_cls = JSONDecoder if cls is None else cls
    if object_hook is not None:
        kw['object_hook'] = object_hook
    return decoder_cls(encoding=encoding, **kw).decode(s)
def read(s):
    """Deprecated json-py API compatibility alias; use ``loads(s)`` instead."""
    import warnings
    message = "simplejson.loads(s) should be used instead of read(s)"
    warnings.warn(message, DeprecationWarning)
    return loads(s)
def write(obj):
    """Deprecated json-py API compatibility alias; use ``dumps(obj)`` instead."""
    import warnings
    message = "simplejson.dumps(s) should be used instead of write(s)"
    warnings.warn(message, DeprecationWarning)
    return dumps(obj)
|
apache-2.0
|
Intel-Corporation/tensorflow
|
tensorflow/python/debug/lib/grpc_tensorflow_server.py
|
22
|
4686
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python-based TensorFlow GRPC server.
Takes input arguments cluster_spec, job_name and task_id, and start a blocking
TensorFlow GRPC server.
Usage:
grpc_tensorflow_server.py --cluster_spec=SPEC --job_name=NAME --task_id=ID
Where:
SPEC is <JOB>(,<JOB>)*
JOB is <NAME>|<HOST:PORT>(;<HOST:PORT>)*
NAME is a valid job name ([a-z][0-9a-z]*)
HOST is a hostname or IP address
PORT is a port number
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import tensorflow_server_pb2
from tensorflow.python.platform import app
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
def parse_cluster_spec(cluster_spec, cluster, verbose=False):
  """Parse content of cluster_spec string and inject info into cluster protobuf.

  Args:
    cluster_spec: cluster specification string, e.g.,
          "local|localhost:2222;localhost:2223"
    cluster: cluster protobuf.
    verbose: If verbose logging is requested.

  Raises:
    ValueError: if the cluster_spec string is invalid.
  """
  # Reject an empty spec up front (previously checked only after splitting).
  if not cluster_spec:
    raise ValueError("Empty cluster_spec string")

  for job_string in cluster_spec.split(","):
    # Validate before mutating the protobuf: the original added an empty
    # job entry to ``cluster`` even when the job string was malformed.
    if job_string.count("|") != 1:
      raise ValueError("Not exactly one instance of '|' in cluster_spec")

    job_name, _, task_string = job_string.partition("|")
    if not job_name:
      raise ValueError("Empty job_name in cluster_spec")

    job_def = cluster.job.add()
    job_def.name = job_name
    if verbose:
      logging.info("Added job named \"%s\"", job_name)

    for i, task in enumerate(task_string.split(";")):
      if not task:
        raise ValueError("Empty task string at position %d" % i)
      job_def.tasks[i] = task
      if verbose:
        logging.info("  Added task \"%s\" to job \"%s\"",
                     task, job_name)
def main(unused_args):
  """Build a ServerDef from FLAGS and run a blocking GRPC server."""
  # Create Protobuf ServerDef
  server_def = tensorflow_server_pb2.ServerDef(protocol="grpc")

  # Cluster info
  parse_cluster_spec(FLAGS.cluster_spec, server_def.cluster, FLAGS.verbose)

  # Job name
  if not FLAGS.job_name:
    raise ValueError("Empty job_name")
  server_def.job_name = FLAGS.job_name

  # Task index
  if FLAGS.task_id < 0:
    raise ValueError("Invalid task_id: %d" % FLAGS.task_id)
  server_def.task_index = FLAGS.task_id

  # Limit per-process GPU memory according to the command-line flag.
  config = config_pb2.ConfigProto(gpu_options=config_pb2.GPUOptions(
      per_process_gpu_memory_fraction=FLAGS.gpu_memory_fraction))

  # Create GRPC Server instance
  server = server_lib.Server(server_def, config=config)

  # join() is blocking, unlike start()
  server.join()
if __name__ == "__main__":
  # Parse command-line flags into FLAGS and hand control to main() via
  # the TensorFlow app runner.
  parser = argparse.ArgumentParser()
  # Let --verbose accept "true"/"false" strings as a boolean.
  parser.register("type", "bool", lambda v: v.lower() == "true")
  parser.add_argument(
      "--cluster_spec",
      type=str,
      default="",
      help="""\
      Cluster spec: SPEC. SPEC is <JOB>(,<JOB>)*," JOB is
      <NAME>|<HOST:PORT>(;<HOST:PORT>)*," NAME is a valid job name
      ([a-z][0-9a-z]*)," HOST is a hostname or IP address," PORT is a
      port number." E.g., local|localhost:2222;localhost:2223,
      ps|ps0:2222;ps1:2222\
      """
  )
  parser.add_argument(
      "--job_name",
      type=str,
      default="",
      help="Job name: e.g., local"
  )
  parser.add_argument(
      "--task_id",
      type=int,
      default=0,
      help="Task index, e.g., 0"
  )
  parser.add_argument(
      "--gpu_memory_fraction",
      type=float,
      default=1.0,
      help="Fraction of GPU memory allocated",)
  parser.add_argument(
      "--verbose",
      type="bool",
      nargs="?",
      const=True,
      default=False,
      help="Verbose mode"
  )
  # Unrecognized args are forwarded to app.run so TF flags still work.
  FLAGS, unparsed = parser.parse_known_args()
  app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
apache-2.0
|
Abi1ity/uniclust2.0
|
SQLAlchemy-0.9.9/test/ext/test_horizontal_shard.py
|
2
|
8747
|
import datetime
import os
from sqlalchemy import *
from sqlalchemy import event
from sqlalchemy import sql, util
from sqlalchemy.orm import *
from sqlalchemy.ext.horizontal_shard import ShardedSession
from sqlalchemy.sql import operators
from sqlalchemy.testing import fixtures
from sqlalchemy.testing.engines import testing_engine
from sqlalchemy.testing import eq_
# TODO: ShardTest can be turned into a base for further subclasses
class ShardTest(object):
    """Base fixture for horizontal-sharding tests.

    Subclasses provide ``_init_dbs()`` returning four engines; the test
    data is partitioned by continent across those four "shards".  State
    (engines, tables, mapped classes, session factory) is deliberately
    kept in module-level globals, matching the original test design.
    """
    __skip_if__ = (lambda: util.win32,)
    __requires__ = 'sqlite',
    # Optional schema name; AttachedFileShardTest overrides this.
    schema = None

    def setUp(self):
        """Create tables on all four shards and seed the id counter."""
        global db1, db2, db3, db4, weather_locations, weather_reports
        db1, db2, db3, db4 = self._init_dbs()
        meta = MetaData()
        ids = Table('ids', meta,
            Column('nextid', Integer, nullable=False))

        def id_generator(ctx):
            # in reality, might want to use a separate transaction for this.
            # Primary keys come from a counter table on shard 1 so ids are
            # unique across all shards.
            c = db1.contextual_connect()
            nextid = c.execute(ids.select(for_update=True)).scalar()
            c.execute(ids.update(values={ids.c.nextid: ids.c.nextid + 1}))
            return nextid
        weather_locations = Table("weather_locations", meta,
            Column('id', Integer, primary_key=True, default=id_generator),
            Column('continent', String(30), nullable=False),
            Column('city', String(50), nullable=False),
            schema=self.schema
        )
        weather_reports = Table(
            'weather_reports',
            meta,
            Column('id', Integer, primary_key=True),
            Column('location_id', Integer,
                   ForeignKey(weather_locations.c.id)),
            Column('temperature', Float),
            Column('report_time', DateTime,
                   default=datetime.datetime.now),
            schema=self.schema
        )
        for db in (db1, db2, db3, db4):
            meta.create_all(db)
        db1.execute(ids.insert(), nextid=1)
        self.setup_session()
        self.setup_mappers()

    @classmethod
    def setup_session(cls):
        """Build the global ShardedSession factory with chooser callables."""
        global create_session
        shard_lookup = {
            'North America': 'north_america',
            'Asia': 'asia',
            'Europe': 'europe',
            'South America': 'south_america',
        }

        def shard_chooser(mapper, instance, clause=None):
            # Locations shard by continent; anything else (Report) shards
            # with its parent location.
            if isinstance(instance, WeatherLocation):
                return shard_lookup[instance.continent]
            else:
                return shard_chooser(mapper, instance.location)

        def id_chooser(query, ident):
            # Identity lookups must search every shard.
            return ['north_america', 'asia', 'europe', 'south_america']

        def query_chooser(query):
            # Inspect the query criterion for continent comparisons so the
            # query can be routed to just the relevant shard(s).
            ids = []

            class FindContinent(sql.ClauseVisitor):
                def visit_binary(self, binary):
                    if binary.left.shares_lineage(
                            weather_locations.c.continent):
                        if binary.operator == operators.eq:
                            ids.append(shard_lookup[binary.right.value])
                        elif binary.operator == operators.in_op:
                            for bind in binary.right.clauses:
                                ids.append(shard_lookup[bind.value])
            if query._criterion is not None:
                FindContinent().traverse(query._criterion)
            if len(ids) == 0:
                # No continent filter found: fan out to all shards.
                return ['north_america', 'asia', 'europe',
                        'south_america']
            else:
                return ids
        create_session = sessionmaker(class_=ShardedSession,
                                      autoflush=True, autocommit=False)
        create_session.configure(shards={
            'north_america': db1,
            'asia': db2,
            'europe': db3,
            'south_america': db4,
        }, shard_chooser=shard_chooser, id_chooser=id_chooser,
            query_chooser=query_chooser)

    @classmethod
    def setup_mappers(cls):
        """Define the (global) mapped classes and classical mappers."""
        global WeatherLocation, Report

        class WeatherLocation(object):
            def __init__(self, continent, city):
                self.continent = continent
                self.city = city

        class Report(object):
            def __init__(self, temperature):
                self.temperature = temperature
        mapper(WeatherLocation, weather_locations, properties={
            'reports': relationship(Report, backref='location'),
            # 'city' is deferred so test_roundtrip can exercise a lazy
            # column load on a specific shard.
            'city': deferred(weather_locations.c.city),
        })
        mapper(Report, weather_reports)

    def _fixture_data(self):
        """Insert one batch of locations/reports and return a fresh session."""
        tokyo = WeatherLocation('Asia', 'Tokyo')
        newyork = WeatherLocation('North America', 'New York')
        toronto = WeatherLocation('North America', 'Toronto')
        london = WeatherLocation('Europe', 'London')
        dublin = WeatherLocation('Europe', 'Dublin')
        brasilia = WeatherLocation('South America', 'Brasila')
        quito = WeatherLocation('South America', 'Quito')
        tokyo.reports.append(Report(80.0))
        newyork.reports.append(Report(75))
        quito.reports.append(Report(85))
        sess = create_session()
        for c in [
            tokyo,
            newyork,
            toronto,
            london,
            dublin,
            brasilia,
            quito,
        ]:
            sess.add(c)
        sess.commit()
        sess.close()
        return sess

    def test_roundtrip(self):
        """Data lands on the expected shard and queries route correctly."""
        sess = self._fixture_data()
        tokyo = sess.query(WeatherLocation).filter_by(city="Tokyo").one()
        tokyo.city  # reload 'city' attribute on tokyo
        sess.expunge_all()
        # Tokyo must live on the 'asia' shard (db2)...
        eq_(db2.execute(weather_locations.select()).fetchall(), [(1,
            'Asia', 'Tokyo')])
        # ...and the North American cities on db1.
        eq_(db1.execute(weather_locations.select()).fetchall(), [(2,
            'North America', 'New York'), (3, 'North America', 'Toronto'
            )])
        eq_(sess.execute(weather_locations.select(), shard_id='asia'
            ).fetchall(), [(1, 'Asia', 'Tokyo')])
        t = sess.query(WeatherLocation).get(tokyo.id)
        eq_(t.city, tokyo.city)
        eq_(t.reports[0].temperature, 80.0)
        north_american_cities = \
            sess.query(WeatherLocation).filter(WeatherLocation.continent
                == 'North America')
        eq_(set([c.city for c in north_american_cities]),
            set(['New York', 'Toronto']))
        asia_and_europe = \
            sess.query(WeatherLocation).filter(
                WeatherLocation.continent.in_(['Europe', 'Asia']))
        eq_(set([c.city for c in asia_and_europe]), set(['Tokyo',
            'London', 'Dublin']))

    def test_shard_id_event(self):
        """The 'load' event exposes the shard each instance came from."""
        canary = []

        def load(instance, ctx):
            canary.append(ctx.attributes["shard_id"])
        event.listen(WeatherLocation, "load", load)
        sess = self._fixture_data()
        tokyo = sess.query(WeatherLocation).\
            filter_by(city="Tokyo").set_shard("asia").one()
        sess.query(WeatherLocation).all()
        eq_(
            canary,
            ['asia', 'north_america', 'north_america',
             'europe', 'europe', 'south_america',
             'south_america']
        )
class DistinctEngineShardTest(ShardTest, fixtures.TestBase):
    """Runs ShardTest with four distinct on-disk SQLite engines."""

    def _init_dbs(self):
        # Four separate database files, one per shard.
        db1 = testing_engine('sqlite:///shard1.db',
                             options=dict(pool_threadlocal=True))
        db2 = testing_engine('sqlite:///shard2.db')
        db3 = testing_engine('sqlite:///shard3.db')
        db4 = testing_engine('sqlite:///shard4.db')
        return db1, db2, db3, db4

    def tearDown(self):
        # Dispose connections before deleting the files underneath them.
        clear_mappers()
        for db in (db1, db2, db3, db4):
            db.connect().invalidate()
        for i in range(1, 5):
            os.remove("shard%d.db" % i)
class AttachedFileShardTest(ShardTest, fixtures.TestBase):
    """Runs ShardTest where all four "shards" share one in-memory engine.

    Shard routing is simulated by rewriting the schema prefix in each
    statement via a before_cursor_execute hook.
    """
    # Placeholder schema name that the hook below rewrites per shard.
    schema = "changeme"

    def _init_dbs(self):
        db1 = testing_engine('sqlite://', options={"execution_options":
                             {"shard_id": "shard1"}})
        assert db1._has_events
        # db2-4 are execution-option views of the same underlying engine.
        db2 = db1.execution_options(shard_id="shard2")
        db3 = db1.execution_options(shard_id="shard3")
        db4 = db1.execution_options(shard_id="shard4")
        import re

        @event.listens_for(db1, "before_cursor_execute", retval=True)
        def _switch_shard(conn, cursor, stmt, params, context, executemany):
            shard_id = conn._execution_options['shard_id']
            # because SQLite can't just give us a "use" statement, we have
            # to use the schema hack to locate table names
            if shard_id:
                stmt = re.sub(r"\"?changeme\"?\.", shard_id + "_", stmt)
            return stmt, params
        return db1, db2, db3, db4
|
bsd-3-clause
|
obreitwi/nest-simulator
|
extras/include_checker.py
|
14
|
8100
|
# -*- coding: utf-8 -*-
#
# include_checker.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import os
import re
import sys
"""
This script suggest C/CPP include orders that conform to the NEST coding style
guidelines. Call the script like (from NEST sources):
For one file:
python extras/include_checker.py -nest $PWD -f nest/main.cpp
For one directory:
python extras/include_checker.py -nest $PWD -d nest
If everything is OK, or only few includes are in the wrong order, it will print
something like:
Includes for main.cpp are OK! Includes in wrong order: 0
If something is wrong, it will print the suggestion:
Includes for neststartup.h are WRONG! Includes in wrong order: 5
##############################
Suggested includes for neststartup.h:
##############################
// C includes:
#include <neurosim/pyneurosim.h>
// C++ includes:
#include <string>
// Generated includes:
#include "config.h"
// Includes from conngen:
#include "conngenmodule.h"
// Includes from sli:
#include "datum.h"
"""
# We would like to have files that are not actually provided by
# the NEST Initiative, e.g. implementing the Google Sparsetable,
# to be exactly like they come from the upstream source.
# Files listed here are skipped entirely by process_source().
excludes_files = ["sparsetable.h", "libc_allocator_with_realloc.h",
                  "hashtable-common.h", "sparseconfig.h", "template_util.h"]
class IncludeInfo():
    """One '#include' directive plus the metadata needed to order it.

    NEST style orders includes as: the file's own header first, then C
    system headers, C++ system headers, and finally project headers
    grouped by the module ("origin") they come from.

    Fix: the original used ``dict.iteritems()`` and relied solely on
    ``__cmp__`` with the ``cmp()`` builtin, both Python-2-only, so
    ``sorted()`` over these objects failed under Python 3.  ``.items()``
    and rich comparison methods are used now; ``__cmp__`` is kept for
    Python 2 callers.
    """
    filename = ""       # name of the file containing the directive
    name = ""           # the included header's name
    spiky = False       # True for '#include <...>', False for '#include "..."'
    origin = "a_unknown"  # module directory the header belongs to, if known

    def __init__(self, filename, name, spiky, all_headers):
        self.filename = filename
        self.name = name
        self.spiky = spiky
        self.set_origin(all_headers)

    def is_header_include(self):
        """True if this include is the file's own header (foo.cpp -> foo.h)."""
        return (self.name.split('.')[0] == self.filename.split('.')[0] or
                self.name.split('.')[0] == self.filename.split('_impl.')[0])

    def is_cpp_include(self):
        """True for C++ system headers, e.g. '#include <vector>'."""
        return (not self.name.endswith('.h') and
                not self.name.endswith('.hpp') and self.spiky)

    def is_c_include(self):
        """True for C system headers, e.g. '#include <math.h>'."""
        return self.name.endswith('.h') and self.spiky

    def is_project_include(self):
        """True for quoted project headers, e.g. '#include "datum.h"'."""
        return (not self.spiky and
                (self.name.endswith('.h') or self.name.endswith('.hpp')))

    def set_origin(self, includes):
        """Record which module directory provides this header, if any.

        ``includes`` maps directory name -> list of header file names
        (see all_includes()).
        """
        # .items() works on Python 2 and 3; the original .iteritems()
        # call raised AttributeError under Python 3.
        for k, v in includes.items():
            if self.name in v:
                self.origin = k
                break

    def cmp_value(self):
        """Category weight; higher values must sort earlier."""
        v = 8 if self.is_header_include() else 0
        v += 4 if self.is_c_include() else 0
        v += 2 if self.is_cpp_include() else 0
        v += 1 if self.is_project_include() else 0
        return v

    def _sort_key(self):
        # Descending by category weight, then ascending origin, then name —
        # the same ordering the original __cmp__ produced.
        return (-self.cmp_value(), self.origin, self.name)

    def __lt__(self, other):
        return self._sort_key() < other._sort_key()

    def __eq__(self, other):
        return self._sort_key() == other._sort_key()

    def __cmp__(self, other):
        # Python 2 fallback; Python 3 uses the rich comparisons above.
        s_key = self._sort_key()
        o_key = other._sort_key()
        return (s_key > o_key) - (s_key < o_key)

    def to_string(self):
        """Render the directive back to source form."""
        l_guard = '<' if self.spiky else '"'
        r_guard = '>' if self.spiky else '"'
        return '#include ' + l_guard + self.name + r_guard
def all_includes(path):
    """Collect all header file names under each top-level directory of *path*.

    Returns a dict mapping each non-hidden direct subdirectory of *path*
    to the list of ``.h``/``.hpp`` file names found anywhere below it.
    Directories containing no headers get no entry.

    Fix: the original assigned ``result[d] = tmp`` inside the inner walk,
    overwriting the entry for every nested directory that contained
    headers, so only the last visited subtree's headers survived.
    Headers are now accumulated across the whole subtree.
    """
    result = {}
    dirs = [d for d in next(os.walk(path))[1] if d[0] != '.']
    for d in dirs:
        headers = []
        for root, _, files in os.walk(os.path.join(path, d)):
            headers += [f for f in files
                        if f.endswith(".h") or f.endswith(".hpp")]
        if headers:
            result[d] = headers
    return result
def create_include_info(line, filename, all_headers):
    """Parse one '#include' source line into an IncludeInfo instance."""
    opener, included_name, _closer = re.search(
        '^#include ([<"])(.*)([>"])', line).groups()
    # '<' means a system ("spiky") include; '"' a project include.
    return IncludeInfo(filename, included_name, opener == '<', all_headers)
def get_includes_from(file, all_headers):
    """Return IncludeInfo objects for every '#include' line in *file*."""
    base_name = os.path.basename(file)
    with open(file, 'r') as handle:
        return [create_include_info(source_line, base_name, all_headers)
                for source_line in handle
                if source_line.startswith('#include')]
def is_include_order_ok(includes):
    """Return how many includes sit at a different position than sorted order.

    0 means the include order already conforms; larger values count the
    positions whose header name differs from the fully sorted sequence.
    """
    return sum(1 for actual, expected in zip(includes, sorted(includes))
               if actual.name != expected.name)
def print_includes(includes):
    """Print the sorted include list with NEST-style group banners.

    Walks the sorted includes and emits a section header ("// C
    includes:", "// C++ includes:", "// Includes from <module>:") each
    time the category or origin changes.  The file's own header has no
    banner and always comes first.
    """
    s_incs = sorted(includes)
    # State flags tracking which banner was printed last.
    is_c = False
    is_cpp = False
    origin = ""
    for i in s_incs:
        if not i.is_header_include():
            if not is_c and i.is_c_include():
                is_c = True
                is_cpp = False
                origin = ""
                print("\n// C includes:")
            if not is_cpp and i.is_cpp_include():
                is_c = False
                is_cpp = True
                origin = ""
                print("\n// C++ includes:")
            if i.is_project_include() and origin != i.origin:
                is_c = False
                is_cpp = False
                origin = i.origin
                # "a_unknown" origin means the header was not found in the
                # source tree — presumably build-generated (e.g. config.h).
                if i.origin == "a_unknown":
                    print("\n// Generated includes:")
                else:
                    print("\n// Includes from " + i.origin + ":")
        print(i.to_string())
def process_source(path, f, all_headers, print_suggestion):
    """Check the include order of one file; return its out-of-order count.

    Files on the exclude list are skipped and count as 0.  When the count
    exceeds the tolerance and *print_suggestion* is set, a correctly
    ordered include block is printed.
    """
    if f in excludes_files:
        print("Not checking file " + f + " as it is in the exclude list. " +
              "Please do not change the order of includes.")
        return 0
    includes = get_includes_from(os.path.join(path, f), all_headers)
    order_ok = is_include_order_ok(includes)
    if order_ok <= 2:
        # Up to two misplaced includes are still reported as OK.
        print("Includes for %s are OK! Includes in wrong order: %d"
              % (f, order_ok))
    else:
        print("Includes for %s are WRONG! Includes in wrong order: %d"
              % (f, order_ok))
        if print_suggestion:
            print("\n##############################")
            print("Suggested includes for %s:" % f)
            print("##############################\n")
            print_includes(includes)
            print("\n##############################")
    return order_ok
def process_all_sources(path, all_headers, print_suggestion):
    """Check include order for every C/C++ source file below *path*.

    Returns the summed out-of-order include count over all files.

    Fixes: (1) os.walk() already descends into subdirectories, but the
    original additionally recursed into ``dirs`` by hand, so every nested
    file was processed and counted once per ancestor directory; the
    manual recursion is removed.  (2) the old pattern left ``\\.cc``
    unanchored, so names like ``foo.cc.orig`` matched; every extension is
    now anchored with ``$``.
    """
    count = 0
    for root, _, files in os.walk(path):
        for f in files:
            if re.search(r"\.(h|hpp|c|cc|cpp)$", f):
                # valid source file
                count += process_source(root, f, all_headers,
                                        print_suggestion)
    return count
def usage(exitcode):
    """Print the command-line synopsis and terminate with *exitcode*."""
    synopsis = (" " + sys.argv[0] + " -nest <nest-base-dir>" +
                " (-f <filename> | -d <base-directory>)")
    print("Use like:")
    print(synopsis)
    sys.exit(exitcode)
if __name__ == '__main__':
    # CLI entry point.  Expected argv shapes:
    #   include_checker.py -nest <nest-base-dir> -f <file>
    #   include_checker.py -nest <nest-base-dir> -d <directory>
    print_suggestion = True
    if len(sys.argv) != 5:
        usage(1)
    if sys.argv[1] == '-nest' and os.path.isdir(sys.argv[2]):
        # Index all project headers once, up front.
        all_headers = all_includes(sys.argv[2])
    else:
        usage(2)
    if sys.argv[3] == '-f' and os.path.isfile(sys.argv[4]):
        path = os.path.dirname(sys.argv[4])
        file = os.path.basename(sys.argv[4])
        process_source(path, file, all_headers, print_suggestion)
    elif sys.argv[3] == '-d' and os.path.isdir(sys.argv[4]):
        dir = sys.argv[4]
        process_all_sources(dir, all_headers, print_suggestion)
    else:
        usage(3)
|
gpl-2.0
|
cacarrara/cacarrara.github.io
|
pelicanconf.py
|
1
|
1925
|
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
# Pelican site configuration for caiocarrara.com.br.
from __future__ import unicode_literals

# --- Author / site identity ---
AUTHOR = u'Caio Carrara'
AUTHOR_EMAIL = u'[email protected]'
SITENAME = u'Caio Carrara'
SITEURL = 'http://caiocarrara.com.br'
PATH = 'content'
TIMEZONE = 'America/Sao_Paulo'
DEFAULT_LANG = u'pt'
USE_FOLDER_AS_CATEGORY = True
DEFAULT_METADATA = (
    ('about_author', 'Programador para o resto da vida. Falando sobre software e a vida.'),
    ('author_g_plus_id', '108931761041773828029'),
)
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
# --- Navigation and social links ---
MENUITEMS = [
    ('Arquivo', 'archives.html'),
    ('Sobre', 'sobre.html'),
    ('Contato', 'contato.html'),
]
SOCIAL = (
    ('github', 'https://github.com/cacarrara/'),
    ('twitter-square', 'https://twitter.com/CaioWCC'),
    ('rss', 'http://caiocarrara.com.br/feeds/caio-carrara.atom.xml'),
)
# --- Plugins ---
PLUGIN_PATHS = ['pelican-plugins', ]
PLUGINS = [
    'sitemap',
    'gravatar',
    'share_post',
    'gzip_cache',  # keep this as last plugin
]
DEFAULT_PAGINATION = 7
# Document-relative URLs (useful when developing).
RELATIVE_URLS = True
PAGE_URL = PAGE_SAVE_AS = '{slug}.html'
# --- Theme ---
THEME = 'themes/pure'
COVER_IMG_URL = 'http://i.imgur.com/uZIQDHD.jpg'
TAGLINE = (
    'Desenvolvimento de software, Python, Internet Livre, negócios e ideias aleatórias. Não necessariamente nessa ordem'
)
GITHUB_URL = 'https://github.com/cacarrara/cacarrara.github.io'
DISQUS_SITENAME = 'caiocarrara'
GOOGLE_ANALYTICS = 'UA-28003582-1'
# --- sitemap plugin configuration ---
SITEMAP = {
    'format': 'xml',
    'priorities': {
        'articles': 0.8,
        'indexes': 0.5,
        'pages': 0.3
    },
    'changefreqs': {
        'articles': 'daily',
        'indexes': 'daily',
        'pages': 'monthly'
    }
}
# CNAME must be copied verbatim to the output root for GitHub Pages.
STATIC_PATHS = ['images', 'extra/CNAME', ]
EXTRA_PATH_METADATA = {'extra/CNAME': {'path': 'CNAME'}, }
|
apache-2.0
|
artemrizhov/django-mail-templated
|
setup.py
|
1
|
1166
|
# Packaging script for django-mail-templated.
import os
from setuptools import setup, find_packages

DESCRIPTION = 'Send emails using Django template system'
LONG_DESCRIPTION = None
try:
    # Use the README as the PyPI long description when it is present.
    # (The original leaked the file handle and used a bare `except:`,
    # which also swallowed KeyboardInterrupt/SystemExit; only file-access
    # failures are expected here.)
    with open('README.rst') as readme:
        LONG_DESCRIPTION = readme.read()
except (IOError, OSError):
    pass
CLASSIFIERS = [
    'Development Status :: 5 - Production/Stable',
    'Intended Audience :: Developers',
    'License :: OSI Approved :: MIT License',
    'Operating System :: OS Independent',
    'Programming Language :: Python',
    "Programming Language :: Python :: 2.7",
    "Programming Language :: Python :: 3.4",
    'Topic :: Software Development :: Libraries :: Python Modules',
    'Framework :: Django',
]
# Base version; CI can override it through the environment variable.
VERSION = '2.6.5'
VERSION = os.environ.get('MAIL_TEMPLATED_VERSION', VERSION)
setup(
    name='django-mail-templated',
    version=VERSION,
    packages=find_packages(),
    include_package_data=True,
    author='Artem Rizhov',
    author_email='[email protected]',
    url='https://github.com/artemrizhov/django-mail-templated',
    license='MIT',
    description=DESCRIPTION,
    long_description=LONG_DESCRIPTION,
    platforms=['any'],
    classifiers=CLASSIFIERS,
    test_suite='mail_templated.test_utils.run.run_tests',
)
|
mit
|
hackbutty/git-repo
|
progress.py
|
143
|
2036
|
#
# Copyright (C) 2009 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from time import time
from trace import IsTrace
_NOT_TTY = not os.isatty(2)
class Progress(object):
  """Simple stderr progress meter using carriage-return redraws.

  Output is suppressed entirely when stderr is not a TTY or tracing is
  enabled, and only starts after half a second so short operations stay
  quiet.
  """

  def __init__(self, title, total=0, units=''):
    # title: label printed before the counter.
    # total: expected item count; 0 or less means "unknown", which
    #        switches to a plain running count instead of a percentage.
    # units: suffix appended to the counts (e.g. 'k', 'MB').
    self._title = title
    self._total = total
    self._done = 0
    self._lastp = -1          # last percentage drawn, to avoid redraws
    self._start = time()
    self._show = False        # becomes True 0.5s after construction
    self._units = units

  def update(self, inc=1):
    """Advance the counter by *inc* and redraw if appropriate."""
    self._done += inc

    if _NOT_TTY or IsTrace():
      return

    if not self._show:
      # Suppress output for the first half second of work.
      if 0.5 <= time() - self._start:
        self._show = True
      else:
        return

    if self._total <= 0:
      sys.stderr.write('\r%s: %d, ' % (
        self._title,
        self._done))
      sys.stderr.flush()
    else:
      # NOTE(review): under Python 3 this is true division yielding a
      # float; '%3d' still truncates it, but the py2-era intent was
      # integer division — confirm if porting.
      p = (100 * self._done) / self._total

      if self._lastp != p:
        self._lastp = p
        sys.stderr.write('\r%s: %3d%% (%d%s/%d%s) ' % (
          self._title,
          p,
          self._done, self._units,
          self._total, self._units))
        sys.stderr.flush()

  def end(self):
    """Draw the final 'done' line (if the meter was ever shown)."""
    if _NOT_TTY or IsTrace() or not self._show:
      return

    if self._total <= 0:
      sys.stderr.write('\r%s: %d, done.  \n' % (
        self._title,
        self._done))
      sys.stderr.flush()
    else:
      p = (100 * self._done) / self._total
      sys.stderr.write('\r%s: %3d%% (%d%s/%d%s), done.  \n' % (
        self._title,
        p,
        self._done, self._units,
        self._total, self._units))
      sys.stderr.flush()
|
apache-2.0
|
decvalts/landlab
|
scripts/test-installed-landlab.py
|
1
|
1105
|
#! /usr/bin/env python
# Run the test suite of an *installed* landlab (not the source tree).
import sys, os
# Drop the script's own directory from sys.path so "import landlab"
# resolves to the installed package rather than the local checkout.
sys.path.pop(0)
from optparse import OptionParser

parser = OptionParser('usage: %prog [options] -- [nosetests options]')
parser.add_option('-v', '--verbose', action='count', dest='verbose',
                  default=1, help='increase verbosity [%default]')
parser.add_option('--doctests', action='store_true', dest='doctests',
                  default=False, help='Run doctests in module [%default]')
parser.add_option('--coverage', action='store_true', dest='coverage',
                  default=False, help='report coverage of landlab [%default]')
parser.add_option('-m', '--mode', action='store', dest='mode', default='fast',
                  help='"fast", "full", or something that can be passed to '
                       'nosetests -A [%default]')
(options, args) = parser.parse_args()

import landlab
# Extra args after '--' are forwarded verbatim to nosetests.
result = landlab.test(label=options.mode, verbose=options.verbose,
                      doctests=options.doctests, coverage=options.coverage,
                      extra_argv=args)
# Mirror the suite result in the process exit code for CI.
if result.wasSuccessful():
    sys.exit(0)
else:
    sys.exit(1)
|
mit
|
ctiller/grpc
|
tools/run_tests/xds_k8s_test_driver/framework/infrastructure/gcp/network_services.py
|
4
|
3665
|
# Copyright 2020 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Optional
import dataclasses
from google.rpc import code_pb2
import tenacity
from framework.infrastructure import gcp
logger = logging.getLogger(__name__)
class NetworkServicesV1Alpha1(gcp.api.GcpStandardCloudApiResource):
    """Thin wrapper over the GCP Network Services v1alpha1 REST API.

    Provides create/get/delete for EndpointConfigSelector resources under
    projects/*/locations/*, with a retry workaround for a Traffic
    Director throttling bug.
    """
    ENDPOINT_CONFIG_SELECTORS = 'endpointConfigSelectors'

    @dataclasses.dataclass(frozen=True)
    class EndpointConfigSelector:
        # Immutable snapshot of an endpointConfigSelectors REST resource.
        url: str
        name: str
        type: str
        server_tls_policy: Optional[str]
        traffic_port_selector: dict
        endpoint_matcher: dict
        http_filters: dict
        update_time: str
        create_time: str

    def __init__(self, api_manager: gcp.api.GcpApiManager, project: str):
        super().__init__(api_manager.networkservices(self.api_version), project)
        # Shortcut to projects/*/locations/ endpoints
        self._api_locations = self.api.projects().locations()

    @property
    def api_name(self) -> str:
        return 'networkservices'

    @property
    def api_version(self) -> str:
        return 'v1alpha1'

    def create_endpoint_config_selector(self, name, body: dict):
        """Create an endpoint config selector named *name* from *body*."""
        return self._create_resource(
            self._api_locations.endpointConfigSelectors(),
            body,
            endpointConfigSelectorId=name)

    def get_endpoint_config_selector(self, name: str) -> EndpointConfigSelector:
        """Fetch a selector by short name and map it to the dataclass above."""
        result = self._get_resource(
            collection=self._api_locations.endpointConfigSelectors(),
            full_name=self.resource_full_name(name,
                                              self.ENDPOINT_CONFIG_SELECTORS))
        return self.EndpointConfigSelector(
            name=name,
            # The API's 'name' field holds the full resource URL.
            url=result['name'],
            type=result['type'],
            server_tls_policy=result.get('serverTlsPolicy', None),
            traffic_port_selector=result['trafficPortSelector'],
            endpoint_matcher=result['endpointMatcher'],
            http_filters=result['httpFilters'],
            update_time=result['updateTime'],
            create_time=result['createTime'])

    def delete_endpoint_config_selector(self, name):
        """Delete the selector with the given short name."""
        return self._delete_resource(
            collection=self._api_locations.endpointConfigSelectors(),
            full_name=self.resource_full_name(name,
                                              self.ENDPOINT_CONFIG_SELECTORS))

    def _execute(self, *args, **kwargs):  # pylint: disable=signature-differs
        # Workaround TD bug: throttled operations are reported as internal.
        # Ref b/175345578
        # Retry INTERNAL operation errors every 10s for up to 5 minutes.
        # NOTE(review): the retryer result is not returned — presumably the
        # parent _execute returns None; confirm against the base class.
        retryer = tenacity.Retrying(
            retry=tenacity.retry_if_exception(self._operation_internal_error),
            wait=tenacity.wait_fixed(10),
            stop=tenacity.stop_after_delay(5 * 60),
            before_sleep=tenacity.before_sleep_log(logger, logging.DEBUG),
            reraise=True)
        retryer(super()._execute, *args, **kwargs)

    @staticmethod
    def _operation_internal_error(exception):
        """True for OperationErrors whose status code is INTERNAL."""
        return (isinstance(exception, gcp.api.OperationError) and
                exception.error.code == code_pb2.INTERNAL)
|
apache-2.0
|
SOKP/external_chromium_org
|
tools/telemetry/telemetry/core/backends/chrome/oobe.py
|
33
|
2064
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
from telemetry.core import exceptions
from telemetry.core import util
from telemetry.core import web_contents
class Oobe(web_contents.WebContents):
  """Driver for the Chrome OS out-of-box-experience (OOBE) login screen.

  Wraps the Oobe.* JavaScript test API exposed by the login WebUI.
  """

  def __init__(self, inspector_backend, backend_list):
    super(Oobe, self).__init__(inspector_backend, backend_list)

  def _GaiaLoginContext(self):
    """Return the JS context id hosting the GAIA sign-in form, or None.

    Scans every enabled context for a fully loaded document containing
    an element with id 'Email'.
    """
    max_context_id = self.EnableAllContexts()
    logging.debug('%d contexts in Gaia page' % max_context_id)
    for gaia_context in range(max_context_id + 1):
      try:
        if self.EvaluateJavaScriptInContext(
            "document.readyState == 'complete' && "
            "document.getElementById('Email') != null",
            gaia_context):
          return gaia_context
      except exceptions.EvaluateException:
        # Context may be gone or not scriptable; keep scanning.
        pass
    return None

  def _ExecuteOobeApi(self, api, *args):
    """Invoke an Oobe.* test API with string arguments.

    Raises:
      exceptions.LoginException: if the named API is not defined.
    """
    logging.info('Invoking %s' % api)
    self.WaitForJavaScriptExpression("typeof Oobe == 'function'", 20)

    if self.EvaluateJavaScript("typeof %s == 'undefined'" % api):
      raise exceptions.LoginException('%s js api missing' % api)

    # Build "api('a','b',...);" — every argument is quoted as a JS string.
    js = api + '(' + ("'%s'," * len(args)).rstrip(',') + ');'
    self.ExecuteJavaScript(js % args)

  def NavigateGuestLogin(self):
    """Logs in as guest."""
    self._ExecuteOobeApi('Oobe.guestLoginForTesting')

  def NavigateFakeLogin(self, username, password):
    """Fake user login."""
    self._ExecuteOobeApi('Oobe.loginForTesting', username, password)

  def NavigateGaiaLogin(self, username, password):
    """Logs in to GAIA with provided credentials."""
    self._ExecuteOobeApi('Oobe.addUserForTesting')
    gaia_context = util.WaitFor(self._GaiaLoginContext, timeout=30)
    # Fill the form and submit directly in the GAIA context.
    self.ExecuteJavaScriptInContext("""
        document.getElementById('Email').value='%s';
        document.getElementById('Passwd').value='%s';
        document.getElementById('signIn').click();"""
            % (username, password),
        gaia_context)
|
bsd-3-clause
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.