repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses, 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
tmuelle2/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/thread/messagepump.py | 151 | 2482 | # Copyright (c) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
class MessagePumpDelegate(object):
def schedule(self, interval, callback):
raise NotImplementedError("subclasses must implement")
def message_available(self, message):
raise NotImplementedError("subclasses must implement")
def final_message_delivered(self):
raise NotImplementedError("subclasses must implement")
class MessagePump(object):
interval = 10 # seconds
def __init__(self, delegate, message_queue):
self._delegate = delegate
self._message_queue = message_queue
self._schedule()
def _schedule(self):
self._delegate.schedule(self.interval, self._callback)
def _callback(self):
(messages, is_running) = self._message_queue.take_all()
for message in messages:
self._delegate.message_available(message)
if not is_running:
self._delegate.final_message_delivered()
return
self._schedule()
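# --- Editor's addition: a hedged usage sketch, not part of the original
# webkitpy file. "CollectingDelegate" and "FakeQueue" are invented names;
# the real MessageQueue and delegates in webkitpy differ. It only shows how
# MessagePump drives a delegate via schedule()/take_all().
class CollectingDelegate(MessagePumpDelegate):
    def __init__(self):
        self.delivered = []

    def schedule(self, interval, callback):
        # A real delegate would run callback() after `interval` seconds;
        # invoking it immediately keeps the sketch synchronous.
        callback()

    def message_available(self, message):
        self.delivered.append(message)

    def final_message_delivered(self):
        self.delivered.append("<done>")


class FakeQueue(object):
    def __init__(self, messages):
        self._messages = messages

    def take_all(self):
        # Mimics MessageQueue.take_all(): returns (messages, is_running).
        messages, self._messages = self._messages, []
        return (messages, False)

# MessagePump(CollectingDelegate(), FakeQueue(["a", "b"])) delivers "a", "b"
# and then signals completion via final_message_delivered().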
| bsd-3-clause |
stscieisenhamer/glue | glue/viewers/common/viz_client.py | 2 | 9401 | from __future__ import absolute_import, division, print_function
import matplotlib.pyplot as plt
from glue.core import Data
from glue.core.message import SettingsChangeMessage
from glue.core.client import Client
from glue.core.layer_artist import LayerArtistContainer
from glue.utils.matplotlib import freeze_margins
__all__ = ['VizClient', 'GenericMplClient']
class VizClient(Client):
"""
The VizClient class provides an interface (and minimal
implementation) for a generic client that creates
visualizations. The goal of VizClient is to provide a reusable way
to organize client plotting code.
Clients which extend VizClient should override the following methods
to perform specific visualization tasks
* _update_axis_labels
* _update_data_plot
* _update_subset_single
* _redraw
* init_layer
VizClient provides a public refresh() method that calls all of
these methods.
Attributes
----------
options: A dictionary of global plot options, to be handled by
subclasses.
"""
def __init__(self, data, options=None):
Client.__init__(self, data)
if not options:
self.options = {}
else:
self.options = options
def _add_data(self, message):
pass
def _remove_data(self, message):
pass
def _update_data(self, message):
"""
Method to handle messages sent by the dataset. Refreshes the display.
"""
self._update_data_plot()
self.refresh()
def _add_subset(self, message):
"""
Method to handle messages sent when subsets are created.
"""
s = message.subset
self.init_layer(s)
self._redraw()
def _update_subset(self, message):
"""
Method to handle messages sent when subsets are modified.
The plot properties of the modified subset are refreshed.
"""
s = message.subset
self._update_subset_single(s, redraw=True)
def refresh(self):
"""
Update and redraw all plot information.
"""
self._update_data_plot()
self._update_subset_plots()
self._update_axis_labels()
self._redraw()
def _redraw(self):
"""
Redraw, but do not update, plot information
"""
raise NotImplementedError("VizClient cannot draw!")
def _update_axis_labels(self):
"""
Sync the axis labels to reflect which components are
currently being plotted
"""
raise NotImplementedError("VizClient cannot draw!")
def _update_data_plot(self):
"""
Sync the location of the scatter points to
reflect what components are being plotted
"""
raise NotImplementedError("VizClient cannot draw!")
def _update_subset_plots(self, redraw=False):
"""
Sync the location and visual properties
of each point in each subset
"""
for d in self.data:
for s in d.subsets:
self._update_subset_single(s)
if redraw:
self._redraw()
def _update_subset_single(self, s, redraw=False):
"""
Update the properties of a subset
Parameters
----------
s: A subset instance
The subset to refresh.
"""
raise NotImplementedError("VizClient Cannot Draw!")
def init_layer(self, layer):
"""Initialize a plot of a data or subset object for the first time.
Parameters
----------
layer: Data or subset instance
"""
raise NotImplementedError()
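# --- Editor's addition: a hedged sketch of the subclass surface described in
# the VizClient docstring. "NullVizClient" is an invented name; real glue
# clients implement these hooks against matplotlib artists.
class NullVizClient(VizClient):

    def init_layer(self, layer):
        pass  # a real client would create an artist for the data/subset here

    def _redraw(self):
        pass  # a real client would trigger a canvas draw here

    def _update_axis_labels(self):
        pass  # sync axis labels with the plotted components

    def _update_data_plot(self):
        pass  # sync the plotted points with the current components

    def _update_subset_single(self, s, redraw=False):
        if redraw:
            self._redraw()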
def set_background_color(axes, color):
axes.figure.set_facecolor(color)
axes.patch.set_facecolor(color)
def set_foreground_color(axes, color):
if hasattr(axes, 'coords'):
axes.coords.frame.set_color(color)
axes.coords.frame.set_linewidth(1)
for coord in axes.coords:
coord.set_ticks(color=color)
coord.set_ticklabel(color=color)
coord.axislabels.set_color(color)
else:
for spine in axes.spines.values():
spine.set_color(color)
axes.tick_params(color=color,
labelcolor=color)
axes.xaxis.label.set_color(color)
axes.yaxis.label.set_color(color)
def update_appearance_from_settings(axes):
from glue.config import settings
set_background_color(axes, settings.BACKGROUND_COLOR)
set_foreground_color(axes, settings.FOREGROUND_COLOR)
def init_mpl(figure=None, axes=None, wcs=False, axes_factory=None):
if (axes is not None and figure is not None and
axes.figure is not figure):
raise ValueError("Axes and figure are incompatible")
try:
from astropy.visualization.wcsaxes import WCSAxesSubplot
except ImportError:
WCSAxesSubplot = None
if axes is not None:
_axes = axes
_figure = axes.figure
else:
_figure = figure or plt.figure()
if wcs and WCSAxesSubplot is not None:
_axes = WCSAxesSubplot(_figure, 111)
_figure.add_axes(_axes)
else:
if axes_factory is None:
_axes = _figure.add_subplot(1, 1, 1)
else:
_axes = axes_factory(_figure)
freeze_margins(_axes, margins=[1, 0.25, 0.50, 0.25])
update_appearance_from_settings(_axes)
return _figure, _axes
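# --- Editor's addition: a hedged usage sketch of init_mpl(). The helper name
# below is invented; calling it needs a working matplotlib backend.
def _example_init_mpl_usage():
    # With no arguments init_mpl creates a fresh figure plus a single
    # frozen-margin subplot styled from the glue settings; wcs=True is only
    # honoured when astropy's WCSAxes can be imported.
    figure, axes = init_mpl()
    axes.plot([1, 2, 3], [4, 5, 6])
    figure.canvas.draw()
    return figure, axes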
class GenericMplClient(Client):
"""
This client base class handles the logic of adding, removing,
and updating layers.
Subsets are auto-added and removed with datasets.
New subsets are auto-added iff the data has already been added
"""
def __init__(self, data=None, figure=None, axes=None,
layer_artist_container=None, axes_factory=None):
super(GenericMplClient, self).__init__(data=data)
if axes_factory is None:
axes_factory = self.create_axes
figure, self.axes = init_mpl(figure, axes, axes_factory=axes_factory)
self.artists = layer_artist_container
if self.artists is None:
self.artists = LayerArtistContainer()
self._connect()
def create_axes(self, figure):
return figure.add_subplot(1, 1, 1)
def _connect(self):
pass
@property
def collect(self):
# 'collect' is a more descriptive alias for self.data
return self.data
def _redraw(self):
self.axes.figure.canvas.draw()
def new_layer_artist(self, layer):
raise NotImplementedError
def apply_roi(self, roi):
raise NotImplementedError
def _update_layer(self, layer):
raise NotImplementedError
def add_layer(self, layer):
"""
Add a new Data or Subset layer to the plot.
Returns the created layer artist
:param layer: The layer to add
:type layer: :class:`~glue.core.data.Data` or :class:`~glue.core.subset.Subset`
"""
if layer.data not in self.collect:
return
if layer in self.artists:
return self.artists[layer][0]
result = self.new_layer_artist(layer)
self.artists.append(result)
self._update_layer(layer)
self.add_layer(layer.data)
for s in layer.data.subsets:
self.add_layer(s)
if layer.data is layer: # Added Data object. Relimit view
self.axes.autoscale_view(True, True, True)
return result
def remove_layer(self, layer):
if layer not in self.artists:
return
self.artists.pop(layer)
if isinstance(layer, Data):
list(map(self.remove_layer, layer.subsets))
self._redraw()
def set_visible(self, layer, state):
"""
Toggle a layer's visibility
:param layer: which layer to modify
:param state: True or False
"""
def _update_all(self):
for layer in self.artists.layers:
self._update_layer(layer)
def __contains__(self, layer):
return layer in self.artists
# Hub message handling
def _add_subset(self, message):
self.add_layer(message.sender)
def _remove_subset(self, message):
self.remove_layer(message.sender)
def _update_subset(self, message):
self._update_layer(message.sender)
def _update_data(self, message):
self._update_layer(message.sender)
def _remove_data(self, message):
self.remove_layer(message.data)
def register_to_hub(self, hub):
super(GenericMplClient, self).register_to_hub(hub)
def is_appearance_settings(msg):
return ('BACKGROUND_COLOR' in msg.settings or
'FOREGROUND_COLOR' in msg.settings)
hub.subscribe(self, SettingsChangeMessage,
self._update_appearance_from_settings,
filter=is_appearance_settings)
def _update_appearance_from_settings(self, message):
update_appearance_from_settings(self.axes)
self._redraw()
def restore_layers(self, layers, context):
""" Re-generate plot layers from a glue-serialized list"""
for l in layers:
l.pop('_type')
props = dict((k, context.object(v)) for k, v in l.items())
layer = self.add_layer(props['layer'])
layer.properties = props
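# --- Editor's addition: a hedged sketch of the hooks a concrete viewer
# supplies. GenericMplClient inherits the layer bookkeeping and hub wiring;
# a subclass only says how to build and refresh artists and apply an ROI.
# "SkeletonMplClient" is an invented name for illustration.
class SkeletonMplClient(GenericMplClient):

    def new_layer_artist(self, layer):
        # real clients return a LayerArtist wrapping `layer` and self.axes
        raise NotImplementedError

    def apply_roi(self, roi):
        # real clients convert the ROI into a subset state here
        raise NotImplementedError

    def _update_layer(self, layer):
        # real clients sync the matching artist(s), then call self._redraw()
        raise NotImplementedError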
| bsd-3-clause |
RenderBroken/Victara-Stock-kernel | scripts/build-all.py | 1474 | 10189 | #! /usr/bin/env python
# Copyright (c) 2009-2013, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Build the kernel for all targets using the Android build environment.
#
# TODO: Accept arguments to indicate what to build.
import glob
from optparse import OptionParser
import subprocess
import os
import os.path
import re
import shutil
import sys
import errno  # needed by check_build() below
version = 'build-all.py, version 0.01'
build_dir = '../all-kernels'
make_command = ["vmlinux", "modules", "dtbs"]
make_env = os.environ
make_env.update({
'ARCH': 'arm',
'KCONFIG_NOTIMESTAMP': 'true' })
make_env.setdefault('CROSS_COMPILE', 'arm-none-linux-gnueabi-')
all_options = {}
def error(msg):
sys.stderr.write("error: %s\n" % msg)
def fail(msg):
"""Fail with a user-printed message"""
error(msg)
sys.exit(1)
def check_kernel():
"""Ensure that PWD is a kernel directory"""
if (not os.path.isfile('MAINTAINERS') or
not os.path.isfile('arch/arm/mach-msm/Kconfig')):
fail("This doesn't seem to be an MSM kernel dir")
def check_build():
"""Ensure that the build directory is present."""
if not os.path.isdir(build_dir):
try:
os.makedirs(build_dir)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
raise
def update_config(file, str):
print 'Updating %s with \'%s\'\n' % (file, str)
defconfig = open(file, 'a')
defconfig.write(str + '\n')
defconfig.close()
def scan_configs():
"""Get the full list of defconfigs appropriate for this tree."""
names = {}
arch_pats = (
r'[fm]sm[0-9]*_defconfig',
r'apq*_defconfig',
r'qsd*_defconfig',
r'msmkrypton*_defconfig',
)
for p in arch_pats:
for n in glob.glob('arch/arm/configs/' + p):
names[os.path.basename(n)[:-10]] = n
return names
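# Editor's note (illustrative, hypothetical defconfig names): in an msm
# kernel tree scan_configs() returns a mapping of target name to defconfig
# path, with the trailing "_defconfig" (10 characters) stripped from the key:
#   {'msm8974': 'arch/arm/configs/msm8974_defconfig',
#    'apq8084': 'arch/arm/configs/apq8084_defconfig'}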
class Builder:
def __init__(self, logname):
self.logname = logname
self.fd = open(logname, 'w')
def run(self, args):
devnull = open('/dev/null', 'r')
proc = subprocess.Popen(args, stdin=devnull,
env=make_env,
bufsize=0,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
count = 0
# for line in proc.stdout:
rawfd = proc.stdout.fileno()
while True:
line = os.read(rawfd, 1024)
if not line:
break
self.fd.write(line)
self.fd.flush()
if all_options.verbose:
sys.stdout.write(line)
sys.stdout.flush()
else:
for i in range(line.count('\n')):
count += 1
if count == 64:
count = 0
print
sys.stdout.write('.')
sys.stdout.flush()
print
result = proc.wait()
self.fd.close()
return result
failed_targets = []
def build(target):
dest_dir = os.path.join(build_dir, target)
log_name = '%s/log-%s.log' % (build_dir, target)
print 'Building %s in %s log %s' % (target, dest_dir, log_name)
if not os.path.isdir(dest_dir):
os.mkdir(dest_dir)
defconfig = 'arch/arm/configs/%s_defconfig' % target
dotconfig = '%s/.config' % dest_dir
savedefconfig = '%s/defconfig' % dest_dir
shutil.copyfile(defconfig, dotconfig)
staging_dir = 'install_staging'
modi_dir = '%s' % staging_dir
hdri_dir = '%s/usr' % staging_dir
shutil.rmtree(os.path.join(dest_dir, staging_dir), ignore_errors=True)
devnull = open('/dev/null', 'r')
subprocess.check_call(['make', 'O=%s' % dest_dir,
'%s_defconfig' % target], env=make_env, stdin=devnull)
devnull.close()
if not all_options.updateconfigs:
# Build targets can be dependent upon the completion of previous
# build targets, so build them one at a time.
cmd_line = ['make',
'INSTALL_HDR_PATH=%s' % hdri_dir,
'INSTALL_MOD_PATH=%s' % modi_dir,
'O=%s' % dest_dir]
build_targets = []
for c in make_command:
if re.match(r'^-{1,2}\w', c):
cmd_line.append(c)
else:
build_targets.append(c)
for t in build_targets:
build = Builder(log_name)
result = build.run(cmd_line + [t])
if result != 0:
if all_options.keep_going:
failed_targets.append(target)
fail_or_error = error
else:
fail_or_error = fail
fail_or_error("Failed to build %s, see %s" %
(target, build.logname))
# Copy the defconfig back.
if all_options.configs or all_options.updateconfigs:
devnull = open('/dev/null', 'r')
subprocess.check_call(['make', 'O=%s' % dest_dir,
'savedefconfig'], env=make_env, stdin=devnull)
devnull.close()
shutil.copyfile(savedefconfig, defconfig)
def build_many(allconf, targets):
print "Building %d target(s)" % len(targets)
for target in targets:
if all_options.updateconfigs:
update_config(allconf[target], all_options.updateconfigs)
build(target)
if failed_targets:
fail('\n '.join(["Failed targets:"] +
[target for target in failed_targets]))
def main():
global make_command
check_kernel()
check_build()
configs = scan_configs()
usage = ("""
%prog [options] all -- Build all targets
%prog [options] target target ... -- List specific targets
%prog [options] perf -- Build all perf targets
%prog [options] noperf -- Build all non-perf targets""")
parser = OptionParser(usage=usage, version=version)
parser.add_option('--configs', action='store_true',
dest='configs',
help="Copy configs back into tree")
parser.add_option('--list', action='store_true',
dest='list',
help='List available targets')
parser.add_option('-v', '--verbose', action='store_true',
dest='verbose',
help='Output to stdout in addition to log file')
parser.add_option('--oldconfig', action='store_true',
dest='oldconfig',
help='Only process "make oldconfig"')
parser.add_option('--updateconfigs',
dest='updateconfigs',
help="Update defconfigs with provided option setting, "
"e.g. --updateconfigs=\'CONFIG_USE_THING=y\'")
parser.add_option('-j', '--jobs', type='int', dest="jobs",
help="Number of simultaneous jobs")
parser.add_option('-l', '--load-average', type='int',
dest='load_average',
help="Don't start multiple jobs unless load is below LOAD_AVERAGE")
parser.add_option('-k', '--keep-going', action='store_true',
dest='keep_going', default=False,
help="Keep building other targets if a target fails")
parser.add_option('-m', '--make-target', action='append',
help='Build the indicated make target (default: %s)' %
' '.join(make_command))
(options, args) = parser.parse_args()
global all_options
all_options = options
if options.list:
print "Available targets:"
for target in configs.keys():
print " %s" % target
sys.exit(0)
if options.oldconfig:
make_command = ["oldconfig"]
elif options.make_target:
make_command = options.make_target
if options.jobs:
make_command.append("-j%d" % options.jobs)
if options.load_average:
make_command.append("-l%d" % options.load_average)
if args == ['all']:
build_many(configs, configs.keys())
elif args == ['perf']:
targets = []
for t in configs.keys():
if "perf" in t:
targets.append(t)
build_many(configs, targets)
elif args == ['noperf']:
targets = []
for t in configs.keys():
if "perf" not in t:
targets.append(t)
build_many(configs, targets)
elif len(args) > 0:
targets = []
for t in args:
if t not in configs.keys():
parser.error("Target '%s' not one of %s" % (t, configs.keys()))
targets.append(t)
build_many(configs, targets)
else:
parser.error("Must specify a target to build, or 'all'")
if __name__ == "__main__":
main()
| gpl-2.0 |
Grumbel/scatterbackup | tests/test_generation.py | 1 | 1634 | # ScatterBackup - A chaotic backup solution
# Copyright (C) 2015 Ingo Ruhnke <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
from scatterbackup.generation import GenerationRange
class GeneratorTestCase(unittest.TestCase):
def test_generation_range(self):
self.assertEqual(GenerationRange.from_string("10:20"), GenerationRange(10, 20))
self.assertEqual(GenerationRange.from_string(":20"), GenerationRange(None, 20))
self.assertEqual(GenerationRange.from_string("10:"), GenerationRange(10, None))
self.assertEqual(GenerationRange.from_string(":"), GenerationRange.MATCH_ALL)
self.assertEqual(GenerationRange.from_string(""), GenerationRange.MATCH_ALL)
self.assertRaises(Exception, lambda: GenerationRange.from_string("ERROR"))
self.assertRaises(Exception, lambda: GenerationRange.from_string("ERROR:"))
self.assertRaises(Exception, lambda: GenerationRange.from_string(":ERROR"))
if __name__ == '__main__':
unittest.main()
# EOF #
| gpl-3.0 |
ian-garrett/meetMe | env/lib/python3.4/site-packages/pip/_vendor/lockfile/symlinklockfile.py | 487 | 2613 | from __future__ import absolute_import
import time
import os
from . import (LockBase, LockFailed, NotLocked, NotMyLock, LockTimeout,
AlreadyLocked)
class SymlinkLockFile(LockBase):
"""Lock access to a file using symlink(2)."""
def __init__(self, path, threaded=True, timeout=None):
# super(SymlinkLockFile).__init(...)
LockBase.__init__(self, path, threaded, timeout)
# split it back!
self.unique_name = os.path.split(self.unique_name)[1]
def acquire(self, timeout=None):
# Hopefully unnecessary for symlink.
#try:
# open(self.unique_name, "wb").close()
#except IOError:
# raise LockFailed("failed to create %s" % self.unique_name)
timeout = timeout is not None and timeout or self.timeout
end_time = time.time()
if timeout is not None and timeout > 0:
end_time += timeout
while True:
# Try and create a symbolic link to it.
try:
os.symlink(self.unique_name, self.lock_file)
except OSError:
# Link creation failed. Maybe we've double-locked?
if self.i_am_locking():
# Linked to our unique name. Proceed.
return
else:
# Otherwise the lock creation failed.
if timeout is not None and time.time() > end_time:
if timeout > 0:
raise LockTimeout("Timeout waiting to acquire"
" lock for %s" %
self.path)
else:
raise AlreadyLocked("%s is already locked" %
self.path)
time.sleep(timeout/10 if timeout is not None else 0.1)
else:
# Link creation succeeded. We're good to go.
return
def release(self):
if not self.is_locked():
raise NotLocked("%s is not locked" % self.path)
elif not self.i_am_locking():
raise NotMyLock("%s is locked, but not by me" % self.path)
os.unlink(self.lock_file)
def is_locked(self):
return os.path.islink(self.lock_file)
def i_am_locking(self):
return os.path.islink(self.lock_file) and \
os.readlink(self.lock_file) == self.unique_name
def break_lock(self):
if os.path.islink(self.lock_file): # exists && link
os.unlink(self.lock_file)
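# --- Editor's addition: a hedged, POSIX-only usage sketch. The file name,
# timeout and helper name are arbitrary; the calls themselves are the
# acquire()/release() API shown above (plus LockBase's constructor).
def _example_locked_write(path="some.file", data="hello\n"):
    lock = SymlinkLockFile(path, timeout=10)
    lock.acquire()           # blocks up to 10s, then raises LockTimeout
    try:
        with open(path, "a") as f:
            f.write(data)
    finally:
        lock.release()       # raises NotLocked/NotMyLock if it isn't ours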
| artistic-2.0 |
psyhofreak/iTerm2 | tests/esctest/tests/tbc.py | 31 | 1409 | from esc import ESC, TAB
import esccmd
import escio
from escutil import AssertEQ, GetCursorPosition
from esctypes import Point
class TBCTests(object):
def test_TBC_Default(object):
"""No param clears the tab stop at the cursor."""
escio.Write(TAB)
AssertEQ(GetCursorPosition().x(), 9)
esccmd.TBC()
esccmd.CUP(Point(1, 1))
escio.Write(TAB)
AssertEQ(GetCursorPosition().x(), 17)
def test_TBC_0(object):
"""0 param clears the tab stop at the cursor."""
escio.Write(TAB)
AssertEQ(GetCursorPosition().x(), 9)
esccmd.TBC(0)
esccmd.CUP(Point(1, 1))
escio.Write(TAB)
AssertEQ(GetCursorPosition().x(), 17)
def test_TBC_3(object):
"""3 param clears all tab stops."""
# Remove all tab stops
esccmd.TBC(3)
# Set a tab stop at 30
esccmd.CUP(Point(30, 1))
esccmd.HTS()
# Move back to the start and then tab. Should go to 30.
esccmd.CUP(Point(1, 1))
escio.Write(TAB)
AssertEQ(GetCursorPosition().x(), 30)
def test_TBC_NoOp(object):
"""Clearing a nonexistent tab stop should do nothing."""
# Move to 10 and clear the tab stop
esccmd.CUP(Point(10, 1))
esccmd.TBC(0)
# Move to 1 and tab twice, ensuring the stops at 9 and 17 are still there.
esccmd.CUP(Point(1, 1))
escio.Write(TAB)
AssertEQ(GetCursorPosition().x(), 9)
escio.Write(TAB)
AssertEQ(GetCursorPosition().x(), 17)
| gpl-2.0 |
skywave/caf-zte-blade | scripts/gcc-wrapper.py | 41 | 3803 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2011, Code Aurora Forum. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Code Aurora nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Invoke gcc, looking for warnings, and causing a failure if there are
# non-whitelisted warnings.
import errno
import re
import os
import sys
import subprocess
# Note that gcc uses unicode, which may depend on the locale. TODO:
# force LANG to be set to en_US.UTF-8 to get consistent warnings.
allowed_warnings = set([
"alignment.c:720",
"async.c:122",
"async.c:270",
"dir.c:43",
"dm.c:1053",
"dm.c:1080",
"dm-table.c:1120",
"dm-table.c:1126",
"drm_edid.c:1303",
"eventpoll.c:1143",
"f_mass_storage.c:3368",
"inode.c:72",
"inode.c:73",
"inode.c:74",
"msm_sdcc.c:126",
"msm_sdcc.c:128",
"nf_conntrack_netlink.c:790",
"nf_nat_standalone.c:118",
"return_address.c:61",
"soc-core.c:1719",
"xt_log.h:50",
])
# Capture the name of the object file, so we can remove it on error.
ofile = None
warning_re = re.compile(r'''(.*/|)([^/]+\.[a-z]+:\d+):(\d+:)? warning:''')
def interpret_warning(line):
"""Decode the message from gcc. The messages we care about have a filename, and a warning"""
line = line.rstrip('\n')
m = warning_re.match(line)
if m and m.group(2) not in allowed_warnings:
print "error, forbidden warning:", m.group(2)
# If there is a warning, remove any object if it exists.
if ofile:
try:
os.remove(ofile)
except OSError:
pass
sys.exit(1)
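# Editor's note (the warning text below is illustrative, not a real log):
# for a line such as
#   "alignment.c:720:12: warning: cast increases required alignment"
# warning_re captures group(2) == "alignment.c:720", which is listed in
# allowed_warnings above, so the build continues; a warning whose
# "file.c:line" key is not whitelisted removes the object file (if known)
# and exits with status 1.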
def run_gcc():
args = sys.argv[1:]
# Look for -o
try:
i = args.index('-o')
global ofile
ofile = args[i+1]
except (ValueError, IndexError):
pass
compiler = sys.argv[0]
try:
proc = subprocess.Popen(args, stderr=subprocess.PIPE)
for line in proc.stderr:
print line,
interpret_warning(line)
result = proc.wait()
except OSError as e:
result = e.errno
if result == errno.ENOENT:
print args[0] + ':',e.strerror
print 'Is your PATH set correctly?'
else:
print ' '.join(args), str(e)
return result
if __name__ == '__main__':
status = run_gcc()
sys.exit(status)
| gpl-2.0 |
caldwell/servo | tests/wpt/css-tests/tools/html5lib/html5lib/trie/datrie.py | 785 | 1166 | from __future__ import absolute_import, division, unicode_literals
from datrie import Trie as DATrie
from six import text_type
from ._base import Trie as ABCTrie
class Trie(ABCTrie):
def __init__(self, data):
chars = set()
for key in data.keys():
if not isinstance(key, text_type):
raise TypeError("All keys must be strings")
for char in key:
chars.add(char)
self._data = DATrie("".join(chars))
for key, value in data.items():
self._data[key] = value
def __contains__(self, key):
return key in self._data
def __len__(self):
return len(self._data)
def __iter__(self):
raise NotImplementedError()
def __getitem__(self, key):
return self._data[key]
def keys(self, prefix=None):
return self._data.keys(prefix)
def has_keys_with_prefix(self, prefix):
return self._data.has_keys_with_prefix(prefix)
def longest_prefix(self, prefix):
return self._data.longest_prefix(prefix)
def longest_prefix_item(self, prefix):
return self._data.longest_prefix_item(prefix)
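# --- Editor's addition: a hedged usage sketch. It needs the optional
# third-party "datrie" package that this wrapper adapts; the keys are
# arbitrary examples.
def _example_trie_usage():
    trie = Trie({"abc": 1, "abd": 2, "b": 3})
    assert "abc" in trie and len(trie) == 3
    assert trie.has_keys_with_prefix("ab")
    assert sorted(trie.keys("ab")) == ["abc", "abd"]
    return trie.longest_prefix_item("abcdef")   # -> ("abc", 1)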
| mpl-2.0 |
luiscarlosgph/nas | env/lib/python2.7/site-packages/django/contrib/webdesign/templatetags/webdesign.py | 75 | 2194 | from __future__ import unicode_literals
from django.contrib.webdesign.lorem_ipsum import words, paragraphs
from django import template
register = template.Library()
class LoremNode(template.Node):
def __init__(self, count, method, common):
self.count, self.method, self.common = count, method, common
def render(self, context):
try:
count = int(self.count.resolve(context))
except (ValueError, TypeError):
count = 1
if self.method == 'w':
return words(count, common=self.common)
else:
paras = paragraphs(count, common=self.common)
if self.method == 'p':
paras = ['<p>%s</p>' % p for p in paras]
return '\n\n'.join(paras)
@register.tag
def lorem(parser, token):
"""
Creates random Latin text useful for providing test data in templates.
Usage format::
{% lorem [count] [method] [random] %}
``count`` is a number (or variable) containing the number of paragraphs or
words to generate (default is 1).
``method`` is either ``w`` for words, ``p`` for HTML paragraphs, ``b`` for
plain-text paragraph blocks (default is ``b``).
``random`` is the word ``random``, which if given, does not use the common
paragraph (starting "Lorem ipsum dolor sit amet, consectetuer...").
Examples:
* ``{% lorem %}`` will output the common "lorem ipsum" paragraph
* ``{% lorem 3 p %}`` will output the common "lorem ipsum" paragraph
and two random paragraphs each wrapped in HTML ``<p>`` tags
* ``{% lorem 2 w random %}`` will output two random latin words
"""
bits = list(token.split_contents())
tagname = bits[0]
# Random bit
common = bits[-1] != 'random'
if not common:
bits.pop()
# Method bit
if bits[-1] in ('w', 'p', 'b'):
method = bits.pop()
else:
method = 'b'
# Count bit
if len(bits) > 1:
count = bits.pop()
else:
count = '1'
count = parser.compile_filter(count)
if len(bits) != 1:
raise template.TemplateSyntaxError("Incorrect format for %r tag" % tagname)
return LoremNode(count, method, common)
| mit |
DylanMcCall/rhythmbox-songinfo-context-menu | plugins/lyrics/DarkLyricsParser.py | 3 | 5935 | # -*- Mode: python; coding: utf-8; tab-width: 8; indent-tabs-mode: t; -*-
#
# Copyright (C) 2008, 2009, 2010 Edgar Luna
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# The Rhythmbox authors hereby grant permission for non-GPL compatible
# GStreamer plugins to be used and distributed together with GStreamer
# and Rhythmbox. This permission is above and beyond the permissions granted
# by the GPL license by which Rhythmbox is covered. If you modify this code
# you may extend this exception to your version of the code, but you are not
# obligated to do so. If you do not wish to do so, delete this exception
# statement from your version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
import re
import string
import rb
import stringmatch
min_artist_match = .5
min_song_match = .5
class DarkLyricsParser (object):
"""Parser for Lyrics from www.darklyrics.com"""
def __init__(self, artist, title):
self.artist = artist
self.title = title
self.artist_ascii = ''
self.titlenumber = ''
def search(self, callback, *data):
"""Do a request of a specific url based on artist's first letter name."""
self.artist_ascii = ''.join(c for c in self.artist.lower() \
if c in string.ascii_letters)
self.artist_ascii = self.artist_ascii.lower()
firstcharurl = 'http://www.darklyrics.com/%s.html' % (self.artist_ascii[0])
loader = rb.Loader()
loader.get_url (firstcharurl, self.search_artist, callback, *data)
def search_artist(self, artist_page, callback, *data):
"""Search for the link to the page of artist in artists_page
"""
if artist_page is None:
callback (None, *data)
return
artist_page = artist_page.decode('iso-8859-1')
link_section = re.split ('tban.js', artist_page, 1)[1]
pattern_link = '<a href="'
pattern_artist = '([^"]*)">*([^<]*)<'
links = re.split (pattern_link, link_section.lower())
links.pop(0)
best_match = ()
smvalue_bestmatch = 0
for line in links:
artist = re.findall(pattern_artist, line)
if len(artist) == 0:
continue
artist_link, artist_name = artist[0]
artist_url = 'http://www.darklyrics.com/%s' % (artist_link)
if artist_link[:5] == 'http:':
continue
artist_name = artist_name.strip()
smvalue = stringmatch.string_match (artist_name, self.artist_ascii)
if smvalue > min_artist_match and smvalue > smvalue_bestmatch:
best_match = (smvalue, artist_url, artist_name)
smvalue_bestmatch = smvalue
if not best_match:
# Lyrics are located in external site
callback (None, *data)
return
loader = rb.Loader ()
self.artist = best_match[2]
loader.get_url (best_match[1], self.search_song, callback, *data)
class SongFound (object):
def __init__ (self, smvalue, title, number, album, artist):
self.smvalue = smvalue
self.title = title
self.number = number
self.album = album
self.artist = artist
def __str__(self):
return '(' + str(self.smvalue) + '. ' + self.title + '. ' + self.album + '. ' + self.artist + ')'
def search_song (self, songlist, callback, *data):
"""If artist's page is found, search_song looks for the song.
The artist page contains a list of all the albums and
links to the lyrics of each song.
"""
if songlist is None:
callback (None, *data)
return
songlist = songlist.decode('iso-8859-1')
# Search for all the <a>
# filter for those that has the artist name string_match
# and for those which its content is artist string_match
# Sort by values given from string_match
# and get the best
link_section = re.split('LYRICS</h1>', songlist)[1]
link_section = link_section.lower()
pattern_song = '<a href="../lyrics/(.*)/(.*).html#([^"]+)">(.*)</a>'
matches = re.findall (pattern_song.lower(), link_section)
best_match = ""
for line in matches:
artist, album, number, title = line
smvalue = stringmatch.string_match (title.lower().replace(' ', '' ),
self.title.lower().replace(' ', ''))
if smvalue > min_song_match:
best_match = self.SongFound(smvalue,
title,
number,
album,
artist)
break
if not best_match:
callback (None, *data)
return
loader = rb.Loader ()
url = 'http://www.darklyrics.com/lyrics/%s/%s.html' % (best_match.artist, best_match.album)
self.title = best_match.title
self.titlenumber = best_match.number
loader.get_url (url, self.parse_lyrics, callback, *data)
def parse_lyrics (self, album, callback, *data):
"""In the album's page parse_lyrics get the lyrics of the song.
This page contains all the lyrics for self.album, but
this method get rides of everything that isn't the
lyrics of self.title"""
if album is None:
callback (None, *data)
return
album = album.decode('iso-8859-1')
titleline = '<a name="%s">%s. %s(.*?)</a>' % \
(self.titlenumber, self.titlenumber, re.escape(self.title.title()))
lyricmatch = re.split (titleline, album)
if len (lyricmatch) > 2:
lyrics = lyricmatch[2]
lyrics = lyrics.split('<h3>')[0]
lyrics = lyrics.replace ('\r', "")
lyrics = re.sub (r'<.*?>', "", lyrics)
lyrics = lyrics.strip ("\n")
title = "%s - %s\n\n" % (self.artist.title(), self.title.title())
lyrics = title + str (lyrics)
lyrics += "\n\nLyrics provided by Dark Lyrics"
callback (lyrics, *data)
else:
callback (None, *data)
return
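# --- Editor's addition: a hedged usage sketch. The parser is callback-driven
# and depends on Rhythmbox's rb.Loader for HTTP, so it is only meaningful
# inside the lyrics plugin; artist and title below are placeholders.
#
#     def _got_lyrics(lyrics):
#         print lyrics or "lyrics not found"
#
#     DarkLyricsParser("Some Artist", "Some Song").search(_got_lyrics)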
| gpl-2.0 |
swenson/sagewiki | unidecode/unidecode/x06e.py | 252 | 4640 | data = (
'Ben ', # 0x00
'Yuan ', # 0x01
'Wen ', # 0x02
'Re ', # 0x03
'Fei ', # 0x04
'Qing ', # 0x05
'Yuan ', # 0x06
'Ke ', # 0x07
'Ji ', # 0x08
'She ', # 0x09
'Yuan ', # 0x0a
'Shibui ', # 0x0b
'Lu ', # 0x0c
'Zi ', # 0x0d
'Du ', # 0x0e
'[?] ', # 0x0f
'Jian ', # 0x10
'Min ', # 0x11
'Pi ', # 0x12
'Tani ', # 0x13
'Yu ', # 0x14
'Yuan ', # 0x15
'Shen ', # 0x16
'Shen ', # 0x17
'Rou ', # 0x18
'Huan ', # 0x19
'Zhu ', # 0x1a
'Jian ', # 0x1b
'Nuan ', # 0x1c
'Yu ', # 0x1d
'Qiu ', # 0x1e
'Ting ', # 0x1f
'Qu ', # 0x20
'Du ', # 0x21
'Feng ', # 0x22
'Zha ', # 0x23
'Bo ', # 0x24
'Wo ', # 0x25
'Wo ', # 0x26
'Di ', # 0x27
'Wei ', # 0x28
'Wen ', # 0x29
'Ru ', # 0x2a
'Xie ', # 0x2b
'Ce ', # 0x2c
'Wei ', # 0x2d
'Ge ', # 0x2e
'Gang ', # 0x2f
'Yan ', # 0x30
'Hong ', # 0x31
'Xuan ', # 0x32
'Mi ', # 0x33
'Ke ', # 0x34
'Mao ', # 0x35
'Ying ', # 0x36
'Yan ', # 0x37
'You ', # 0x38
'Hong ', # 0x39
'Miao ', # 0x3a
'Xing ', # 0x3b
'Mei ', # 0x3c
'Zai ', # 0x3d
'Hun ', # 0x3e
'Nai ', # 0x3f
'Kui ', # 0x40
'Shi ', # 0x41
'E ', # 0x42
'Pai ', # 0x43
'Mei ', # 0x44
'Lian ', # 0x45
'Qi ', # 0x46
'Qi ', # 0x47
'Mei ', # 0x48
'Tian ', # 0x49
'Cou ', # 0x4a
'Wei ', # 0x4b
'Can ', # 0x4c
'Tuan ', # 0x4d
'Mian ', # 0x4e
'Hui ', # 0x4f
'Mo ', # 0x50
'Xu ', # 0x51
'Ji ', # 0x52
'Pen ', # 0x53
'Jian ', # 0x54
'Jian ', # 0x55
'Hu ', # 0x56
'Feng ', # 0x57
'Xiang ', # 0x58
'Yi ', # 0x59
'Yin ', # 0x5a
'Zhan ', # 0x5b
'Shi ', # 0x5c
'Jie ', # 0x5d
'Cheng ', # 0x5e
'Huang ', # 0x5f
'Tan ', # 0x60
'Yu ', # 0x61
'Bi ', # 0x62
'Min ', # 0x63
'Shi ', # 0x64
'Tu ', # 0x65
'Sheng ', # 0x66
'Yong ', # 0x67
'Qu ', # 0x68
'Zhong ', # 0x69
'Suei ', # 0x6a
'Jiu ', # 0x6b
'Jiao ', # 0x6c
'Qiou ', # 0x6d
'Yin ', # 0x6e
'Tang ', # 0x6f
'Long ', # 0x70
'Huo ', # 0x71
'Yuan ', # 0x72
'Nan ', # 0x73
'Ban ', # 0x74
'You ', # 0x75
'Quan ', # 0x76
'Chui ', # 0x77
'Liang ', # 0x78
'Chan ', # 0x79
'Yan ', # 0x7a
'Chun ', # 0x7b
'Nie ', # 0x7c
'Zi ', # 0x7d
'Wan ', # 0x7e
'Shi ', # 0x7f
'Man ', # 0x80
'Ying ', # 0x81
'Ratsu ', # 0x82
'Kui ', # 0x83
'[?] ', # 0x84
'Jian ', # 0x85
'Xu ', # 0x86
'Lu ', # 0x87
'Gui ', # 0x88
'Gai ', # 0x89
'[?] ', # 0x8a
'[?] ', # 0x8b
'Po ', # 0x8c
'Jin ', # 0x8d
'Gui ', # 0x8e
'Tang ', # 0x8f
'Yuan ', # 0x90
'Suo ', # 0x91
'Yuan ', # 0x92
'Lian ', # 0x93
'Yao ', # 0x94
'Meng ', # 0x95
'Zhun ', # 0x96
'Sheng ', # 0x97
'Ke ', # 0x98
'Tai ', # 0x99
'Da ', # 0x9a
'Wa ', # 0x9b
'Liu ', # 0x9c
'Gou ', # 0x9d
'Sao ', # 0x9e
'Ming ', # 0x9f
'Zha ', # 0xa0
'Shi ', # 0xa1
'Yi ', # 0xa2
'Lun ', # 0xa3
'Ma ', # 0xa4
'Pu ', # 0xa5
'Wei ', # 0xa6
'Li ', # 0xa7
'Cai ', # 0xa8
'Wu ', # 0xa9
'Xi ', # 0xaa
'Wen ', # 0xab
'Qiang ', # 0xac
'Ze ', # 0xad
'Shi ', # 0xae
'Su ', # 0xaf
'Yi ', # 0xb0
'Zhen ', # 0xb1
'Sou ', # 0xb2
'Yun ', # 0xb3
'Xiu ', # 0xb4
'Yin ', # 0xb5
'Rong ', # 0xb6
'Hun ', # 0xb7
'Su ', # 0xb8
'Su ', # 0xb9
'Ni ', # 0xba
'Ta ', # 0xbb
'Shi ', # 0xbc
'Ru ', # 0xbd
'Wei ', # 0xbe
'Pan ', # 0xbf
'Chu ', # 0xc0
'Chu ', # 0xc1
'Pang ', # 0xc2
'Weng ', # 0xc3
'Cang ', # 0xc4
'Mie ', # 0xc5
'He ', # 0xc6
'Dian ', # 0xc7
'Hao ', # 0xc8
'Huang ', # 0xc9
'Xi ', # 0xca
'Zi ', # 0xcb
'Di ', # 0xcc
'Zhi ', # 0xcd
'Ying ', # 0xce
'Fu ', # 0xcf
'Jie ', # 0xd0
'Hua ', # 0xd1
'Ge ', # 0xd2
'Zi ', # 0xd3
'Tao ', # 0xd4
'Teng ', # 0xd5
'Sui ', # 0xd6
'Bi ', # 0xd7
'Jiao ', # 0xd8
'Hui ', # 0xd9
'Gun ', # 0xda
'Yin ', # 0xdb
'Gao ', # 0xdc
'Long ', # 0xdd
'Zhi ', # 0xde
'Yan ', # 0xdf
'She ', # 0xe0
'Man ', # 0xe1
'Ying ', # 0xe2
'Chun ', # 0xe3
'Lu ', # 0xe4
'Lan ', # 0xe5
'Luan ', # 0xe6
'[?] ', # 0xe7
'Bin ', # 0xe8
'Tan ', # 0xe9
'Yu ', # 0xea
'Sou ', # 0xeb
'Hu ', # 0xec
'Bi ', # 0xed
'Biao ', # 0xee
'Zhi ', # 0xef
'Jiang ', # 0xf0
'Kou ', # 0xf1
'Shen ', # 0xf2
'Shang ', # 0xf3
'Di ', # 0xf4
'Mi ', # 0xf5
'Ao ', # 0xf6
'Lu ', # 0xf7
'Hu ', # 0xf8
'Hu ', # 0xf9
'You ', # 0xfa
'Chan ', # 0xfb
'Fan ', # 0xfc
'Yong ', # 0xfd
'Gun ', # 0xfe
'Man ', # 0xff
)
| gpl-2.0 |
chris4795/u-boot-novena | test/py/u_boot_console_sandbox.py | 12 | 2376 | # Copyright (c) 2015 Stephen Warren
# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
#
# SPDX-License-Identifier: GPL-2.0
# Logic to interact with the sandbox port of U-Boot, running as a sub-process.
import time
from u_boot_spawn import Spawn
from u_boot_console_base import ConsoleBase
class ConsoleSandbox(ConsoleBase):
"""Represents a connection to a sandbox U-Boot console, executed as a sub-
process."""
def __init__(self, log, config):
"""Initialize a U-Boot console connection.
Args:
log: A multiplexed_log.Logfile instance.
config: A "configuration" object as defined in conftest.py.
Returns:
Nothing.
"""
super(ConsoleSandbox, self).__init__(log, config, max_fifo_fill=1024)
def get_spawn(self):
"""Connect to a fresh U-Boot instance.
A new sandbox process is created, so that U-Boot begins running from
scratch.
Args:
None.
Returns:
A u_boot_spawn.Spawn object that is attached to U-Boot.
"""
bcfg = self.config.buildconfig
config_spl = bcfg.get('config_spl', 'n') == 'y'
fname = '/spl/u-boot-spl' if config_spl else '/u-boot'
print fname
cmd = []
if self.config.gdbserver:
cmd += ['gdbserver', self.config.gdbserver]
cmd += [
self.config.build_dir + fname,
'-v',
'-d',
self.config.dtb
]
return Spawn(cmd, cwd=self.config.source_dir)
def kill(self, sig):
"""Send a specific Unix signal to the sandbox process.
Args:
sig: The Unix signal to send to the process.
Returns:
Nothing.
"""
self.log.action('kill %d' % sig)
self.p.kill(sig)
def validate_exited(self):
"""Determine whether the sandbox process has exited.
If required, this function waits a reasonable time for the process to
exit.
Args:
None.
Returns:
Boolean indicating whether the process has exited.
"""
p = self.p
self.p = None
for i in xrange(100):
ret = not p.isalive()
if ret:
break
time.sleep(0.1)
p.close()
return ret
| gpl-2.0 |
Kingdread/qutebrowser | scripts/dev/run_pylint_on_tests.py | 8 | 2085 | #!/usr/bin/env python3
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2015 Florian Bruhin (The Compiler) <[email protected]>
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Run pylint on tests.
This is needed because pylint can't check a folder which isn't a package:
https://bitbucket.org/logilab/pylint/issue/512/
"""
import os
import sys
import os.path
import subprocess
sys.path.insert(0, os.path.join(os.path.dirname(__file__), os.pardir,
os.pardir))
from scripts import utils
def main():
"""Main entry point.
Return:
The pylint exit status.
"""
utils.change_cwd()
files = []
for dirpath, _dirnames, filenames in os.walk('tests'):
for fn in filenames:
if os.path.splitext(fn)[1] == '.py':
files.append(os.path.join(dirpath, fn))
disabled = [
'attribute-defined-outside-init',
'redefined-outer-name',
'unused-argument',
'missing-docstring',
'protected-access',
# https://bitbucket.org/logilab/pylint/issue/511/
'undefined-variable',
]
no_docstring_rgx = ['^__.*__$', '^setup$']
args = (['--disable={}'.format(','.join(disabled)),
'--no-docstring-rgx=({})'.format('|'.join(no_docstring_rgx))] +
sys.argv[1:] + files)
ret = subprocess.call(['pylint'] + args)
return ret
if __name__ == '__main__':
sys.exit(main())
| gpl-3.0 |
mollstam/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Python-2.7.10/Doc/includes/tzinfo-examples.py | 11 | 5062 | from datetime import tzinfo, timedelta, datetime
ZERO = timedelta(0)
HOUR = timedelta(hours=1)
# A UTC class.
class UTC(tzinfo):
"""UTC"""
def utcoffset(self, dt):
return ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return ZERO
utc = UTC()
# A class building tzinfo objects for fixed-offset time zones.
# Note that FixedOffset(0, "UTC") is a different way to build a
# UTC tzinfo object.
class FixedOffset(tzinfo):
"""Fixed offset in minutes east from UTC."""
def __init__(self, offset, name):
self.__offset = timedelta(minutes = offset)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return ZERO
# A class capturing the platform's idea of local time.
import time as _time
STDOFFSET = timedelta(seconds = -_time.timezone)
if _time.daylight:
DSTOFFSET = timedelta(seconds = -_time.altzone)
else:
DSTOFFSET = STDOFFSET
DSTDIFF = DSTOFFSET - STDOFFSET
class LocalTimezone(tzinfo):
def utcoffset(self, dt):
if self._isdst(dt):
return DSTOFFSET
else:
return STDOFFSET
def dst(self, dt):
if self._isdst(dt):
return DSTDIFF
else:
return ZERO
def tzname(self, dt):
return _time.tzname[self._isdst(dt)]
def _isdst(self, dt):
tt = (dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second,
dt.weekday(), 0, 0)
stamp = _time.mktime(tt)
tt = _time.localtime(stamp)
return tt.tm_isdst > 0
Local = LocalTimezone()
# A complete implementation of current DST rules for major US time zones.
def first_sunday_on_or_after(dt):
days_to_go = 6 - dt.weekday()
if days_to_go:
dt += timedelta(days_to_go)
return dt
# US DST Rules
#
# This is a simplified (i.e., wrong for a few cases) set of rules for US
# DST start and end times. For a complete and up-to-date set of DST rules
# and timezone definitions, visit the Olson Database (or try pytz):
# http://www.twinsun.com/tz/tz-link.htm
# http://sourceforge.net/projects/pytz/ (might not be up-to-date)
#
# In the US, since 2007, DST starts at 2am (standard time) on the second
# Sunday in March, which is the first Sunday on or after Mar 8.
DSTSTART_2007 = datetime(1, 3, 8, 2)
# and ends at 2am (DST time; 1am standard time) on the first Sunday of Nov.
DSTEND_2007 = datetime(1, 11, 1, 1)
# From 1987 to 2006, DST used to start at 2am (standard time) on the first
# Sunday in April and to end at 2am (DST time; 1am standard time) on the last
# Sunday of October, which is the first Sunday on or after Oct 25.
DSTSTART_1987_2006 = datetime(1, 4, 1, 2)
DSTEND_1987_2006 = datetime(1, 10, 25, 1)
# From 1967 to 1986, DST used to start at 2am (standard time) on the last
# Sunday in April (the one on or after April 24) and to end at 2am (DST time;
# 1am standard time) on the last Sunday of October, which is the first Sunday
# on or after Oct 25.
DSTSTART_1967_1986 = datetime(1, 4, 24, 2)
DSTEND_1967_1986 = DSTEND_1987_2006
class USTimeZone(tzinfo):
def __init__(self, hours, reprname, stdname, dstname):
self.stdoffset = timedelta(hours=hours)
self.reprname = reprname
self.stdname = stdname
self.dstname = dstname
def __repr__(self):
return self.reprname
def tzname(self, dt):
if self.dst(dt):
return self.dstname
else:
return self.stdname
def utcoffset(self, dt):
return self.stdoffset + self.dst(dt)
def dst(self, dt):
if dt is None or dt.tzinfo is None:
# An exception may be sensible here, in one or both cases.
# It depends on how you want to treat them. The default
# fromutc() implementation (called by the default astimezone()
# implementation) passes a datetime with dt.tzinfo is self.
return ZERO
assert dt.tzinfo is self
# Find start and end times for US DST. For years before 1967, return
# ZERO for no DST.
if 2006 < dt.year:
dststart, dstend = DSTSTART_2007, DSTEND_2007
elif 1986 < dt.year < 2007:
dststart, dstend = DSTSTART_1987_2006, DSTEND_1987_2006
elif 1966 < dt.year < 1987:
dststart, dstend = DSTSTART_1967_1986, DSTEND_1967_1986
else:
return ZERO
start = first_sunday_on_or_after(dststart.replace(year=dt.year))
end = first_sunday_on_or_after(dstend.replace(year=dt.year))
# Can't compare naive to aware objects, so strip the timezone from
# dt first.
if start <= dt.replace(tzinfo=None) < end:
return HOUR
else:
return ZERO
Eastern = USTimeZone(-5, "Eastern", "EST", "EDT")
Central = USTimeZone(-6, "Central", "CST", "CDT")
Mountain = USTimeZone(-7, "Mountain", "MST", "MDT")
Pacific = USTimeZone(-8, "Pacific", "PST", "PDT")
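# --- Editor's addition: a hedged usage sketch; the date is arbitrary.
def _example_aware_datetime():
    dt = datetime(2010, 7, 4, 12, 0, tzinfo=Eastern)
    assert dt.tzname() == "EDT"                    # July falls inside US DST
    assert dt.utcoffset() == timedelta(hours=-4)   # EST (-5) plus 1h of DST
    return dt.astimezone(utc)                      # 2010-07-04 16:00:00+00:00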
| mit |
HwaAnnaLee/androguard | demos/crackme_dexlabs_patch.py | 38 | 1611 | #!/usr/bin/env python
import sys
PATH_INSTALL = "./"
sys.path.append(PATH_INSTALL)
from androguard.core.bytecodes import dvm
from androguard.core.bytecodes import apk
from androguard.core.analysis import analysis
from androguard.core import androconf
class Nop(dvm.Instruction10x) :
def __init__(self) :
self.OP = 0x00
def patch_dex( m ) :
for i in m.get_methods() :
if i.get_class_name() == "Lorg/dexlabs/poc/dexdropper/DropActivity;" :
print i.get_class_name(), i.get_name()
patch_method_3( i )
# or
# patch_method_X( i )
def patch_method_1( method ) :
buff = method.get_code().get_bc().insn
buff = "\x00" * 0x12 + buff[0x12:]
method.get_code().get_bc().insn = buff
def patch_method_2( method ) :
method.set_code_idx( 0x12 )
instructions = [ j for j in method.get_instructions() ]
for j in range(0, 9) :
instructions.insert(0, Nop() )
method.set_instructions( instructions )
def patch_method_3( method ) :
method.set_code_idx( 0x12 )
instructions = [ j for j in method.get_instructions() ]
for j in range(0, 9) :
instructions.insert(0, dvm.Instruction10x(None, "\x00\x00") )
method.set_instructions( instructions )
FILENAME_INPUT = "apks/crash/crackme-obfuscator.apk"
FILENAME_OUTPUT = "./toto.dex"
androconf.set_debug()
a = apk.APK( FILENAME_INPUT )
vm = dvm.DalvikVMFormat( a.get_dex() )
vmx = analysis.VMAnalysis( vm )
patch_dex( vm )
new_dex = vm.save()
fd = open(FILENAME_OUTPUT, "w")
fd.write( new_dex )
fd.close()
| apache-2.0 |
mhfowler/brocascoconut | mhf/models.py | 3 | 1734 | from django.db import models
#-----------------------------------------------------------------------------------------------------------------------
# Useful manager for all models.
#-----------------------------------------------------------------------------------------------------------------------
class XManager(models.Manager):
"""Adds get_or_none method to objects
"""
def get_or_none(self, **kwargs):
to_return = self.filter(**kwargs)
if to_return:
return to_return[0]
else:
return to_return
#-----------------------------------------------------------------------------------------------------------------------
# All models inherit from this
#-----------------------------------------------------------------------------------------------------------------------
class XModel(models.Model):
xg = XManager()
objects = models.Manager()
class Meta:
abstract = True
#-----------------------------------------------------------------------------------------------------------------------
# a number for keeping track of stuff
#-----------------------------------------------------------------------------------------------------------------------
class Stat(XModel):
name = models.CharField(max_length=100)
number = models.IntegerField(default=0)
#-----------------------------------------------------------------------------------------------------------------------
# for keeping track of ids
#-----------------------------------------------------------------------------------------------------------------------
class TwitterID(XModel):
key = models.CharField(max_length=100)
value = models.CharField(max_length=100)
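# --- Editor's addition: a hedged usage sketch (needs a configured Django
# project; the stat name is a placeholder). Note that on a miss get_or_none()
# returns the empty, falsy queryset rather than None, so test truthiness:
#
#     stat = Stat.xg.get_or_none(name='page_views')
#     if not stat:
#         stat = Stat.objects.create(name='page_views', number=0)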
| mit |
forge33/CouchPotatoServer | couchpotato/core/plugins/dashboard.py | 21 | 3895 | import random as rndm
import time
from CodernityDB.database import RecordDeleted
from couchpotato import get_db
from couchpotato.api import addApiView
from couchpotato.core.event import fireEvent
from couchpotato.core.helpers.variable import splitString, tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
log = CPLog(__name__)
autoload = 'Dashboard'
class Dashboard(Plugin):
def __init__(self):
addApiView('dashboard.soon', self.getSoonView)
def getSoonView(self, limit_offset = None, random = False, late = False, **kwargs):
db = get_db()
now = time.time()
# Get profiles first, determine pre or post theater
profiles = fireEvent('profile.all', single = True)
pre_releases = fireEvent('quality.pre_releases', single = True)
# See what the profile contain and cache it
profile_pre = {}
for profile in profiles:
contains = {}
for q_identifier in profile.get('qualities', []):
contains['theater' if q_identifier in pre_releases else 'dvd'] = True
profile_pre[profile.get('_id')] = contains
# Add limit
limit = 12
if limit_offset:
splt = splitString(limit_offset) if isinstance(limit_offset, (str, unicode)) else limit_offset
limit = tryInt(splt[0])
# Get all active medias
active_ids = [x['_id'] for x in fireEvent('media.with_status', 'active', with_doc = False, single = True)]
medias = []
if len(active_ids) > 0:
# Order by title or randomize
if not random:
orders_ids = db.all('media_title')
active_ids = [x['_id'] for x in orders_ids if x['_id'] in active_ids]
else:
rndm.shuffle(active_ids)
for media_id in active_ids:
try:
media = db.get('id', media_id)
except RecordDeleted:
log.debug('Record already deleted: %s', media_id)
continue
pp = profile_pre.get(media.get('profile_id'))
if not pp: continue
eta = media['info'].get('release_date', {}) or {}
coming_soon = False
# Theater quality
if pp.get('theater') and fireEvent('movie.searcher.could_be_released', True, eta, media['info']['year'], single = True):
coming_soon = 'theater'
elif pp.get('dvd') and fireEvent('movie.searcher.could_be_released', False, eta, media['info']['year'], single = True):
coming_soon = 'dvd'
if coming_soon:
# Don't list older movies
eta_date = eta.get(coming_soon)
eta_3month_passed = eta_date < (now - 7862400) # Release was more than 3 months ago
if (not late and not eta_3month_passed) or \
(late and eta_3month_passed):
add = True
# Check if it doesn't have any releases
if late:
media['releases'] = fireEvent('release.for_media', media['_id'], single = True)
for release in media.get('releases'):
if release.get('status') in ['snatched', 'available', 'seeding', 'downloaded']:
add = False
break
if add:
medias.append(media)
if len(medias) >= limit:
break
return {
'success': True,
'empty': len(medias) == 0,
'movies': medias,
}
getLateView = getSoonView
| gpl-3.0 |
Salat-Cx65/python-for-android | python3-alpha/python3-src/Lib/xml/dom/minidom.py | 45 | 66631 | """\
minidom.py -- a lightweight DOM implementation.
parse("foo.xml")
parseString("<foo><bar/></foo>")
Todo:
=====
* convenience methods for getting elements and text.
* more testing
* bring some of the writer and linearizer code into conformance with this
interface
* SAX 2 namespaces
"""
import codecs
import io
import xml.dom
from xml.dom import EMPTY_NAMESPACE, EMPTY_PREFIX, XMLNS_NAMESPACE, domreg
from xml.dom.minicompat import *
from xml.dom.xmlbuilder import DOMImplementationLS, DocumentLS
# This is used by the ID-cache invalidation checks; the list isn't
# actually complete, since the nodes being checked will never be the
# DOCUMENT_NODE or DOCUMENT_FRAGMENT_NODE. (The node being checked is
# the node being added or removed, not the node being modified.)
#
_nodeTypes_with_children = (xml.dom.Node.ELEMENT_NODE,
xml.dom.Node.ENTITY_REFERENCE_NODE)
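# Editor's note: the usage hinted at in the module docstring, spelled out
# (the markup is illustrative):
#
#     from xml.dom.minidom import parseString
#     doc = parseString("<foo><bar/></foo>")
#     doc.getElementsByTagName("bar")[0].setAttribute("x", "1")
#     print(doc.toxml())   # <?xml version="1.0" ?><foo><bar x="1"/></foo>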
class Node(xml.dom.Node):
namespaceURI = None # this is non-null only for elements and attributes
parentNode = None
ownerDocument = None
nextSibling = None
previousSibling = None
prefix = EMPTY_PREFIX # non-null only for NS elements and attributes
def __bool__(self):
return True
def toxml(self, encoding=None):
return self.toprettyxml("", "", encoding)
def toprettyxml(self, indent="\t", newl="\n", encoding=None):
# indent = the indentation string to prepend, per level
# newl = the newline string to append
use_encoding = "utf-8" if encoding is None else encoding
writer = codecs.getwriter(use_encoding)(io.BytesIO())
if self.nodeType == Node.DOCUMENT_NODE:
# Can pass encoding only to document, to put it into XML header
self.writexml(writer, "", indent, newl, encoding)
else:
self.writexml(writer, "", indent, newl)
if encoding is None:
return writer.stream.getvalue().decode(use_encoding)
else:
return writer.stream.getvalue()
def hasChildNodes(self):
if self.childNodes:
return True
else:
return False
def _get_childNodes(self):
return self.childNodes
def _get_firstChild(self):
if self.childNodes:
return self.childNodes[0]
def _get_lastChild(self):
if self.childNodes:
return self.childNodes[-1]
def insertBefore(self, newChild, refChild):
if newChild.nodeType == self.DOCUMENT_FRAGMENT_NODE:
for c in tuple(newChild.childNodes):
self.insertBefore(c, refChild)
### The DOM does not clearly specify what to return in this case
return newChild
if newChild.nodeType not in self._child_node_types:
raise xml.dom.HierarchyRequestErr(
"%s cannot be child of %s" % (repr(newChild), repr(self)))
if newChild.parentNode is not None:
newChild.parentNode.removeChild(newChild)
if refChild is None:
self.appendChild(newChild)
else:
try:
index = self.childNodes.index(refChild)
except ValueError:
raise xml.dom.NotFoundErr()
if newChild.nodeType in _nodeTypes_with_children:
_clear_id_cache(self)
self.childNodes.insert(index, newChild)
newChild.nextSibling = refChild
refChild.previousSibling = newChild
if index:
node = self.childNodes[index-1]
node.nextSibling = newChild
newChild.previousSibling = node
else:
newChild.previousSibling = None
newChild.parentNode = self
return newChild
def appendChild(self, node):
if node.nodeType == self.DOCUMENT_FRAGMENT_NODE:
for c in tuple(node.childNodes):
self.appendChild(c)
### The DOM does not clearly specify what to return in this case
return node
if node.nodeType not in self._child_node_types:
raise xml.dom.HierarchyRequestErr(
"%s cannot be child of %s" % (repr(node), repr(self)))
elif node.nodeType in _nodeTypes_with_children:
_clear_id_cache(self)
if node.parentNode is not None:
node.parentNode.removeChild(node)
_append_child(self, node)
node.nextSibling = None
return node
def replaceChild(self, newChild, oldChild):
if newChild.nodeType == self.DOCUMENT_FRAGMENT_NODE:
refChild = oldChild.nextSibling
self.removeChild(oldChild)
return self.insertBefore(newChild, refChild)
if newChild.nodeType not in self._child_node_types:
raise xml.dom.HierarchyRequestErr(
"%s cannot be child of %s" % (repr(newChild), repr(self)))
if newChild is oldChild:
return
if newChild.parentNode is not None:
newChild.parentNode.removeChild(newChild)
try:
index = self.childNodes.index(oldChild)
except ValueError:
raise xml.dom.NotFoundErr()
self.childNodes[index] = newChild
newChild.parentNode = self
oldChild.parentNode = None
if (newChild.nodeType in _nodeTypes_with_children
or oldChild.nodeType in _nodeTypes_with_children):
_clear_id_cache(self)
newChild.nextSibling = oldChild.nextSibling
newChild.previousSibling = oldChild.previousSibling
oldChild.nextSibling = None
oldChild.previousSibling = None
if newChild.previousSibling:
newChild.previousSibling.nextSibling = newChild
if newChild.nextSibling:
newChild.nextSibling.previousSibling = newChild
return oldChild
def removeChild(self, oldChild):
try:
self.childNodes.remove(oldChild)
except ValueError:
raise xml.dom.NotFoundErr()
if oldChild.nextSibling is not None:
oldChild.nextSibling.previousSibling = oldChild.previousSibling
if oldChild.previousSibling is not None:
oldChild.previousSibling.nextSibling = oldChild.nextSibling
oldChild.nextSibling = oldChild.previousSibling = None
if oldChild.nodeType in _nodeTypes_with_children:
_clear_id_cache(self)
oldChild.parentNode = None
return oldChild
def normalize(self):
L = []
for child in self.childNodes:
if child.nodeType == Node.TEXT_NODE:
if not child.data:
# empty text node; discard
if L:
L[-1].nextSibling = child.nextSibling
if child.nextSibling:
child.nextSibling.previousSibling = child.previousSibling
child.unlink()
elif L and L[-1].nodeType == child.nodeType:
# collapse text node
node = L[-1]
node.data = node.data + child.data
node.nextSibling = child.nextSibling
if child.nextSibling:
child.nextSibling.previousSibling = node
child.unlink()
else:
L.append(child)
else:
L.append(child)
if child.nodeType == Node.ELEMENT_NODE:
child.normalize()
self.childNodes[:] = L
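    # Illustrative sketch of what normalize() does to adjacent text nodes;
    # the document content is invented for the example.
    # >>> p = parseString("<p>a</p>").documentElement
    # >>> _ = p.appendChild(p.ownerDocument.createTextNode("b"))
    # >>> p.normalize()
    # >>> len(p.childNodes), p.firstChild.data
    # (1, 'ab')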
def cloneNode(self, deep):
return _clone_node(self, deep, self.ownerDocument or self)
def isSupported(self, feature, version):
return self.ownerDocument.implementation.hasFeature(feature, version)
def _get_localName(self):
# Overridden in Element and Attr where localName can be Non-Null
return None
# Node interfaces from Level 3 (WD 9 April 2002)
def isSameNode(self, other):
return self is other
def getInterface(self, feature):
if self.isSupported(feature, None):
return self
else:
return None
# The "user data" functions use a dictionary that is only present
# if some user data has been set, so be careful not to assume it
# exists.
def getUserData(self, key):
try:
return self._user_data[key][0]
except (AttributeError, KeyError):
return None
def setUserData(self, key, data, handler):
old = None
try:
d = self._user_data
except AttributeError:
d = {}
self._user_data = d
if key in d:
old = d[key][0]
if data is None:
# ignore handlers passed for None
handler = None
if old is not None:
del d[key]
else:
d[key] = (data, handler)
return old
def _call_user_data_handler(self, operation, src, dst):
if hasattr(self, "_user_data"):
for key, (data, handler) in list(self._user_data.items()):
if handler is not None:
handler.handle(operation, key, data, src, dst)
# minidom-specific API:
def unlink(self):
self.parentNode = self.ownerDocument = None
if self.childNodes:
for child in self.childNodes:
child.unlink()
self.childNodes = NodeList()
self.previousSibling = None
self.nextSibling = None
# A Node is its own context manager, to ensure that an unlink() call occurs.
# This is similar to how a file object works.
def __enter__(self):
return self
def __exit__(self, et, ev, tb):
self.unlink()
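    # Illustrative sketch of the context-manager behaviour described above;
    # the file name is invented for the example.
    # >>> with parse("example.xml") as doc:
    # ...     root = doc.documentElement
    # unlink() is called automatically on exit, dismantling the tree.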
defproperty(Node, "firstChild", doc="First child node, or None.")
defproperty(Node, "lastChild", doc="Last child node, or None.")
defproperty(Node, "localName", doc="Namespace-local name of this node.")
def _append_child(self, node):
# fast path with less checks; usable by DOM builders if careful
childNodes = self.childNodes
if childNodes:
last = childNodes[-1]
node.__dict__["previousSibling"] = last
last.__dict__["nextSibling"] = node
childNodes.append(node)
node.__dict__["parentNode"] = self
def _in_document(node):
# return True iff node is part of a document tree
while node is not None:
if node.nodeType == Node.DOCUMENT_NODE:
return True
node = node.parentNode
return False
def _write_data(writer, data):
"Writes datachars to writer."
if data:
data = data.replace("&", "&").replace("<", "<"). \
replace("\"", """).replace(">", ">")
writer.write(data)
def _get_elements_by_tagName_helper(parent, name, rc):
for node in parent.childNodes:
if node.nodeType == Node.ELEMENT_NODE and \
(name == "*" or node.tagName == name):
rc.append(node)
_get_elements_by_tagName_helper(node, name, rc)
return rc
def _get_elements_by_tagName_ns_helper(parent, nsURI, localName, rc):
for node in parent.childNodes:
if node.nodeType == Node.ELEMENT_NODE:
if ((localName == "*" or node.localName == localName) and
(nsURI == "*" or node.namespaceURI == nsURI)):
rc.append(node)
_get_elements_by_tagName_ns_helper(node, nsURI, localName, rc)
return rc
class DocumentFragment(Node):
nodeType = Node.DOCUMENT_FRAGMENT_NODE
nodeName = "#document-fragment"
nodeValue = None
attributes = None
parentNode = None
_child_node_types = (Node.ELEMENT_NODE,
Node.TEXT_NODE,
Node.CDATA_SECTION_NODE,
Node.ENTITY_REFERENCE_NODE,
Node.PROCESSING_INSTRUCTION_NODE,
Node.COMMENT_NODE,
Node.NOTATION_NODE)
def __init__(self):
self.childNodes = NodeList()
class Attr(Node):
nodeType = Node.ATTRIBUTE_NODE
attributes = None
ownerElement = None
specified = False
_is_id = False
_child_node_types = (Node.TEXT_NODE, Node.ENTITY_REFERENCE_NODE)
def __init__(self, qName, namespaceURI=EMPTY_NAMESPACE, localName=None,
prefix=None):
# skip setattr for performance
d = self.__dict__
d["nodeName"] = d["name"] = qName
d["namespaceURI"] = namespaceURI
d["prefix"] = prefix
d['childNodes'] = NodeList()
# Add the single child node that represents the value of the attr
self.childNodes.append(Text())
# nodeValue and value are set elsewhere
def _get_localName(self):
if 'localName' in self.__dict__:
return self.__dict__['localName']
return self.nodeName.split(":", 1)[-1]
def _get_name(self):
return self.name
def _get_specified(self):
return self.specified
def __setattr__(self, name, value):
d = self.__dict__
if name in ("value", "nodeValue"):
d["value"] = d["nodeValue"] = value
d2 = self.childNodes[0].__dict__
d2["data"] = d2["nodeValue"] = value
if self.ownerElement is not None:
_clear_id_cache(self.ownerElement)
elif name in ("name", "nodeName"):
d["name"] = d["nodeName"] = value
if self.ownerElement is not None:
_clear_id_cache(self.ownerElement)
else:
d[name] = value
def _set_prefix(self, prefix):
nsuri = self.namespaceURI
if prefix == "xmlns":
if nsuri and nsuri != XMLNS_NAMESPACE:
raise xml.dom.NamespaceErr(
"illegal use of 'xmlns' prefix for the wrong namespace")
d = self.__dict__
d['prefix'] = prefix
if prefix is None:
newName = self.localName
else:
newName = "%s:%s" % (prefix, self.localName)
if self.ownerElement:
_clear_id_cache(self.ownerElement)
d['nodeName'] = d['name'] = newName
def _set_value(self, value):
d = self.__dict__
d['value'] = d['nodeValue'] = value
if self.ownerElement:
_clear_id_cache(self.ownerElement)
self.childNodes[0].data = value
def unlink(self):
# This implementation does not call the base implementation
# since most of that is not needed, and the expense of the
# method call is not warranted. We duplicate the removal of
# children, but that's all we needed from the base class.
elem = self.ownerElement
if elem is not None:
del elem._attrs[self.nodeName]
del elem._attrsNS[(self.namespaceURI, self.localName)]
if self._is_id:
self._is_id = False
elem._magic_id_nodes -= 1
self.ownerDocument._magic_id_count -= 1
for child in self.childNodes:
child.unlink()
del self.childNodes[:]
def _get_isId(self):
if self._is_id:
return True
doc = self.ownerDocument
elem = self.ownerElement
if doc is None or elem is None:
return False
info = doc._get_elem_info(elem)
if info is None:
return False
if self.namespaceURI:
return info.isIdNS(self.namespaceURI, self.localName)
else:
return info.isId(self.nodeName)
def _get_schemaType(self):
doc = self.ownerDocument
elem = self.ownerElement
if doc is None or elem is None:
return _no_type
info = doc._get_elem_info(elem)
if info is None:
return _no_type
if self.namespaceURI:
return info.getAttributeTypeNS(self.namespaceURI, self.localName)
else:
return info.getAttributeType(self.nodeName)
defproperty(Attr, "isId", doc="True if this attribute is an ID.")
defproperty(Attr, "localName", doc="Namespace-local name of this attribute.")
defproperty(Attr, "schemaType", doc="Schema type for this attribute.")
class NamedNodeMap(object):
"""The attribute list is a transient interface to the underlying
dictionaries. Mutations here will change the underlying element's
dictionary.
Ordering is imposed artificially and does not reflect the order of
attributes as found in an input document.
"""
__slots__ = ('_attrs', '_attrsNS', '_ownerElement')
def __init__(self, attrs, attrsNS, ownerElement):
self._attrs = attrs
self._attrsNS = attrsNS
self._ownerElement = ownerElement
def _get_length(self):
return len(self._attrs)
def item(self, index):
try:
return self[list(self._attrs.keys())[index]]
except IndexError:
return None
def items(self):
L = []
for node in self._attrs.values():
L.append((node.nodeName, node.value))
return L
def itemsNS(self):
L = []
for node in self._attrs.values():
L.append(((node.namespaceURI, node.localName), node.value))
return L
def __contains__(self, key):
if isinstance(key, str):
return key in self._attrs
else:
return key in self._attrsNS
def keys(self):
return self._attrs.keys()
def keysNS(self):
return self._attrsNS.keys()
def values(self):
return self._attrs.values()
def get(self, name, value=None):
return self._attrs.get(name, value)
__len__ = _get_length
def _cmp(self, other):
if self._attrs is getattr(other, "_attrs", None):
return 0
else:
return (id(self) > id(other)) - (id(self) < id(other))
def __eq__(self, other):
return self._cmp(other) == 0
def __ge__(self, other):
return self._cmp(other) >= 0
def __gt__(self, other):
return self._cmp(other) > 0
def __le__(self, other):
return self._cmp(other) <= 0
def __lt__(self, other):
return self._cmp(other) < 0
def __ne__(self, other):
return self._cmp(other) != 0
def __getitem__(self, attname_or_tuple):
if isinstance(attname_or_tuple, tuple):
return self._attrsNS[attname_or_tuple]
else:
return self._attrs[attname_or_tuple]
# same as set
def __setitem__(self, attname, value):
if isinstance(value, str):
try:
node = self._attrs[attname]
except KeyError:
node = Attr(attname)
node.ownerDocument = self._ownerElement.ownerDocument
self.setNamedItem(node)
node.value = value
else:
if not isinstance(value, Attr):
raise TypeError("value must be a string or Attr object")
node = value
self.setNamedItem(node)
def getNamedItem(self, name):
try:
return self._attrs[name]
except KeyError:
return None
def getNamedItemNS(self, namespaceURI, localName):
try:
return self._attrsNS[(namespaceURI, localName)]
except KeyError:
return None
def removeNamedItem(self, name):
n = self.getNamedItem(name)
if n is not None:
_clear_id_cache(self._ownerElement)
del self._attrs[n.nodeName]
del self._attrsNS[(n.namespaceURI, n.localName)]
if 'ownerElement' in n.__dict__:
n.__dict__['ownerElement'] = None
return n
else:
raise xml.dom.NotFoundErr()
def removeNamedItemNS(self, namespaceURI, localName):
n = self.getNamedItemNS(namespaceURI, localName)
if n is not None:
_clear_id_cache(self._ownerElement)
del self._attrsNS[(n.namespaceURI, n.localName)]
del self._attrs[n.nodeName]
if 'ownerElement' in n.__dict__:
n.__dict__['ownerElement'] = None
return n
else:
raise xml.dom.NotFoundErr()
def setNamedItem(self, node):
if not isinstance(node, Attr):
raise xml.dom.HierarchyRequestErr(
"%s cannot be child of %s" % (repr(node), repr(self)))
old = self._attrs.get(node.name)
if old:
old.unlink()
self._attrs[node.name] = node
self._attrsNS[(node.namespaceURI, node.localName)] = node
node.ownerElement = self._ownerElement
_clear_id_cache(node.ownerElement)
return old
def setNamedItemNS(self, node):
return self.setNamedItem(node)
def __delitem__(self, attname_or_tuple):
node = self[attname_or_tuple]
_clear_id_cache(node.ownerElement)
node.unlink()
def __getstate__(self):
return self._attrs, self._attrsNS, self._ownerElement
def __setstate__(self, state):
self._attrs, self._attrsNS, self._ownerElement = state
defproperty(NamedNodeMap, "length",
doc="Number of nodes in the NamedNodeMap.")
AttributeList = NamedNodeMap
class TypeInfo(object):
__slots__ = 'namespace', 'name'
def __init__(self, namespace, name):
self.namespace = namespace
self.name = name
def __repr__(self):
if self.namespace:
return "<TypeInfo %r (from %r)>" % (self.name, self.namespace)
else:
return "<TypeInfo %r>" % self.name
def _get_name(self):
return self.name
def _get_namespace(self):
return self.namespace
_no_type = TypeInfo(None, None)
class Element(Node):
nodeType = Node.ELEMENT_NODE
nodeValue = None
schemaType = _no_type
_magic_id_nodes = 0
_child_node_types = (Node.ELEMENT_NODE,
Node.PROCESSING_INSTRUCTION_NODE,
Node.COMMENT_NODE,
Node.TEXT_NODE,
Node.CDATA_SECTION_NODE,
Node.ENTITY_REFERENCE_NODE)
def __init__(self, tagName, namespaceURI=EMPTY_NAMESPACE, prefix=None,
localName=None):
self.tagName = self.nodeName = tagName
self.prefix = prefix
self.namespaceURI = namespaceURI
self.childNodes = NodeList()
self._attrs = {} # attributes are double-indexed:
self._attrsNS = {} # tagName -> Attribute
# URI,localName -> Attribute
# in the future: consider lazy generation
                           # of attribute objects; this is too tricky
# for now because of headaches with
# namespaces.
def _get_localName(self):
if 'localName' in self.__dict__:
return self.__dict__['localName']
return self.tagName.split(":", 1)[-1]
def _get_tagName(self):
return self.tagName
def unlink(self):
for attr in list(self._attrs.values()):
attr.unlink()
self._attrs = None
self._attrsNS = None
Node.unlink(self)
def getAttribute(self, attname):
try:
return self._attrs[attname].value
except KeyError:
return ""
def getAttributeNS(self, namespaceURI, localName):
try:
return self._attrsNS[(namespaceURI, localName)].value
except KeyError:
return ""
def setAttribute(self, attname, value):
attr = self.getAttributeNode(attname)
if attr is None:
attr = Attr(attname)
# for performance
d = attr.__dict__
d["value"] = d["nodeValue"] = value
d["ownerDocument"] = self.ownerDocument
self.setAttributeNode(attr)
elif value != attr.value:
d = attr.__dict__
d["value"] = d["nodeValue"] = value
if attr.isId:
_clear_id_cache(self)
def setAttributeNS(self, namespaceURI, qualifiedName, value):
prefix, localname = _nssplit(qualifiedName)
attr = self.getAttributeNodeNS(namespaceURI, localname)
if attr is None:
# for performance
attr = Attr(qualifiedName, namespaceURI, localname, prefix)
d = attr.__dict__
d["prefix"] = prefix
d["nodeName"] = qualifiedName
d["value"] = d["nodeValue"] = value
d["ownerDocument"] = self.ownerDocument
self.setAttributeNode(attr)
else:
d = attr.__dict__
if value != attr.value:
d["value"] = d["nodeValue"] = value
if attr.isId:
_clear_id_cache(self)
if attr.prefix != prefix:
d["prefix"] = prefix
d["nodeName"] = qualifiedName
def getAttributeNode(self, attrname):
return self._attrs.get(attrname)
def getAttributeNodeNS(self, namespaceURI, localName):
return self._attrsNS.get((namespaceURI, localName))
def setAttributeNode(self, attr):
if attr.ownerElement not in (None, self):
raise xml.dom.InuseAttributeErr("attribute node already owned")
old1 = self._attrs.get(attr.name, None)
if old1 is not None:
self.removeAttributeNode(old1)
old2 = self._attrsNS.get((attr.namespaceURI, attr.localName), None)
if old2 is not None and old2 is not old1:
self.removeAttributeNode(old2)
_set_attribute_node(self, attr)
if old1 is not attr:
# It might have already been part of this node, in which case
# it doesn't represent a change, and should not be returned.
return old1
if old2 is not attr:
return old2
setAttributeNodeNS = setAttributeNode
def removeAttribute(self, name):
try:
attr = self._attrs[name]
except KeyError:
raise xml.dom.NotFoundErr()
self.removeAttributeNode(attr)
def removeAttributeNS(self, namespaceURI, localName):
try:
attr = self._attrsNS[(namespaceURI, localName)]
except KeyError:
raise xml.dom.NotFoundErr()
self.removeAttributeNode(attr)
def removeAttributeNode(self, node):
if node is None:
raise xml.dom.NotFoundErr()
try:
self._attrs[node.name]
except KeyError:
raise xml.dom.NotFoundErr()
_clear_id_cache(self)
node.unlink()
# Restore this since the node is still useful and otherwise
# unlinked
node.ownerDocument = self.ownerDocument
removeAttributeNodeNS = removeAttributeNode
def hasAttribute(self, name):
return name in self._attrs
def hasAttributeNS(self, namespaceURI, localName):
return (namespaceURI, localName) in self._attrsNS
def getElementsByTagName(self, name):
return _get_elements_by_tagName_helper(self, name, NodeList())
def getElementsByTagNameNS(self, namespaceURI, localName):
return _get_elements_by_tagName_ns_helper(
self, namespaceURI, localName, NodeList())
def __repr__(self):
return "<DOM Element: %s at %#x>" % (self.tagName, id(self))
def writexml(self, writer, indent="", addindent="", newl=""):
# indent = current indentation
# addindent = indentation to add to higher levels
# newl = newline string
writer.write(indent+"<" + self.tagName)
attrs = self._get_attributes()
a_names = sorted(attrs.keys())
for a_name in a_names:
writer.write(" %s=\"" % a_name)
_write_data(writer, attrs[a_name].value)
writer.write("\"")
if self.childNodes:
writer.write(">%s"%(newl))
for node in self.childNodes:
node.writexml(writer,indent+addindent,addindent,newl)
writer.write("%s</%s>%s" % (indent,self.tagName,newl))
else:
writer.write("/>%s"%(newl))
def _get_attributes(self):
return NamedNodeMap(self._attrs, self._attrsNS, self)
def hasAttributes(self):
if self._attrs:
return True
else:
return False
# DOM Level 3 attributes, based on the 22 Oct 2002 draft
def setIdAttribute(self, name):
idAttr = self.getAttributeNode(name)
self.setIdAttributeNode(idAttr)
def setIdAttributeNS(self, namespaceURI, localName):
idAttr = self.getAttributeNodeNS(namespaceURI, localName)
self.setIdAttributeNode(idAttr)
def setIdAttributeNode(self, idAttr):
if idAttr is None or not self.isSameNode(idAttr.ownerElement):
raise xml.dom.NotFoundErr()
if _get_containing_entref(self) is not None:
raise xml.dom.NoModificationAllowedErr()
if not idAttr._is_id:
idAttr.__dict__['_is_id'] = True
self._magic_id_nodes += 1
self.ownerDocument._magic_id_count += 1
_clear_id_cache(self)
defproperty(Element, "attributes",
doc="NamedNodeMap of attributes on the element.")
defproperty(Element, "localName",
doc="Namespace-local name of this element.")
def _set_attribute_node(element, attr):
_clear_id_cache(element)
element._attrs[attr.name] = attr
element._attrsNS[(attr.namespaceURI, attr.localName)] = attr
# This creates a circular reference, but Element.unlink()
# breaks the cycle since the references to the attribute
# dictionaries are tossed.
attr.__dict__['ownerElement'] = element
class Childless:
"""Mixin that makes childless-ness easy to implement and avoids
the complexity of the Node methods that deal with children.
"""
attributes = None
childNodes = EmptyNodeList()
firstChild = None
lastChild = None
def _get_firstChild(self):
return None
def _get_lastChild(self):
return None
def appendChild(self, node):
raise xml.dom.HierarchyRequestErr(
self.nodeName + " nodes cannot have children")
def hasChildNodes(self):
return False
def insertBefore(self, newChild, refChild):
raise xml.dom.HierarchyRequestErr(
self.nodeName + " nodes do not have children")
def removeChild(self, oldChild):
raise xml.dom.NotFoundErr(
self.nodeName + " nodes do not have children")
def normalize(self):
# For childless nodes, normalize() has nothing to do.
pass
def replaceChild(self, newChild, oldChild):
raise xml.dom.HierarchyRequestErr(
self.nodeName + " nodes do not have children")
class ProcessingInstruction(Childless, Node):
nodeType = Node.PROCESSING_INSTRUCTION_NODE
def __init__(self, target, data):
self.target = self.nodeName = target
self.data = self.nodeValue = data
def _get_data(self):
return self.data
def _set_data(self, value):
d = self.__dict__
d['data'] = d['nodeValue'] = value
def _get_target(self):
return self.target
def _set_target(self, value):
d = self.__dict__
d['target'] = d['nodeName'] = value
def __setattr__(self, name, value):
if name == "data" or name == "nodeValue":
self.__dict__['data'] = self.__dict__['nodeValue'] = value
elif name == "target" or name == "nodeName":
self.__dict__['target'] = self.__dict__['nodeName'] = value
else:
self.__dict__[name] = value
def writexml(self, writer, indent="", addindent="", newl=""):
writer.write("%s<?%s %s?>%s" % (indent,self.target, self.data, newl))
class CharacterData(Childless, Node):
def _get_length(self):
return len(self.data)
__len__ = _get_length
def _get_data(self):
return self.__dict__['data']
def _set_data(self, data):
d = self.__dict__
d['data'] = d['nodeValue'] = data
_get_nodeValue = _get_data
_set_nodeValue = _set_data
def __setattr__(self, name, value):
if name == "data" or name == "nodeValue":
self.__dict__['data'] = self.__dict__['nodeValue'] = value
else:
self.__dict__[name] = value
def __repr__(self):
data = self.data
if len(data) > 10:
dotdotdot = "..."
else:
dotdotdot = ""
return '<DOM %s node "%r%s">' % (
self.__class__.__name__, data[0:10], dotdotdot)
def substringData(self, offset, count):
if offset < 0:
raise xml.dom.IndexSizeErr("offset cannot be negative")
if offset >= len(self.data):
raise xml.dom.IndexSizeErr("offset cannot be beyond end of data")
if count < 0:
raise xml.dom.IndexSizeErr("count cannot be negative")
return self.data[offset:offset+count]
def appendData(self, arg):
self.data = self.data + arg
def insertData(self, offset, arg):
if offset < 0:
raise xml.dom.IndexSizeErr("offset cannot be negative")
if offset >= len(self.data):
raise xml.dom.IndexSizeErr("offset cannot be beyond end of data")
if arg:
self.data = "%s%s%s" % (
self.data[:offset], arg, self.data[offset:])
def deleteData(self, offset, count):
if offset < 0:
raise xml.dom.IndexSizeErr("offset cannot be negative")
if offset >= len(self.data):
raise xml.dom.IndexSizeErr("offset cannot be beyond end of data")
if count < 0:
raise xml.dom.IndexSizeErr("count cannot be negative")
if count:
self.data = self.data[:offset] + self.data[offset+count:]
def replaceData(self, offset, count, arg):
if offset < 0:
raise xml.dom.IndexSizeErr("offset cannot be negative")
if offset >= len(self.data):
raise xml.dom.IndexSizeErr("offset cannot be beyond end of data")
if count < 0:
raise xml.dom.IndexSizeErr("count cannot be negative")
if count:
self.data = "%s%s%s" % (
self.data[:offset], arg, self.data[offset+count:])
defproperty(CharacterData, "length", doc="Length of the string data.")
class Text(CharacterData):
# Make sure we don't add an instance __dict__ if we don't already
# have one, at least when that's possible:
# XXX this does not work, CharacterData is an old-style class
# __slots__ = ()
nodeType = Node.TEXT_NODE
nodeName = "#text"
attributes = None
def splitText(self, offset):
if offset < 0 or offset > len(self.data):
raise xml.dom.IndexSizeErr("illegal offset value")
newText = self.__class__()
newText.data = self.data[offset:]
newText.ownerDocument = self.ownerDocument
next = self.nextSibling
if self.parentNode and self in self.parentNode.childNodes:
if next is None:
self.parentNode.appendChild(newText)
else:
self.parentNode.insertBefore(newText, next)
self.data = self.data[:offset]
return newText
def writexml(self, writer, indent="", addindent="", newl=""):
_write_data(writer, "%s%s%s"%(indent, self.data, newl))
# DOM Level 3 (WD 9 April 2002)
def _get_wholeText(self):
L = [self.data]
n = self.previousSibling
while n is not None:
if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
L.insert(0, n.data)
n = n.previousSibling
else:
break
n = self.nextSibling
while n is not None:
if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
L.append(n.data)
n = n.nextSibling
else:
break
return ''.join(L)
def replaceWholeText(self, content):
# XXX This needs to be seriously changed if minidom ever
# supports EntityReference nodes.
parent = self.parentNode
n = self.previousSibling
while n is not None:
if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
next = n.previousSibling
parent.removeChild(n)
n = next
else:
break
n = self.nextSibling
if not content:
parent.removeChild(self)
while n is not None:
if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
next = n.nextSibling
parent.removeChild(n)
n = next
else:
break
if content:
d = self.__dict__
d['data'] = content
d['nodeValue'] = content
return self
else:
return None
def _get_isWhitespaceInElementContent(self):
if self.data.strip():
return False
elem = _get_containing_element(self)
if elem is None:
return False
info = self.ownerDocument._get_elem_info(elem)
if info is None:
return False
else:
return info.isElementContent()
defproperty(Text, "isWhitespaceInElementContent",
doc="True iff this text node contains only whitespace"
" and is in element content.")
defproperty(Text, "wholeText",
doc="The text of all logically-adjacent text nodes.")
def _get_containing_element(node):
c = node.parentNode
while c is not None:
if c.nodeType == Node.ELEMENT_NODE:
return c
c = c.parentNode
return None
def _get_containing_entref(node):
c = node.parentNode
while c is not None:
if c.nodeType == Node.ENTITY_REFERENCE_NODE:
return c
c = c.parentNode
return None
class Comment(CharacterData):
nodeType = Node.COMMENT_NODE
nodeName = "#comment"
def __init__(self, data):
self.data = self.nodeValue = data
def writexml(self, writer, indent="", addindent="", newl=""):
if "--" in self.data:
raise ValueError("'--' is not allowed in a comment node")
writer.write("%s<!--%s-->%s" % (indent, self.data, newl))
class CDATASection(Text):
# Make sure we don't add an instance __dict__ if we don't already
# have one, at least when that's possible:
# XXX this does not work, Text is an old-style class
# __slots__ = ()
nodeType = Node.CDATA_SECTION_NODE
nodeName = "#cdata-section"
def writexml(self, writer, indent="", addindent="", newl=""):
if self.data.find("]]>") >= 0:
raise ValueError("']]>' not allowed in a CDATA section")
writer.write("<![CDATA[%s]]>" % self.data)
class ReadOnlySequentialNamedNodeMap(object):
__slots__ = '_seq',
def __init__(self, seq=()):
# seq should be a list or tuple
self._seq = seq
def __len__(self):
return len(self._seq)
def _get_length(self):
return len(self._seq)
def getNamedItem(self, name):
for n in self._seq:
if n.nodeName == name:
return n
def getNamedItemNS(self, namespaceURI, localName):
for n in self._seq:
if n.namespaceURI == namespaceURI and n.localName == localName:
return n
def __getitem__(self, name_or_tuple):
if isinstance(name_or_tuple, tuple):
node = self.getNamedItemNS(*name_or_tuple)
else:
node = self.getNamedItem(name_or_tuple)
if node is None:
raise KeyError(name_or_tuple)
return node
def item(self, index):
if index < 0:
return None
try:
return self._seq[index]
except IndexError:
return None
def removeNamedItem(self, name):
raise xml.dom.NoModificationAllowedErr(
"NamedNodeMap instance is read-only")
def removeNamedItemNS(self, namespaceURI, localName):
raise xml.dom.NoModificationAllowedErr(
"NamedNodeMap instance is read-only")
def setNamedItem(self, node):
raise xml.dom.NoModificationAllowedErr(
"NamedNodeMap instance is read-only")
def setNamedItemNS(self, node):
raise xml.dom.NoModificationAllowedErr(
"NamedNodeMap instance is read-only")
def __getstate__(self):
return [self._seq]
def __setstate__(self, state):
self._seq = state[0]
defproperty(ReadOnlySequentialNamedNodeMap, "length",
doc="Number of entries in the NamedNodeMap.")
class Identified:
"""Mix-in class that supports the publicId and systemId attributes."""
# XXX this does not work, this is an old-style class
# __slots__ = 'publicId', 'systemId'
def _identified_mixin_init(self, publicId, systemId):
self.publicId = publicId
self.systemId = systemId
def _get_publicId(self):
return self.publicId
def _get_systemId(self):
return self.systemId
class DocumentType(Identified, Childless, Node):
nodeType = Node.DOCUMENT_TYPE_NODE
nodeValue = None
name = None
publicId = None
systemId = None
internalSubset = None
def __init__(self, qualifiedName):
self.entities = ReadOnlySequentialNamedNodeMap()
self.notations = ReadOnlySequentialNamedNodeMap()
if qualifiedName:
prefix, localname = _nssplit(qualifiedName)
self.name = localname
self.nodeName = self.name
def _get_internalSubset(self):
return self.internalSubset
def cloneNode(self, deep):
if self.ownerDocument is None:
# it's ok
clone = DocumentType(None)
clone.name = self.name
clone.nodeName = self.name
operation = xml.dom.UserDataHandler.NODE_CLONED
if deep:
clone.entities._seq = []
clone.notations._seq = []
for n in self.notations._seq:
notation = Notation(n.nodeName, n.publicId, n.systemId)
clone.notations._seq.append(notation)
n._call_user_data_handler(operation, n, notation)
for e in self.entities._seq:
entity = Entity(e.nodeName, e.publicId, e.systemId,
e.notationName)
entity.actualEncoding = e.actualEncoding
entity.encoding = e.encoding
entity.version = e.version
clone.entities._seq.append(entity)
e._call_user_data_handler(operation, n, entity)
self._call_user_data_handler(operation, self, clone)
return clone
else:
return None
def writexml(self, writer, indent="", addindent="", newl=""):
writer.write("<!DOCTYPE ")
writer.write(self.name)
if self.publicId:
writer.write("%s PUBLIC '%s'%s '%s'"
% (newl, self.publicId, newl, self.systemId))
elif self.systemId:
writer.write("%s SYSTEM '%s'" % (newl, self.systemId))
if self.internalSubset is not None:
writer.write(" [")
writer.write(self.internalSubset)
writer.write("]")
writer.write(">"+newl)
class Entity(Identified, Node):
attributes = None
nodeType = Node.ENTITY_NODE
nodeValue = None
actualEncoding = None
encoding = None
version = None
def __init__(self, name, publicId, systemId, notation):
self.nodeName = name
self.notationName = notation
self.childNodes = NodeList()
self._identified_mixin_init(publicId, systemId)
def _get_actualEncoding(self):
return self.actualEncoding
def _get_encoding(self):
return self.encoding
def _get_version(self):
return self.version
def appendChild(self, newChild):
raise xml.dom.HierarchyRequestErr(
"cannot append children to an entity node")
def insertBefore(self, newChild, refChild):
raise xml.dom.HierarchyRequestErr(
"cannot insert children below an entity node")
def removeChild(self, oldChild):
raise xml.dom.HierarchyRequestErr(
"cannot remove children from an entity node")
def replaceChild(self, newChild, oldChild):
raise xml.dom.HierarchyRequestErr(
"cannot replace children of an entity node")
class Notation(Identified, Childless, Node):
nodeType = Node.NOTATION_NODE
nodeValue = None
def __init__(self, name, publicId, systemId):
self.nodeName = name
self._identified_mixin_init(publicId, systemId)
class DOMImplementation(DOMImplementationLS):
_features = [("core", "1.0"),
("core", "2.0"),
("core", None),
("xml", "1.0"),
("xml", "2.0"),
("xml", None),
("ls-load", "3.0"),
("ls-load", None),
]
def hasFeature(self, feature, version):
if version == "":
version = None
return (feature.lower(), version) in self._features
def createDocument(self, namespaceURI, qualifiedName, doctype):
if doctype and doctype.parentNode is not None:
raise xml.dom.WrongDocumentErr(
"doctype object owned by another DOM tree")
doc = self._create_document()
add_root_element = not (namespaceURI is None
and qualifiedName is None
and doctype is None)
if not qualifiedName and add_root_element:
# The spec is unclear what to raise here; SyntaxErr
# would be the other obvious candidate. Since Xerces raises
# InvalidCharacterErr, and since SyntaxErr is not listed
# for createDocument, that seems to be the better choice.
# XXX: need to check for illegal characters here and in
# createElement.
            # DOM Level III clears this up when talking about the return value
            # of this function.  If namespaceURI, qName and DocType are
            # Null, the document is returned without a document element.
            # Otherwise, if doctype or namespaceURI are not None,
            # then we go back to the above problem.
raise xml.dom.InvalidCharacterErr("Element with no name")
if add_root_element:
prefix, localname = _nssplit(qualifiedName)
if prefix == "xml" \
and namespaceURI != "http://www.w3.org/XML/1998/namespace":
raise xml.dom.NamespaceErr("illegal use of 'xml' prefix")
if prefix and not namespaceURI:
raise xml.dom.NamespaceErr(
"illegal use of prefix without namespaces")
element = doc.createElementNS(namespaceURI, qualifiedName)
if doctype:
doc.appendChild(doctype)
doc.appendChild(element)
if doctype:
doctype.parentNode = doctype.ownerDocument = doc
doc.doctype = doctype
doc.implementation = self
return doc
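    # Illustrative sketch of building an empty document with a root element;
    # the element name is invented for the example.
    # >>> impl = getDOMImplementation()
    # >>> doc = impl.createDocument(None, "root", None)
    # >>> doc.documentElement.tagName
    # 'root'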
def createDocumentType(self, qualifiedName, publicId, systemId):
doctype = DocumentType(qualifiedName)
doctype.publicId = publicId
doctype.systemId = systemId
return doctype
# DOM Level 3 (WD 9 April 2002)
def getInterface(self, feature):
if self.hasFeature(feature, None):
return self
else:
return None
# internal
def _create_document(self):
return Document()
class ElementInfo(object):
"""Object that represents content-model information for an element.
This implementation is not expected to be used in practice; DOM
builders should provide implementations which do the right thing
    using information available to them.
"""
__slots__ = 'tagName',
def __init__(self, name):
self.tagName = name
def getAttributeType(self, aname):
return _no_type
def getAttributeTypeNS(self, namespaceURI, localName):
return _no_type
def isElementContent(self):
return False
def isEmpty(self):
"""Returns true iff this element is declared to have an EMPTY
content model."""
return False
def isId(self, aname):
"""Returns true iff the named attribute is a DTD-style ID."""
return False
def isIdNS(self, namespaceURI, localName):
"""Returns true iff the identified attribute is a DTD-style ID."""
return False
def __getstate__(self):
return self.tagName
def __setstate__(self, state):
self.tagName = state
def _clear_id_cache(node):
if node.nodeType == Node.DOCUMENT_NODE:
node._id_cache.clear()
node._id_search_stack = None
elif _in_document(node):
node.ownerDocument._id_cache.clear()
node.ownerDocument._id_search_stack= None
class Document(Node, DocumentLS):
_child_node_types = (Node.ELEMENT_NODE, Node.PROCESSING_INSTRUCTION_NODE,
Node.COMMENT_NODE, Node.DOCUMENT_TYPE_NODE)
nodeType = Node.DOCUMENT_NODE
nodeName = "#document"
nodeValue = None
attributes = None
doctype = None
parentNode = None
previousSibling = nextSibling = None
implementation = DOMImplementation()
# Document attributes from Level 3 (WD 9 April 2002)
actualEncoding = None
encoding = None
standalone = None
version = None
strictErrorChecking = False
errorHandler = None
documentURI = None
_magic_id_count = 0
def __init__(self):
self.childNodes = NodeList()
# mapping of (namespaceURI, localName) -> ElementInfo
# and tagName -> ElementInfo
self._elem_info = {}
self._id_cache = {}
self._id_search_stack = None
def _get_elem_info(self, element):
if element.namespaceURI:
key = element.namespaceURI, element.localName
else:
key = element.tagName
return self._elem_info.get(key)
def _get_actualEncoding(self):
return self.actualEncoding
def _get_doctype(self):
return self.doctype
def _get_documentURI(self):
return self.documentURI
def _get_encoding(self):
return self.encoding
def _get_errorHandler(self):
return self.errorHandler
def _get_standalone(self):
return self.standalone
def _get_strictErrorChecking(self):
return self.strictErrorChecking
def _get_version(self):
return self.version
def appendChild(self, node):
if node.nodeType not in self._child_node_types:
raise xml.dom.HierarchyRequestErr(
"%s cannot be child of %s" % (repr(node), repr(self)))
if node.parentNode is not None:
# This needs to be done before the next test since this
# may *be* the document element, in which case it should
# end up re-ordered to the end.
node.parentNode.removeChild(node)
if node.nodeType == Node.ELEMENT_NODE \
and self._get_documentElement():
raise xml.dom.HierarchyRequestErr(
"two document elements disallowed")
return Node.appendChild(self, node)
def removeChild(self, oldChild):
try:
self.childNodes.remove(oldChild)
except ValueError:
raise xml.dom.NotFoundErr()
oldChild.nextSibling = oldChild.previousSibling = None
oldChild.parentNode = None
if self.documentElement is oldChild:
self.documentElement = None
return oldChild
def _get_documentElement(self):
for node in self.childNodes:
if node.nodeType == Node.ELEMENT_NODE:
return node
def unlink(self):
if self.doctype is not None:
self.doctype.unlink()
self.doctype = None
Node.unlink(self)
def cloneNode(self, deep):
if not deep:
return None
clone = self.implementation.createDocument(None, None, None)
clone.encoding = self.encoding
clone.standalone = self.standalone
clone.version = self.version
for n in self.childNodes:
childclone = _clone_node(n, deep, clone)
assert childclone.ownerDocument.isSameNode(clone)
clone.childNodes.append(childclone)
if childclone.nodeType == Node.DOCUMENT_NODE:
assert clone.documentElement is None
elif childclone.nodeType == Node.DOCUMENT_TYPE_NODE:
assert clone.doctype is None
clone.doctype = childclone
childclone.parentNode = clone
self._call_user_data_handler(xml.dom.UserDataHandler.NODE_CLONED,
self, clone)
return clone
def createDocumentFragment(self):
d = DocumentFragment()
d.ownerDocument = self
return d
def createElement(self, tagName):
e = Element(tagName)
e.ownerDocument = self
return e
def createTextNode(self, data):
if not isinstance(data, str):
raise TypeError("node contents must be a string")
t = Text()
t.data = data
t.ownerDocument = self
return t
def createCDATASection(self, data):
if not isinstance(data, str):
raise TypeError("node contents must be a string")
c = CDATASection()
c.data = data
c.ownerDocument = self
return c
def createComment(self, data):
c = Comment(data)
c.ownerDocument = self
return c
def createProcessingInstruction(self, target, data):
p = ProcessingInstruction(target, data)
p.ownerDocument = self
return p
def createAttribute(self, qName):
a = Attr(qName)
a.ownerDocument = self
a.value = ""
return a
def createElementNS(self, namespaceURI, qualifiedName):
prefix, localName = _nssplit(qualifiedName)
e = Element(qualifiedName, namespaceURI, prefix)
e.ownerDocument = self
return e
def createAttributeNS(self, namespaceURI, qualifiedName):
prefix, localName = _nssplit(qualifiedName)
a = Attr(qualifiedName, namespaceURI, localName, prefix)
a.ownerDocument = self
a.value = ""
return a
# A couple of implementation-specific helpers to create node types
# not supported by the W3C DOM specs:
def _create_entity(self, name, publicId, systemId, notationName):
e = Entity(name, publicId, systemId, notationName)
e.ownerDocument = self
return e
def _create_notation(self, name, publicId, systemId):
n = Notation(name, publicId, systemId)
n.ownerDocument = self
return n
def getElementById(self, id):
if id in self._id_cache:
return self._id_cache[id]
if not (self._elem_info or self._magic_id_count):
return None
stack = self._id_search_stack
if stack is None:
# we never searched before, or the cache has been cleared
stack = [self.documentElement]
self._id_search_stack = stack
elif not stack:
# Previous search was completed and cache is still valid;
# no matching node.
return None
result = None
while stack:
node = stack.pop()
# add child elements to stack for continued searching
stack.extend([child for child in node.childNodes
if child.nodeType in _nodeTypes_with_children])
# check this node
info = self._get_elem_info(node)
if info:
# We have to process all ID attributes before
# returning in order to get all the attributes set to
# be IDs using Element.setIdAttribute*().
for attr in node.attributes.values():
if attr.namespaceURI:
if info.isIdNS(attr.namespaceURI, attr.localName):
self._id_cache[attr.value] = node
if attr.value == id:
result = node
elif not node._magic_id_nodes:
break
elif info.isId(attr.name):
self._id_cache[attr.value] = node
if attr.value == id:
result = node
elif not node._magic_id_nodes:
break
elif attr._is_id:
self._id_cache[attr.value] = node
if attr.value == id:
result = node
elif node._magic_id_nodes == 1:
break
elif node._magic_id_nodes:
for attr in node.attributes.values():
if attr._is_id:
self._id_cache[attr.value] = node
if attr.value == id:
result = node
if result is not None:
break
return result
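    # Illustrative sketch of the ID-cache lookup implemented above; the
    # element and attribute names are invented for the example.
    # >>> doc = parseString('<root><item key="x1"/></root>')
    # >>> elem = doc.getElementsByTagName("item")[0]
    # >>> elem.setIdAttribute("key")
    # >>> doc.getElementById("x1") is elem
    # True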
def getElementsByTagName(self, name):
return _get_elements_by_tagName_helper(self, name, NodeList())
def getElementsByTagNameNS(self, namespaceURI, localName):
return _get_elements_by_tagName_ns_helper(
self, namespaceURI, localName, NodeList())
def isSupported(self, feature, version):
return self.implementation.hasFeature(feature, version)
def importNode(self, node, deep):
if node.nodeType == Node.DOCUMENT_NODE:
raise xml.dom.NotSupportedErr("cannot import document nodes")
elif node.nodeType == Node.DOCUMENT_TYPE_NODE:
raise xml.dom.NotSupportedErr("cannot import document type nodes")
return _clone_node(node, deep, self)
def writexml(self, writer, indent="", addindent="", newl="",
encoding = None):
if encoding is None:
writer.write('<?xml version="1.0" ?>'+newl)
else:
writer.write('<?xml version="1.0" encoding="%s"?>%s' % (encoding, newl))
for node in self.childNodes:
node.writexml(writer, indent, addindent, newl)
# DOM Level 3 (WD 9 April 2002)
def renameNode(self, n, namespaceURI, name):
if n.ownerDocument is not self:
raise xml.dom.WrongDocumentErr(
"cannot rename nodes from other documents;\n"
"expected %s,\nfound %s" % (self, n.ownerDocument))
if n.nodeType not in (Node.ELEMENT_NODE, Node.ATTRIBUTE_NODE):
raise xml.dom.NotSupportedErr(
"renameNode() only applies to element and attribute nodes")
if namespaceURI != EMPTY_NAMESPACE:
if ':' in name:
prefix, localName = name.split(':', 1)
if ( prefix == "xmlns"
and namespaceURI != xml.dom.XMLNS_NAMESPACE):
raise xml.dom.NamespaceErr(
"illegal use of 'xmlns' prefix")
else:
if ( name == "xmlns"
and namespaceURI != xml.dom.XMLNS_NAMESPACE
and n.nodeType == Node.ATTRIBUTE_NODE):
raise xml.dom.NamespaceErr(
"illegal use of the 'xmlns' attribute")
prefix = None
localName = name
else:
prefix = None
localName = None
if n.nodeType == Node.ATTRIBUTE_NODE:
element = n.ownerElement
if element is not None:
is_id = n._is_id
element.removeAttributeNode(n)
else:
element = None
# avoid __setattr__
d = n.__dict__
d['prefix'] = prefix
d['localName'] = localName
d['namespaceURI'] = namespaceURI
d['nodeName'] = name
if n.nodeType == Node.ELEMENT_NODE:
d['tagName'] = name
else:
# attribute node
d['name'] = name
if element is not None:
element.setAttributeNode(n)
if is_id:
element.setIdAttributeNode(n)
# It's not clear from a semantic perspective whether we should
# call the user data handlers for the NODE_RENAMED event since
# we're re-using the existing node. The draft spec has been
# interpreted as meaning "no, don't call the handler unless a
# new node is created."
return n
defproperty(Document, "documentElement",
doc="Top-level element of this document.")
def _clone_node(node, deep, newOwnerDocument):
"""
Clone a node and give it the new owner document.
Called by Node.cloneNode and Document.importNode
"""
if node.ownerDocument.isSameNode(newOwnerDocument):
operation = xml.dom.UserDataHandler.NODE_CLONED
else:
operation = xml.dom.UserDataHandler.NODE_IMPORTED
if node.nodeType == Node.ELEMENT_NODE:
clone = newOwnerDocument.createElementNS(node.namespaceURI,
node.nodeName)
for attr in node.attributes.values():
clone.setAttributeNS(attr.namespaceURI, attr.nodeName, attr.value)
a = clone.getAttributeNodeNS(attr.namespaceURI, attr.localName)
a.specified = attr.specified
if deep:
for child in node.childNodes:
c = _clone_node(child, deep, newOwnerDocument)
clone.appendChild(c)
elif node.nodeType == Node.DOCUMENT_FRAGMENT_NODE:
clone = newOwnerDocument.createDocumentFragment()
if deep:
for child in node.childNodes:
c = _clone_node(child, deep, newOwnerDocument)
clone.appendChild(c)
elif node.nodeType == Node.TEXT_NODE:
clone = newOwnerDocument.createTextNode(node.data)
elif node.nodeType == Node.CDATA_SECTION_NODE:
clone = newOwnerDocument.createCDATASection(node.data)
elif node.nodeType == Node.PROCESSING_INSTRUCTION_NODE:
clone = newOwnerDocument.createProcessingInstruction(node.target,
node.data)
elif node.nodeType == Node.COMMENT_NODE:
clone = newOwnerDocument.createComment(node.data)
elif node.nodeType == Node.ATTRIBUTE_NODE:
clone = newOwnerDocument.createAttributeNS(node.namespaceURI,
node.nodeName)
clone.specified = True
clone.value = node.value
elif node.nodeType == Node.DOCUMENT_TYPE_NODE:
assert node.ownerDocument is not newOwnerDocument
operation = xml.dom.UserDataHandler.NODE_IMPORTED
clone = newOwnerDocument.implementation.createDocumentType(
node.name, node.publicId, node.systemId)
clone.ownerDocument = newOwnerDocument
if deep:
clone.entities._seq = []
clone.notations._seq = []
for n in node.notations._seq:
notation = Notation(n.nodeName, n.publicId, n.systemId)
notation.ownerDocument = newOwnerDocument
clone.notations._seq.append(notation)
if hasattr(n, '_call_user_data_handler'):
n._call_user_data_handler(operation, n, notation)
for e in node.entities._seq:
entity = Entity(e.nodeName, e.publicId, e.systemId,
e.notationName)
entity.actualEncoding = e.actualEncoding
entity.encoding = e.encoding
entity.version = e.version
entity.ownerDocument = newOwnerDocument
clone.entities._seq.append(entity)
if hasattr(e, '_call_user_data_handler'):
e._call_user_data_handler(operation, n, entity)
else:
# Note the cloning of Document and DocumentType nodes is
# implementation specific. minidom handles those cases
# directly in the cloneNode() methods.
raise xml.dom.NotSupportedErr("Cannot clone node %s" % repr(node))
    # Check for _call_user_data_handler() since this could conceivably be
    # used with other DOM implementations (one of the FourThought
# DOMs, perhaps?).
if hasattr(node, '_call_user_data_handler'):
node._call_user_data_handler(operation, node, clone)
return clone
def _nssplit(qualifiedName):
fields = qualifiedName.split(':', 1)
if len(fields) == 2:
return fields
else:
return (None, fields[0])
def _do_pulldom_parse(func, args, kwargs):
events = func(*args, **kwargs)
toktype, rootNode = events.getEvent()
events.expandNode(rootNode)
events.clear()
return rootNode
def parse(file, parser=None, bufsize=None):
"""Parse a file into a DOM by filename or file object."""
if parser is None and not bufsize:
from xml.dom import expatbuilder
return expatbuilder.parse(file)
else:
from xml.dom import pulldom
return _do_pulldom_parse(pulldom.parse, (file,),
{'parser': parser, 'bufsize': bufsize})
def parseString(string, parser=None):
"""Parse a file into a DOM from a string."""
if parser is None:
from xml.dom import expatbuilder
return expatbuilder.parseString(string)
else:
from xml.dom import pulldom
return _do_pulldom_parse(pulldom.parseString, (string,),
{'parser': parser})
def getDOMImplementation(features=None):
if features:
if isinstance(features, str):
features = domreg._parse_feature_string(features)
for f, v in features:
if not Document.implementation.hasFeature(f, v):
return None
return Document.implementation
| apache-2.0 |
hsu/ardupilot | Tools/autotest/pysim/iris_ros.py | 81 | 3622 | #!/usr/bin/env python
"""
Python interface to euroc ROS multirotor simulator
See https://pixhawk.org/dev/ros/sitl
"""
import time
import mav_msgs.msg as mav_msgs
import px4.msg as px4
import rosgraph_msgs.msg as rosgraph_msgs
import rospy
import sensor_msgs.msg as sensor_msgs
from aircraft import Aircraft
from rotmat import Vector3, Matrix3
def quat_to_dcm(q1, q2, q3, q4):
"""Convert quaternion to DCM."""
q3q3 = q3 * q3
q3q4 = q3 * q4
q2q2 = q2 * q2
q2q3 = q2 * q3
q2q4 = q2 * q4
q1q2 = q1 * q2
q1q3 = q1 * q3
q1q4 = q1 * q4
q4q4 = q4 * q4
m = Matrix3()
m.a.x = 1.0-2.0*(q3q3 + q4q4)
m.a.y = 2.0*(q2q3 - q1q4)
m.a.z = 2.0*(q2q4 + q1q3)
m.b.x = 2.0*(q2q3 + q1q4)
m.b.y = 1.0-2.0*(q2q2 + q4q4)
m.b.z = 2.0*(q3q4 - q1q2)
m.c.x = 2.0*(q2q4 - q1q3)
m.c.y = 2.0*(q3q4 + q1q2)
m.c.z = 1.0-2.0*(q2q2 + q3q3)
return m
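# Quick illustrative check of the conversion above, using the identity
# quaternion (w=1, x=y=z=0); the expected values follow from the standard
# quaternion-to-DCM identities rather than from the simulator itself.
# >>> m = quat_to_dcm(1.0, 0.0, 0.0, 0.0)
# >>> (m.a.x, m.b.y, m.c.z)
# (1.0, 1.0, 1.0)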
class IrisRos(Aircraft):
"""A IRIS MultiCopter from ROS."""
def __init__(self):
Aircraft.__init__(self)
self.max_rpm = 1200
self.have_new_time = False
self.have_new_imu = False
self.have_new_pos = False
topics = {
"/clock" : (self.clock_cb, rosgraph_msgs.Clock),
"/iris/imu" : (self.imu_cb, sensor_msgs.Imu),
"/iris/vehicle_local_position" : (self.pos_cb, px4.vehicle_local_position),
}
rospy.init_node('ArduPilot', anonymous=True)
for topic in topics.keys():
(callback, msgtype) = topics[topic]
rospy.Subscriber(topic, msgtype, callback)
self.motor_pub = rospy.Publisher('/iris/command/motor_speed',
mav_msgs.CommandMotorSpeed,
queue_size=1)
self.last_time = 0
# spin() simply keeps python from exiting until this node is stopped
# rospy.spin()
def clock_cb(self, msg):
self.time_now = self.time_base + msg.clock.secs + msg.clock.nsecs*1.0e-9
self.have_new_time = True
def imu_cb(self, msg):
self.gyro = Vector3(msg.angular_velocity.x,
-msg.angular_velocity.y,
-msg.angular_velocity.z)
self.accel_body = Vector3(msg.linear_acceleration.x,
-msg.linear_acceleration.y,
-msg.linear_acceleration.z)
self.dcm = quat_to_dcm(msg.orientation.w,
msg.orientation.x,
-msg.orientation.y,
-msg.orientation.z)
self.have_new_imu = True
def pos_cb(self, msg):
self.velocity = Vector3(msg.vx, msg.vy, msg.vz)
self.position = Vector3(msg.x, msg.y, msg.z)
self.have_new_pos = True
def update(self, actuators):
while self.last_time == self.time_now or not self.have_new_time or not self.have_new_imu or not self.have_new_pos:
time.sleep(0.001)
self.have_new_time = False
self.have_new_pos = False
self.have_new_imu = False
# create motor speed message
msg = mav_msgs.CommandMotorSpeed()
msg.header.stamp = rospy.get_rostime()
motor_speed = []
for i in range(len(actuators)):
motor_speed.append(actuators[i]*self.max_rpm)
msg.motor_speed = motor_speed
self.last_time = self.time_now
self.motor_pub.publish(msg)
# update lat/lon/altitude
self.update_position()
| gpl-3.0 |
gdgellatly/OCB1 | addons/base_report_designer/plugin/openerp_report_designer/bin/script/lib/rpc.py | 381 | 5849 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
import socket
import xmlrpclib
#import tiny_socket
import re
class RPCGateway(object):
def __init__(self, host, port, protocol):
self.protocol = protocol
self.host = host
self.port = port
def get_url(self):
"""Get the url
"""
return "%s://%s:%s/"%(self.protocol, self.host, self.port)
def listdb(self):
"""Get the list of databases.
"""
pass
def login(self, db, user, password):
pass
def execute(self, obj, method, *args):
pass
class RPCSession(object):
def __init__(self, url):
        m = re.match('^(http[s]?://|socket://)([\w.\-]+):(\d{1,5})$', url or '')
        if not m:
            return -1
        protocol = m.group(1)
        host = m.group(2)
        port = m.group(3)
if protocol == 'http://' or protocol == 'https://':
self.gateway = XMLRPCGateway(host, port, protocol[:-3])
elif protocol == 'socket://':
self.gateway = NETRPCGateway(host, port)
def listdb(self):
return self.gateway.listdb()
def login(self, db, user, password):
if password is None:
return -1
uid = self.gateway.login(db, user or '', password or '')
if uid <= 0:
return -1
self.uid = uid
self.db = db
self.password = password
self.open = True
return uid
def execute(self, obj, method, *args):
try:
result = self.gateway.execute(obj, method, *args)
return self.__convert(result)
except Exception,e:
import traceback,sys
info = reduce(lambda x, y: x+y, traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback))
def __convert(self, result):
if isinstance(result, basestring):
# try to convert into unicode string
try:
return ustr(result)
except Exception, e:
return result
elif isinstance(result, list):
return [self.__convert(val) for val in result]
elif isinstance(result, tuple):
return tuple([self.__convert(val) for val in result])
elif isinstance(result, dict):
newres = {}
for key, val in result.items():
newres[key] = self.__convert(val)
return newres
else:
return result
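# Illustrative usage sketch for RPCSession; the URL, database name and
# credentials below are invented for the example.
# >>> session = RPCSession('socket://localhost:8070')
# >>> databases = session.listdb()
# >>> uid = session.login('mydb', 'admin', 'secret')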
class XMLRPCGateway(RPCGateway):
"""XML-RPC implementation.
"""
def __init__(self, host, port, protocol='http'):
super(XMLRPCGateway, self).__init__(host, port, protocol)
global rpc_url
rpc_url = self.get_url() + 'xmlrpc/'
def listdb(self):
global rpc_url
sock = xmlrpclib.ServerProxy(rpc_url + 'db')
try:
return sock.list()
except Exception, e:
return -1
def login(self, db, user, password):
global rpc_url
sock = xmlrpclib.ServerProxy(rpc_url + 'common')
try:
res = sock.login(db, user, password)
except Exception, e:
import traceback,sys
info = reduce(lambda x, y: x+y, traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback))
return -1
return res
def execute(self, sDatabase, UID, sPassword, obj, method, *args):
global rpc_url
sock = xmlrpclib.ServerProxy(rpc_url + 'object')
return sock.execute(sDatabase,UID,sPassword, obj ,method,*args)
class NETRPCGateway(RPCGateway):
def __init__(self, host, port):
super(NETRPCGateway, self).__init__(host, port, 'socket')
def listdb(self):
sock = mysocket()
try:
sock.connect(self.host, self.port)
sock.mysend(('db', 'list'))
res = sock.myreceive()
sock.disconnect()
return res
except Exception, e:
return -1
def login(self, db, user, password):
sock = mysocket()
try:
sock.connect(self.host, self.port)
sock.mysend(('common', 'login', db, user, password))
res = sock.myreceive()
sock.disconnect()
except Exception, e:
return -1
return res
def execute(self,obj, method, *args):
sock = mysocket()
try:
sock.connect(self.host, self.port)
data=(('object', 'execute',obj,method,)+args)
sock.mysend(data)
res=sock.myreceive()
sock.disconnect()
return res
except Exception,e:
import traceback,sys
info = reduce(lambda x, y: x+y, traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback))
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
openstack/python-openstacksdk | openstack/tests/unit/test_placement_rest.py | 1 | 2593 | # Copyright (c) 2018 Red Hat, Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
from keystoneauth1 import exceptions
from openstack.tests.unit import base
@ddt.ddt
class TestPlacementRest(base.TestCase):
def setUp(self):
super(TestPlacementRest, self).setUp()
self.use_placement()
def _register_uris(self, status_code=None):
uri = dict(
method='GET',
uri=self.get_mock_url(
'placement', 'public', append=['allocation_candidates']),
json={})
if status_code is not None:
uri['status_code'] = status_code
self.register_uris([uri])
def _validate_resp(self, resp, status_code):
self.assertEqual(status_code, resp.status_code)
self.assertEqual(
'https://placement.example.com/allocation_candidates',
resp.url)
self.assert_calls()
@ddt.data({}, {'raise_exc': False}, {'raise_exc': True})
def test_discovery(self, get_kwargs):
self._register_uris()
# Regardless of raise_exc, a <400 response doesn't raise
rs = self.cloud.placement.get('/allocation_candidates', **get_kwargs)
self._validate_resp(rs, 200)
@ddt.data({}, {'raise_exc': False})
def test_discovery_err(self, get_kwargs):
self._register_uris(status_code=500)
# >=400 doesn't raise by default or with explicit raise_exc=False
rs = self.cloud.placement.get('/allocation_candidates', **get_kwargs)
self._validate_resp(rs, 500)
def test_discovery_exc(self):
self._register_uris(status_code=500)
# raise_exc=True raises a ksa exception appropriate to the status code
ex = self.assertRaises(
exceptions.InternalServerError,
self.cloud.placement.get, '/allocation_candidates', raise_exc=True)
self._validate_resp(ex.response, 500)
def test_microversion_discovery(self):
self.assertEqual(
(1, 17),
self.cloud.placement.get_endpoint_data().max_microversion)
self.assert_calls()
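# Illustrative sketch (not part of the original test module): the behaviour the
# tests above pin down, as seen from calling code.  `cloud` is assumed to be a
# connection object exposing a `placement` adapter, like the `self.cloud`
# fixture used above.
def _example_allocation_candidates(cloud):
    # A successful response is returned as-is regardless of raise_exc.
    resp = cloud.placement.get('/allocation_candidates')
    # With raise_exc=True, server errors surface as keystoneauth exceptions.
    try:
        cloud.placement.get('/allocation_candidates', raise_exc=True)
    except exceptions.InternalServerError:
        pass
    return resp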
| apache-2.0 |
darkryder/django | tests/queryset_pickle/models.py | 281 | 1904 | import datetime
from django.db import DJANGO_VERSION_PICKLE_KEY, models
from django.utils import six
from django.utils.translation import ugettext_lazy as _
def standalone_number():
return 1
class Numbers(object):
@staticmethod
def get_static_number():
return 2
class PreviousDjangoVersionQuerySet(models.QuerySet):
def __getstate__(self):
state = super(PreviousDjangoVersionQuerySet, self).__getstate__()
state[DJANGO_VERSION_PICKLE_KEY] = '1.0'
return state
class MissingDjangoVersionQuerySet(models.QuerySet):
def __getstate__(self):
state = super(MissingDjangoVersionQuerySet, self).__getstate__()
del state[DJANGO_VERSION_PICKLE_KEY]
return state
class Group(models.Model):
name = models.CharField(_('name'), max_length=100)
objects = models.Manager()
previous_django_version_objects = PreviousDjangoVersionQuerySet.as_manager()
missing_django_version_objects = MissingDjangoVersionQuerySet.as_manager()
class Event(models.Model):
title = models.CharField(max_length=100)
group = models.ForeignKey(Group, models.CASCADE)
class Happening(models.Model):
when = models.DateTimeField(blank=True, default=datetime.datetime.now)
name = models.CharField(blank=True, max_length=100, default="test")
number1 = models.IntegerField(blank=True, default=standalone_number)
if six.PY3:
# default serializable on Python 3 only
number2 = models.IntegerField(blank=True, default=Numbers.get_static_number)
class Container(object):
# To test pickling we need a class that isn't defined on module, but
# is still available from app-cache. So, the Container class moves
# SomeModel outside of module level
class SomeModel(models.Model):
somefield = models.IntegerField()
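# Illustrative sketch (not part of the original test models): because SomeModel
# is only reachable through the app registry, a pickle round-trip of one of its
# queries exercises exactly that path.  Assumes a configured test database.
def _example_pickle_nested_model_query():
    import pickle
    qs = Container.SomeModel.objects.filter(somefield=1)
    return pickle.loads(pickle.dumps(qs.query))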
class M2MModel(models.Model):
groups = models.ManyToManyField(Group)
| bsd-3-clause |
swindonmakers/AccessibleThingController | libraries/ArduinoJson/third-party/cpplint/cpplint.py | 48 | 134240 | #!/usr/bin/env python
#
# Copyright (c) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Here are some issues that I've had people identify in my code during reviews,
# that I think are possible to flag automatically in a lint tool. If these were
# caught by lint, it would save time both for myself and that of my reviewers.
# Most likely, some of these are beyond the scope of the current lint framework,
# but I think it is valuable to retain these wish-list items even if they cannot
# be immediately implemented.
#
# Suggestions
# -----------
# - Check for no 'explicit' for multi-arg ctor
# - Check for boolean assign RHS in parens
# - Check for ctor initializer-list colon position and spacing
# - Check that if there's a ctor, there should be a dtor
# - Check accessors that return non-pointer member variables are
# declared const
# - Check accessors that return non-const pointer member vars are
# *not* declared const
# - Check for using public includes for testing
# - Check for spaces between brackets in one-line inline method
# - Check for no assert()
# - Check for spaces surrounding operators
# - Check for 0 in pointer context (should be NULL)
# - Check for 0 in char context (should be '\0')
# - Check for camel-case method name conventions for methods
# that are not simple inline getters and setters
# - Check that base classes have virtual destructors
# put " // namespace" after } that closes a namespace, with
# namespace's name after 'namespace' if it is named.
# - Do not indent namespace contents
# - Avoid inlining non-trivial constructors in header files
# include base/basictypes.h if DISALLOW_EVIL_CONSTRUCTORS is used
# - Check for old-school (void) cast for call-sites of functions
# ignored return value
# - Check gUnit usage of anonymous namespace
# - Check for class declaration order (typedefs, consts, enums,
# ctor(s?), dtor, friend declarations, methods, member vars)
#
"""Does google-lint on c++ files.
The goal of this script is to identify places in the code that *may*
be in non-compliance with google style. It does not attempt to fix
up these problems -- the point is to educate. It also does not
attempt to find all problems, or to ensure that everything it does
find is legitimately a problem.
In particular, we can get very confused by /* and // inside strings!
We do a small hack, which is to ignore //'s with "'s after them on the
same line, but it is far from perfect (in either direction).
"""
import codecs
import getopt
import math # for log
import os
import re
import sre_compile
import string
import sys
import unicodedata
EXTENSIONS = ['c', 'cc', 'cpp', 'cxx', 'c++',
'h', 'hpp', 'hxx', 'h++']
_USAGE = """
Syntax: cpplint.py [--verbose=#] [--output=vs7] [--filter=-x,+y,...]
[--counting=total|toplevel|detailed]
<file> [file] ...
The style guidelines this tries to follow are those in
http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml
Every problem is given a confidence score from 1-5, with 5 meaning we are
certain of the problem, and 1 meaning it could be a legitimate construct.
This will miss some errors, and is not a substitute for a code review.
To suppress false-positive errors of a certain category, add a
'NOLINT(category)' comment to the line. NOLINT or NOLINT(*)
suppresses errors of all categories on that line.
The files passed in will be linted; at least one file must be provided.
Linted extensions are %s. Other file types will be ignored.
Flags:
output=vs7
By default, the output is formatted to ease emacs parsing. Visual Studio
compatible output (vs7) may also be used. Other formats are unsupported.
verbose=#
Specify a number 0-5 to restrict errors to certain verbosity levels.
filter=-x,+y,...
Specify a comma-separated list of category-filters to apply: only
error messages whose category names pass the filters will be printed.
(Category names are printed with the message and look like
"[whitespace/indent]".) Filters are evaluated left to right.
"-FOO" and "FOO" means "do not print categories that start with FOO".
"+FOO" means "do print categories that start with FOO".
Examples: --filter=-whitespace,+whitespace/braces
--filter=whitespace,runtime/printf,+runtime/printf_format
--filter=-,+build/include_what_you_use
To see a list of all the categories used in cpplint, pass no arg:
--filter=
counting=total|toplevel|detailed
The total number of errors found is always printed. If
'toplevel' is provided, then the count of errors in each of
the top-level categories like 'build' and 'whitespace' will
also be printed. If 'detailed' is provided, then a count
is provided for each category like 'build/class'.
""" % (EXTENSIONS)
# We categorize each error message we print. Here are the categories.
# We want an explicit list so we can list them all in cpplint --filter=.
# If you add a new error message with a new category, add it to the list
# here! cpplint_unittest.py should tell you if you forget to do this.
# \ used for clearer layout -- pylint: disable-msg=C6013
_ERROR_CATEGORIES = [
'build/class',
'build/deprecated',
'build/endif_comment',
'build/explicit_make_pair',
'build/forward_decl',
'build/header_guard',
'build/include',
'build/include_alpha',
'build/include_order',
'build/include_what_you_use',
'build/namespaces',
'build/printf_format',
'build/storage_class',
'legal/copyright',
'readability/braces',
'readability/casting',
'readability/check',
'readability/constructors',
'readability/fn_size',
'readability/function',
'readability/multiline_comment',
'readability/multiline_string',
'readability/nolint',
'readability/streams',
'readability/todo',
'readability/utf8',
'runtime/arrays',
'runtime/casting',
'runtime/explicit',
'runtime/int',
'runtime/init',
'runtime/invalid_increment',
'runtime/member_string_references',
'runtime/memset',
'runtime/operator',
'runtime/printf',
'runtime/printf_format',
'runtime/references',
'runtime/rtti',
'runtime/sizeof',
'runtime/string',
'runtime/threadsafe_fn',
'runtime/virtual',
'whitespace/blank_line',
'whitespace/braces',
'whitespace/comma',
'whitespace/comments',
'whitespace/end_of_line',
'whitespace/ending_newline',
'whitespace/indent',
'whitespace/labels',
'whitespace/line_length',
'whitespace/newline',
'whitespace/operators',
'whitespace/parens',
'whitespace/semicolon',
'whitespace/tab',
'whitespace/todo'
]
# The default state of the category filter. This is overridden by the --filter=
# flag. By default all errors are on, so only add here categories that should be
# off by default (i.e., categories that must be enabled by the --filter= flags).
# All entries here should start with a '-' or '+', as in the --filter= flag.
_DEFAULT_FILTERS = ['-build/include_alpha']
# We used to check for high-bit characters, but after much discussion we
# decided those were OK, as long as they were in UTF-8 and didn't represent
# hard-coded international strings, which belong in a separate i18n file.
# Headers that we consider STL headers.
_STL_HEADERS = frozenset([
'algobase.h', 'algorithm', 'alloc.h', 'bitset', 'deque', 'exception',
'function.h', 'functional', 'hash_map', 'hash_map.h', 'hash_set',
'hash_set.h', 'iterator', 'list', 'list.h', 'map', 'memory', 'new',
'pair.h', 'pthread_alloc', 'queue', 'set', 'set.h', 'sstream', 'stack',
'stl_alloc.h', 'stl_relops.h', 'type_traits.h',
'utility', 'vector', 'vector.h',
])
# Non-STL C++ system headers.
_CPP_HEADERS = frozenset([
'algo.h', 'builtinbuf.h', 'bvector.h', 'cassert', 'cctype',
'cerrno', 'cfloat', 'ciso646', 'climits', 'clocale', 'cmath',
'complex', 'complex.h', 'csetjmp', 'csignal', 'cstdarg', 'cstddef',
'cstdio', 'cstdlib', 'cstring', 'ctime', 'cwchar', 'cwctype',
'defalloc.h', 'deque.h', 'editbuf.h', 'exception', 'fstream',
'fstream.h', 'hashtable.h', 'heap.h', 'indstream.h', 'iomanip',
'iomanip.h', 'ios', 'iosfwd', 'iostream', 'iostream.h', 'istream',
'istream.h', 'iterator.h', 'limits', 'map.h', 'multimap.h', 'multiset.h',
'numeric', 'ostream', 'ostream.h', 'parsestream.h', 'pfstream.h',
'PlotFile.h', 'procbuf.h', 'pthread_alloc.h', 'rope', 'rope.h',
'ropeimpl.h', 'SFile.h', 'slist', 'slist.h', 'stack.h', 'stdexcept',
'stdiostream.h', 'streambuf.h', 'stream.h', 'strfile.h', 'string',
'strstream', 'strstream.h', 'tempbuf.h', 'tree.h', 'typeinfo', 'valarray',
])
# Assertion macros. These are defined in base/logging.h and
# testing/base/gunit.h. Note that the _M versions need to come first
# for substring matching to work.
_CHECK_MACROS = [
'DCHECK', 'CHECK',
'EXPECT_TRUE_M', 'EXPECT_TRUE',
'ASSERT_TRUE_M', 'ASSERT_TRUE',
'EXPECT_FALSE_M', 'EXPECT_FALSE',
'ASSERT_FALSE_M', 'ASSERT_FALSE',
]
# Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE
_CHECK_REPLACEMENT = dict([(m, {}) for m in _CHECK_MACROS])
for op, replacement in [('==', 'EQ'), ('!=', 'NE'),
('>=', 'GE'), ('>', 'GT'),
('<=', 'LE'), ('<', 'LT')]:
_CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement
_CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement
_CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement
_CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement
_CHECK_REPLACEMENT['EXPECT_TRUE_M'][op] = 'EXPECT_%s_M' % replacement
_CHECK_REPLACEMENT['ASSERT_TRUE_M'][op] = 'ASSERT_%s_M' % replacement
for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'),
('>=', 'LT'), ('>', 'LE'),
('<=', 'GT'), ('<', 'GE')]:
_CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement
_CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement
_CHECK_REPLACEMENT['EXPECT_FALSE_M'][op] = 'EXPECT_%s_M' % inv_replacement
_CHECK_REPLACEMENT['ASSERT_FALSE_M'][op] = 'ASSERT_%s_M' % inv_replacement
# These constants define types of headers for use with
# _IncludeState.CheckNextIncludeOrder().
_C_SYS_HEADER = 1
_CPP_SYS_HEADER = 2
_LIKELY_MY_HEADER = 3
_POSSIBLE_MY_HEADER = 4
_OTHER_HEADER = 5
_regexp_compile_cache = {}
# Finds occurrences of NOLINT or NOLINT(...).
_RE_SUPPRESSION = re.compile(r'\bNOLINT\b(\([^)]*\))?')
# {str, set(int)}: a map from error categories to sets of linenumbers
# on which those errors are expected and should be suppressed.
_error_suppressions = {}
if sys.version_info < (3,):
def u(x):
return codecs.unicode_escape_decode(x)[0]
TEXT_TYPE = unicode
# BINARY_TYPE = str
range = xrange
itervalues = dict.itervalues
iteritems = dict.iteritems
else:
def u(x):
return x
TEXT_TYPE = str
# BINARY_TYPE = bytes
itervalues = dict.values
iteritems = dict.items
def ParseNolintSuppressions(filename, raw_line, linenum, error):
"""Updates the global list of error-suppressions.
Parses any NOLINT comments on the current line, updating the global
error_suppressions store. Reports an error if the NOLINT comment
was malformed.
Args:
filename: str, the name of the input file.
raw_line: str, the line of input text, with comments.
linenum: int, the number of the current line.
error: function, an error handler.
"""
# FIXME(adonovan): "NOLINT(" is misparsed as NOLINT(*).
matched = _RE_SUPPRESSION.search(raw_line)
if matched:
category = matched.group(1)
if category in (None, '(*)'): # => "suppress all"
_error_suppressions.setdefault(None, set()).add(linenum)
else:
if category.startswith('(') and category.endswith(')'):
category = category[1:-1]
if category in _ERROR_CATEGORIES:
_error_suppressions.setdefault(category, set()).add(linenum)
else:
error(filename, linenum, 'readability/nolint', 5,
'Unknown NOLINT error category: %s' % category)
def ResetNolintSuppressions():
"Resets the set of NOLINT suppressions to empty."
_error_suppressions.clear()
def IsErrorSuppressedByNolint(category, linenum):
"""Returns true if the specified error category is suppressed on this line.
Consults the global error_suppressions map populated by
ParseNolintSuppressions/ResetNolintSuppressions.
Args:
category: str, the category of the error.
linenum: int, the current line number.
Returns:
bool, True iff the error should be suppressed due to a NOLINT comment.
"""
return (linenum in _error_suppressions.get(category, set()) or
linenum in _error_suppressions.get(None, set()))
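# Illustrative sketch (not part of the original cpplint.py): how the NOLINT
# bookkeeping above fits together.  The file name, source line and line number
# are made up for the example.
def _example_nolint_flow():
  noop_error = lambda *args: None   # never invoked for a well-formed NOLINT
  ResetNolintSuppressions()
  ParseNolintSuppressions('foo.cc', 'long x = 0;  // NOLINT(runtime/int)', 12,
                          noop_error)
  on_line = IsErrorSuppressedByNolint('runtime/int', 12)            # True
  other_line = IsErrorSuppressedByNolint('runtime/int', 13)         # False
  other_category = IsErrorSuppressedByNolint('whitespace/tab', 12)  # False
  return on_line, other_line, other_category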
def Match(pattern, s):
"""Matches the string with the pattern, caching the compiled regexp."""
# The regexp compilation caching is inlined in both Match and Search for
# performance reasons; factoring it out into a separate function turns out
# to be noticeably expensive.
if not pattern in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].match(s)
def Search(pattern, s):
"""Searches the string for the pattern, caching the compiled regexp."""
if not pattern in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].search(s)
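# A small illustration (added here, not in upstream cpplint): Match() anchors
# at the start of the string while Search() scans anywhere; both go through
# the compiled-regexp cache above.
def _example_match_vs_search():
  anchored = Match(r'\d+', 'abc123')   # None: the string does not start with digits
  scanned = Search(r'\d+', 'abc123')   # match object covering '123'
  return anchored, scanned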
class _IncludeState(dict):
"""Tracks line numbers for includes, and the order in which includes appear.
As a dict, an _IncludeState object serves as a mapping between include
filename and line number on which that file was included.
Call CheckNextIncludeOrder() once for each header in the file, passing
in the type constants defined above. Calls in an illegal order will
raise an _IncludeError with an appropriate error message.
"""
# self._section will move monotonically through this set. If it ever
# needs to move backwards, CheckNextIncludeOrder will raise an error.
_INITIAL_SECTION = 0
_MY_H_SECTION = 1
_C_SECTION = 2
_CPP_SECTION = 3
_OTHER_H_SECTION = 4
_TYPE_NAMES = {
_C_SYS_HEADER: 'C system header',
_CPP_SYS_HEADER: 'C++ system header',
_LIKELY_MY_HEADER: 'header this file implements',
_POSSIBLE_MY_HEADER: 'header this file may implement',
_OTHER_HEADER: 'other header',
}
_SECTION_NAMES = {
_INITIAL_SECTION: "... nothing. (This can't be an error.)",
_MY_H_SECTION: 'a header this file implements',
_C_SECTION: 'C system header',
_CPP_SECTION: 'C++ system header',
_OTHER_H_SECTION: 'other header',
}
def __init__(self):
dict.__init__(self)
# The name of the current section.
self._section = self._INITIAL_SECTION
# The path of last found header.
self._last_header = ''
def CanonicalizeAlphabeticalOrder(self, header_path):
"""Returns a path canonicalized for alphabetical comparison.
- replaces "-" with "_" so they both cmp the same.
- removes '-inl' since we don't require them to be after the main header.
- lowercase everything, just in case.
Args:
header_path: Path to be canonicalized.
Returns:
Canonicalized path.
"""
return header_path.replace('-inl.h', '.h').replace('-', '_').lower()
def IsInAlphabeticalOrder(self, header_path):
"""Check if a header is in alphabetical order with the previous header.
Args:
header_path: Header to be checked.
Returns:
Returns true if the header is in alphabetical order.
"""
canonical_header = self.CanonicalizeAlphabeticalOrder(header_path)
if self._last_header > canonical_header:
return False
self._last_header = canonical_header
return True
def CheckNextIncludeOrder(self, header_type):
"""Returns a non-empty error message if the next header is out of order.
This function also updates the internal state to be ready to check
the next include.
Args:
header_type: One of the _XXX_HEADER constants defined above.
Returns:
The empty string if the header is in the right order, or an
error message describing what's wrong.
"""
error_message = ('Found %s after %s' %
(self._TYPE_NAMES[header_type],
self._SECTION_NAMES[self._section]))
last_section = self._section
if header_type == _C_SYS_HEADER:
if self._section <= self._C_SECTION:
self._section = self._C_SECTION
else:
self._last_header = ''
return error_message
elif header_type == _CPP_SYS_HEADER:
if self._section <= self._CPP_SECTION:
self._section = self._CPP_SECTION
else:
self._last_header = ''
return error_message
elif header_type == _LIKELY_MY_HEADER:
if self._section <= self._MY_H_SECTION:
self._section = self._MY_H_SECTION
else:
self._section = self._OTHER_H_SECTION
elif header_type == _POSSIBLE_MY_HEADER:
if self._section <= self._MY_H_SECTION:
self._section = self._MY_H_SECTION
else:
# This will always be the fallback because we're not sure
# enough that the header is associated with this file.
self._section = self._OTHER_H_SECTION
else:
assert header_type == _OTHER_HEADER
self._section = self._OTHER_H_SECTION
if last_section != self._section:
self._last_header = ''
return ''
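# Illustrative sketch (not part of the original cpplint.py): the include-order
# state machine above only moves forward through its sections, so a C system
# header that appears after a C++ system header is reported.
def _example_include_order():
  state = _IncludeState()
  first = state.CheckNextIncludeOrder(_C_SYS_HEADER)     # '' (in order)
  second = state.CheckNextIncludeOrder(_CPP_SYS_HEADER)  # '' (in order)
  third = state.CheckNextIncludeOrder(_C_SYS_HEADER)
  # third == 'Found C system header after C++ system header'
  return first, second, third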
class _CppLintState(object):
"""Maintains module-wide state.."""
def __init__(self):
self.verbose_level = 1 # global setting.
self.error_count = 0 # global count of reported errors
# filters to apply when emitting error messages
self.filters = _DEFAULT_FILTERS[:]
self.counting = 'total' # In what way are we counting errors?
self.errors_by_category = {} # string to int dict storing error counts
# output format:
# "emacs" - format that emacs can parse (default)
# "vs7" - format that Microsoft Visual Studio 7 can parse
self.output_format = 'emacs'
def SetOutputFormat(self, output_format):
"""Sets the output format for errors."""
self.output_format = output_format
def SetVerboseLevel(self, level):
"""Sets the module's verbosity, and returns the previous setting."""
last_verbose_level = self.verbose_level
self.verbose_level = level
return last_verbose_level
def SetCountingStyle(self, counting_style):
"""Sets the module's counting options."""
self.counting = counting_style
def SetFilters(self, filters):
"""Sets the error-message filters.
These filters are applied when deciding whether to emit a given
error message.
Args:
filters: A string of comma-separated filters (eg "+whitespace/indent").
Each filter should start with + or -; else we die.
Raises:
ValueError: The comma-separated filters did not all start with '+' or '-'.
E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter"
"""
# Default filters always have less priority than the flag ones.
self.filters = _DEFAULT_FILTERS[:]
for filt in filters.split(','):
clean_filt = filt.strip()
if clean_filt:
self.filters.append(clean_filt)
for filt in self.filters:
if not (filt.startswith('+') or filt.startswith('-')):
raise ValueError('Every filter in --filters must start with + or -'
' (%s does not)' % filt)
def ResetErrorCounts(self):
"""Sets the module's error statistic back to zero."""
self.error_count = 0
self.errors_by_category = {}
def IncrementErrorCount(self, category):
"""Bumps the module's error statistic."""
self.error_count += 1
if self.counting in ('toplevel', 'detailed'):
if self.counting != 'detailed':
category = category.split('/')[0]
if category not in self.errors_by_category:
self.errors_by_category[category] = 0
self.errors_by_category[category] += 1
def PrintErrorCounts(self):
"""Print a summary of errors by category, and the total."""
for category, count in iteritems(self.errors_by_category):
sys.stderr.write('Category \'%s\' errors found: %d\n' %
(category, count))
sys.stderr.write('Total errors found: %d\n' % self.error_count)
_cpplint_state = _CppLintState()
def _OutputFormat():
"""Gets the module's output format."""
return _cpplint_state.output_format
def _SetOutputFormat(output_format):
"""Sets the module's output format."""
_cpplint_state.SetOutputFormat(output_format)
def _VerboseLevel():
"""Returns the module's verbosity setting."""
return _cpplint_state.verbose_level
def _SetVerboseLevel(level):
"""Sets the module's verbosity, and returns the previous setting."""
return _cpplint_state.SetVerboseLevel(level)
def _SetCountingStyle(level):
"""Sets the module's counting options."""
_cpplint_state.SetCountingStyle(level)
def _Filters():
"""Returns the module's list of output filters, as a list."""
return _cpplint_state.filters
def _SetFilters(filters):
"""Sets the module's error-message filters.
These filters are applied when deciding whether to emit a given
error message.
Args:
filters: A string of comma-separated filters (eg "whitespace/indent").
Each filter should start with + or -; else we die.
"""
_cpplint_state.SetFilters(filters)
class _FunctionState(object):
"""Tracks current function name and the number of lines in its body."""
_NORMAL_TRIGGER = 250 # for --v=0, 500 for --v=1, etc.
_TEST_TRIGGER = 400 # about 50% more than _NORMAL_TRIGGER.
def __init__(self):
self.in_a_function = False
self.lines_in_function = 0
self.current_function = ''
def Begin(self, function_name):
"""Start analyzing function body.
Args:
function_name: The name of the function being tracked.
"""
self.in_a_function = True
self.lines_in_function = 0
self.current_function = function_name
def Count(self):
"""Count line in current function body."""
if self.in_a_function:
self.lines_in_function += 1
def Check(self, error, filename, linenum):
"""Report if too many lines in function body.
Args:
error: The function to call with any errors found.
filename: The name of the current file.
linenum: The number of the line to check.
"""
if Match(r'T(EST|est)', self.current_function):
base_trigger = self._TEST_TRIGGER
else:
base_trigger = self._NORMAL_TRIGGER
trigger = base_trigger * 2**_VerboseLevel()
if self.lines_in_function > trigger:
error_level = int(math.log(self.lines_in_function / base_trigger, 2))
      # with base_trigger 250: 250 => 0, 500 => 1, 1000 => 2, 2000 => 3, ...
if error_level > 5:
error_level = 5
error(filename, linenum, 'readability/fn_size', error_level,
'Small and focused functions are preferred:'
' %s has %d non-comment lines'
' (error triggered by exceeding %d lines).' % (
self.current_function, self.lines_in_function, trigger))
def End(self):
"""Stop analyzing function body."""
self.in_a_function = False
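# Illustrative sketch (not part of the original cpplint.py): how the length
# check escalates.  At the default verbosity (1) a normal function is allowed
# 250 * 2**1 = 500 counted lines; a 1200-line body reports at error level
# int(log2(1200 / 250)) == 2.  `error` is any callable with cpplint's
# (filename, linenum, category, confidence, message) signature.
def _example_function_length(error):
  state = _FunctionState()
  state.Begin('DoEverything()')
  for _ in range(1200):   # simulate 1200 counted body lines
    state.Count()
  state.Check(error, 'foo.cc', 1300)
  state.End()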
class _IncludeError(Exception):
"""Indicates a problem with the include order in a file."""
pass
class FileInfo:
"""Provides utility functions for filenames.
FileInfo provides easy access to the components of a file's path
relative to the project root.
"""
def __init__(self, filename):
self._filename = filename
def FullName(self):
"""Make Windows paths like Unix."""
return os.path.abspath(self._filename).replace('\\', '/')
def RepositoryName(self):
"""FullName after removing the local path to the repository.
If we have a real absolute path name here we can try to do something smart:
detecting the root of the checkout and truncating /path/to/checkout from
the name so that we get header guards that don't include things like
"C:\Documents and Settings\..." or "/home/username/..." in them and thus
people on different computers who have checked the source out to different
locations won't see bogus errors.
"""
fullname = self.FullName()
if os.path.exists(fullname):
project_dir = os.path.dirname(fullname)
if os.path.exists(os.path.join(project_dir, ".svn")):
# If there's a .svn file in the current directory, we recursively look
# up the directory tree for the top of the SVN checkout
root_dir = project_dir
one_up_dir = os.path.dirname(root_dir)
while os.path.exists(os.path.join(one_up_dir, ".svn")):
root_dir = os.path.dirname(root_dir)
one_up_dir = os.path.dirname(one_up_dir)
prefix = os.path.commonprefix([root_dir, project_dir])
return fullname[len(prefix) + 1:]
# Not SVN <= 1.6? Try to find a git, hg, or svn top level directory by
# searching up from the current path.
root_dir = os.path.dirname(fullname)
while (root_dir != os.path.dirname(root_dir) and
not os.path.exists(os.path.join(root_dir, ".git")) and
not os.path.exists(os.path.join(root_dir, ".hg")) and
not os.path.exists(os.path.join(root_dir, ".svn"))):
root_dir = os.path.dirname(root_dir)
if (os.path.exists(os.path.join(root_dir, ".git")) or
os.path.exists(os.path.join(root_dir, ".hg")) or
os.path.exists(os.path.join(root_dir, ".svn"))):
prefix = os.path.commonprefix([root_dir, project_dir])
return fullname[len(prefix) + 1:]
# Don't know what to do; header guard warnings may be wrong...
return fullname
def Split(self):
"""Splits the file into the directory, basename, and extension.
For 'chrome/browser/browser.cc', Split() would
return ('chrome/browser', 'browser', '.cc')
Returns:
A tuple of (directory, basename, extension).
"""
googlename = self.RepositoryName()
project, rest = os.path.split(googlename)
return (project,) + os.path.splitext(rest)
def BaseName(self):
"""File base name - text after the final slash, before the final period."""
return self.Split()[1]
def Extension(self):
"""File extension - text following the final period."""
return self.Split()[2]
def NoExtension(self):
"""File has no source file extension."""
return '/'.join(self.Split()[0:2])
def IsSource(self):
"""File has a source file extension."""
return self.Extension()[1:] in EXTENSIONS
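# Illustrative sketch (not part of the original cpplint.py): how FileInfo
# decomposes a path.  Per the Split() docstring, a file whose RepositoryName()
# resolves to 'chrome/browser/browser.cc' splits into
# ('chrome/browser', 'browser', '.cc'); paths outside a detected git/hg/svn
# checkout fall back to the absolute path.
def _example_fileinfo(path):
  info = FileInfo(path)
  return info.Split(), info.BaseName(), info.Extension(), info.IsSource()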
def _ShouldPrintError(category, confidence, linenum):
"""If confidence >= verbose, category passes filter and is not suppressed."""
# There are three ways we might decide not to print an error message:
# a "NOLINT(category)" comment appears in the source,
# the verbosity level isn't high enough, or the filters filter it out.
if IsErrorSuppressedByNolint(category, linenum):
return False
if confidence < _cpplint_state.verbose_level:
return False
is_filtered = False
for one_filter in _Filters():
if one_filter.startswith('-'):
if category.startswith(one_filter[1:]):
is_filtered = True
elif one_filter.startswith('+'):
if category.startswith(one_filter[1:]):
is_filtered = False
else:
assert False # should have been checked for in SetFilter.
if is_filtered:
return False
return True
def Error(filename, linenum, category, confidence, message):
"""Logs the fact we've found a lint error.
We log where the error was found, and also our confidence in the error,
that is, how certain we are this is a legitimate style regression, and
not a misidentification or a use that's sometimes justified.
False positives can be suppressed by the use of
"cpplint(category)" comments on the offending line. These are
parsed into _error_suppressions.
Args:
filename: The name of the file containing the error.
linenum: The number of the line containing the error.
category: A string used to describe the "category" this bug
falls under: "whitespace", say, or "runtime". Categories
may have a hierarchy separated by slashes: "whitespace/indent".
confidence: A number from 1-5 representing a confidence score for
the error, with 5 meaning that we are certain of the problem,
and 1 meaning that it could be a legitimate construct.
message: The error message.
"""
if _ShouldPrintError(category, confidence, linenum):
_cpplint_state.IncrementErrorCount(category)
if _cpplint_state.output_format == 'vs7':
sys.stderr.write('%s(%s): %s [%s] [%d]\n' % (
filename, linenum, message, category, confidence))
else:
m = '%s:%s: %s [%s] [%d]\n' % (
filename, linenum, message, category, confidence)
sys.stderr.write(m)
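# Illustrative sketch (not part of the original cpplint.py): the same finding
# rendered in the two supported output formats.  The file name and line number
# are made up for the example.
def _example_error_formats():
  previous = _OutputFormat()
  _SetOutputFormat('emacs')  # foo.cc:42: Extra space before ( in function call [whitespace/parens] [4]
  Error('foo.cc', 42, 'whitespace/parens', 4,
        'Extra space before ( in function call')
  _SetOutputFormat('vs7')    # foo.cc(42): Extra space before ( in function call [whitespace/parens] [4]
  Error('foo.cc', 42, 'whitespace/parens', 4,
        'Extra space before ( in function call')
  _SetOutputFormat(previous)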
# Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard.
_RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile(
r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)')
# Matches strings. Escape codes should already be removed by ESCAPES.
_RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES = re.compile(r'"[^"]*"')
# Matches characters. Escape codes should already be removed by ESCAPES.
_RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES = re.compile(r"'.'")
# Matches multi-line C++ comments.
# This RE is a little bit more complicated than one might expect, because we
# have to take care of how surrounding spaces are removed so we can handle
# comments inside statements better.
# The current rule is: we only clear spaces from both sides when we're at the
# end of the line. Otherwise, we try to remove spaces from the right side;
# if that doesn't work, we try the left side, but only if there's a
# non-word character on the right (the regexp's (?=\W) lookahead).
_RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile(
r"""(\s*/\*.*\*/\s*$|
/\*.*\*/\s+|
\s+/\*.*\*/(?=\W)|
/\*.*\*/)""", re.VERBOSE)
def IsCppString(line):
"""Does line terminate so, that the next symbol is in string constant.
This function does not consider single-line nor multi-line comments.
Args:
line: is a partial line of code starting from the 0..n.
Returns:
True, if next character appended to 'line' is inside a
string constant.
"""
line = line.replace(r'\\', 'XX') # after this, \\" does not match to \"
return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1
def FindNextMultiLineCommentStart(lines, lineix):
"""Find the beginning marker for a multiline comment."""
while lineix < len(lines):
if lines[lineix].strip().startswith('/*'):
# Only return this marker if the comment goes beyond this line
if lines[lineix].strip().find('*/', 2) < 0:
return lineix
lineix += 1
return len(lines)
def FindNextMultiLineCommentEnd(lines, lineix):
"""We are inside a comment, find the end marker."""
while lineix < len(lines):
if lines[lineix].strip().endswith('*/'):
return lineix
lineix += 1
return len(lines)
def RemoveMultiLineCommentsFromRange(lines, begin, end):
"""Clears a range of lines for multi-line comments."""
# Having // dummy comments makes the lines non-empty, so we will not get
# unnecessary blank line warnings later in the code.
for i in range(begin, end):
lines[i] = '// dummy'
def RemoveMultiLineComments(filename, lines, error):
"""Removes multiline (c-style) comments from lines."""
lineix = 0
while lineix < len(lines):
lineix_begin = FindNextMultiLineCommentStart(lines, lineix)
if lineix_begin >= len(lines):
return
lineix_end = FindNextMultiLineCommentEnd(lines, lineix_begin)
if lineix_end >= len(lines):
error(filename, lineix_begin + 1, 'readability/multiline_comment', 5,
'Could not find end of multi-line comment')
return
RemoveMultiLineCommentsFromRange(lines, lineix_begin, lineix_end + 1)
lineix = lineix_end + 1
def CleanseComments(line):
"""Removes //-comments and single-line C-style /* */ comments.
Args:
line: A line of C++ source.
Returns:
The line with single-line comments removed.
"""
commentpos = line.find('//')
if commentpos != -1 and not IsCppString(line[:commentpos]):
line = line[:commentpos].rstrip()
# get rid of /* ... */
return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
class CleansedLines(object):
"""Holds 3 copies of all lines with different preprocessing applied to them.
1) elided member contains lines without strings and comments,
2) lines member contains lines without comments, and
3) raw member contains all the lines without processing.
All these three members are of <type 'list'>, and of the same length.
"""
def __init__(self, lines):
self.elided = []
self.lines = []
self.raw_lines = lines
self.num_lines = len(lines)
for linenum in range(len(lines)):
self.lines.append(CleanseComments(lines[linenum]))
elided = self._CollapseStrings(lines[linenum])
self.elided.append(CleanseComments(elided))
def NumLines(self):
"""Returns the number of lines represented."""
return self.num_lines
@staticmethod
def _CollapseStrings(elided):
"""Collapses strings and chars on a line to simple "" or '' blocks.
We nix strings first so we're not fooled by text like '"http://"'
Args:
elided: The line being processed.
Returns:
The line with collapsed strings.
"""
if not _RE_PATTERN_INCLUDE.match(elided):
# Remove escaped characters first to make quote/single quote collapsing
# basic. Things that look like escaped characters shouldn't occur
# outside of strings and chars.
elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided)
elided = _RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES.sub("''", elided)
elided = _RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES.sub('""', elided)
return elided
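# Illustrative sketch (not part of the original cpplint.py): the three views a
# CleansedLines instance keeps.  Note that _CollapseStrings consults
# _RE_PATTERN_INCLUDE, defined elsewhere in the module, so this helper only
# works once the whole module has been loaded.
def _example_cleansed_lines():
  clean = CleansedLines(['printf("hello, %s", name);  // greet the user'])
  # clean.raw_lines[0] -> the original text, untouched
  # clean.lines[0]     -> 'printf("hello, %s", name);'  (comment removed)
  # clean.elided[0]    -> 'printf("", name);'           (string collapsed too)
  return clean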
def CloseExpression(clean_lines, linenum, pos):
"""If input points to ( or { or [, finds the position that closes it.
If lines[linenum][pos] points to a '(' or '{' or '[', finds the
linenum/pos that correspond to the closing of the expression.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
pos: A position on the line.
Returns:
A tuple (line, linenum, pos) pointer *past* the closing brace, or
(line, len(lines), -1) if we never find a close. Note we ignore
strings and comments when matching; and the line we return is the
'cleansed' line at linenum.
"""
line = clean_lines.elided[linenum]
startchar = line[pos]
if startchar not in '({[':
return (line, clean_lines.NumLines(), -1)
if startchar == '(': endchar = ')'
if startchar == '[': endchar = ']'
if startchar == '{': endchar = '}'
num_open = line.count(startchar) - line.count(endchar)
while linenum < clean_lines.NumLines() and num_open > 0:
linenum += 1
line = clean_lines.elided[linenum]
num_open += line.count(startchar) - line.count(endchar)
# OK, now find the endchar that actually got us back to even
endpos = len(line)
while num_open >= 0:
    endpos = line.rfind(endchar, 0, endpos)
    num_open -= 1                 # chopped off another closing character
return (line, linenum, endpos + 1)
def CheckForCopyright(filename, lines, error):
"""Logs an error if no Copyright message appears at the top of the file."""
# We'll say it should occur by line 10. Don't forget there's a
# dummy line at the front.
for line in range(1, min(len(lines), 11)):
if re.search(r'Copyright', lines[line], re.I):
break
else: # means no copyright line was found
error(filename, 0, 'legal/copyright', 5,
'No copyright message found. '
'You should have a line: "Copyright [year] <Copyright Owner>"')
def GetHeaderGuardCPPVariable(filename):
"""Returns the CPP variable that should be used as a header guard.
Args:
filename: The name of a C++ header file.
Returns:
The CPP variable that should be used as a header guard in the
named file.
"""
# Restores original filename in case that cpplint is invoked from Emacs's
# flymake.
filename = re.sub(r'_flymake\.h$', '.h', filename)
fileinfo = FileInfo(filename)
return re.sub(r'[-./\s]', '_', fileinfo.RepositoryName()).upper() + '_'
def CheckForHeaderGuard(filename, lines, error):
"""Checks that the file contains a header guard.
Logs an error if no #ifndef header guard is present. For other
headers, checks that the full pathname is used.
Args:
filename: The name of the C++ header file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
cppvar = GetHeaderGuardCPPVariable(filename)
ifndef = None
ifndef_linenum = 0
define = None
endif = None
endif_linenum = 0
for linenum, line in enumerate(lines):
linesplit = line.split()
if len(linesplit) >= 2:
# find the first occurrence of #ifndef and #define, save arg
if not ifndef and linesplit[0] == '#ifndef':
# set ifndef to the header guard presented on the #ifndef line.
ifndef = linesplit[1]
ifndef_linenum = linenum
if not define and linesplit[0] == '#define':
define = linesplit[1]
# find the last occurrence of #endif, save entire line
if line.startswith('#endif'):
endif = line
endif_linenum = linenum
if not ifndef:
error(filename, 0, 'build/header_guard', 5,
'No #ifndef header guard found, suggested CPP variable is: %s' %
cppvar)
return
if not define:
error(filename, 0, 'build/header_guard', 5,
'No #define header guard found, suggested CPP variable is: %s' %
cppvar)
return
# The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__
# for backward compatibility.
if ifndef != cppvar:
error_level = 0
if ifndef != cppvar + '_':
error_level = 5
ParseNolintSuppressions(filename, lines[ifndef_linenum], ifndef_linenum,
error)
error(filename, ifndef_linenum, 'build/header_guard', error_level,
'#ifndef header guard has wrong style, please use: %s' % cppvar)
if define != ifndef:
error(filename, 0, 'build/header_guard', 5,
'#ifndef and #define don\'t match, suggested CPP variable is: %s' %
cppvar)
return
if endif != ('#endif // %s' % cppvar):
error_level = 0
if endif != ('#endif // %s' % (cppvar + '_')):
error_level = 5
ParseNolintSuppressions(filename, lines[endif_linenum], endif_linenum,
error)
error(filename, endif_linenum, 'build/header_guard', error_level,
'#endif line should be "#endif // %s"' % cppvar)
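# Illustrative sketch (not part of the original cpplint.py): the guard layout
# CheckForHeaderGuard accepts.  For a header whose RepositoryName() resolves to
# 'chrome/browser/browser.h', GetHeaderGuardCPPVariable yields
# 'CHROME_BROWSER_BROWSER_H_'.
def _example_header_guard_lines(path):
  guard = GetHeaderGuardCPPVariable(path)
  return ['#ifndef %s' % guard,
          '#define %s' % guard,
          '#endif // %s' % guard]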
def CheckForUnicodeReplacementCharacters(filename, lines, error):
"""Logs an error for each line containing Unicode replacement characters.
These indicate that either the file contained invalid UTF-8 (likely)
or Unicode replacement characters (which it shouldn't). Note that
it's possible for this to throw off line numbering if the invalid
UTF-8 occurred adjacent to a newline.
Args:
filename: The name of the current file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
for linenum, line in enumerate(lines):
if u('\ufffd') in line:
error(filename, linenum, 'readability/utf8', 5,
'Line contains invalid UTF-8 (or Unicode replacement character).')
def CheckForNewlineAtEOF(filename, lines, error):
"""Logs an error if there is no newline char at the end of the file.
Args:
filename: The name of the current file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
# The array lines() was created by adding two newlines to the
# original file (go figure), then splitting on \n.
# To verify that the file ends in \n, we just have to make sure the
# last-but-two element of lines() exists and is empty.
if len(lines) < 3 or lines[-2]:
error(filename, len(lines) - 2, 'whitespace/ending_newline', 5,
'Could not find a newline character at the end of the file.')
def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error):
"""Logs an error if we see /* ... */ or "..." that extend past one line.
/* ... */ comments are legit inside macros, for one line.
Otherwise, we prefer // comments, so it's ok to warn about the
other. Likewise, it's ok for strings to extend across multiple
lines, as long as a line continuation character (backslash)
terminates each line. Although not currently prohibited by the C++
style guide, it's ugly and unnecessary. We don't do well with either
in this lint program, so we warn about both.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Remove all \\ (escaped backslashes) from the line. They are OK, and the
# second (escaped) slash may trigger later \" detection erroneously.
line = line.replace('\\\\', '')
if line.count('/*') > line.count('*/'):
error(filename, linenum, 'readability/multiline_comment', 5,
'Complex multi-line /*...*/-style comment found. '
'Lint may give bogus warnings. '
'Consider replacing these with //-style comments, '
'with #if 0...#endif, '
'or with more clearly structured multi-line comments.')
if (line.count('"') - line.count('\\"')) % 2:
error(filename, linenum, 'readability/multiline_string', 5,
'Multi-line string ("...") found. This lint script doesn\'t '
'do well with such strings, and may give bogus warnings. They\'re '
          'ugly and unnecessary, and you should use concatenation instead.')
threading_list = (
('asctime(', 'asctime_r('),
('ctime(', 'ctime_r('),
('getgrgid(', 'getgrgid_r('),
('getgrnam(', 'getgrnam_r('),
('getlogin(', 'getlogin_r('),
('getpwnam(', 'getpwnam_r('),
('getpwuid(', 'getpwuid_r('),
('gmtime(', 'gmtime_r('),
('localtime(', 'localtime_r('),
('rand(', 'rand_r('),
('readdir(', 'readdir_r('),
('strtok(', 'strtok_r('),
('ttyname(', 'ttyname_r('),
)
def CheckPosixThreading(filename, clean_lines, linenum, error):
"""Checks for calls to thread-unsafe functions.
Much code has been originally written without consideration of
multi-threading. Also, engineers are relying on their old experience;
they have learned posix before threading extensions were added. These
tests guide the engineers to use thread-safe functions (when using
posix directly).
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
for single_thread_function, multithread_safe_function in threading_list:
ix = line.find(single_thread_function)
# Comparisons made explicit for clarity -- pylint: disable-msg=C6403
if ix >= 0 and (ix == 0 or (not line[ix - 1].isalnum() and
line[ix - 1] not in ('_', '.', '>'))):
error(filename, linenum, 'runtime/threadsafe_fn', 2,
'Consider using ' + multithread_safe_function +
'...) instead of ' + single_thread_function +
'...) for improved thread safety.')
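# Illustrative sketch (not part of the original cpplint.py): the thread-safety
# check flags the reentrant-unsafe libc calls in threading_list.  CleansedLines
# is reused to build the 'elided' view the checker expects (and, as noted
# above, it relies on _RE_PATTERN_INCLUDE defined elsewhere in the module).
def _example_threadsafe_check(error):
  clean = CleansedLines(['int seed = rand();'])
  # Reports: consider rand_r(...) instead of rand(...) via the error callable.
  CheckPosixThreading('foo.cc', clean, 0, error)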
# Matches invalid increment: *count++, which moves pointer instead of
# incrementing a value.
_RE_PATTERN_INVALID_INCREMENT = re.compile(
r'^\s*\*\w+(\+\+|--);')
def CheckInvalidIncrement(filename, clean_lines, linenum, error):
"""Checks for invalid increment *count++.
For example following function:
void increment_counter(int* count) {
*count++;
}
  is invalid, because it effectively does count++, moving the pointer, and should
be replaced with ++*count, (*count)++ or *count += 1.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
if _RE_PATTERN_INVALID_INCREMENT.match(line):
error(filename, linenum, 'runtime/invalid_increment', 5,
'Changing pointer instead of value (or unused value of operator*).')
class _ClassInfo(object):
"""Stores information about a class."""
def __init__(self, name, clean_lines, linenum):
self.name = name
self.linenum = linenum
self.seen_open_brace = False
self.is_derived = False
self.virtual_method_linenumber = None
self.has_virtual_destructor = False
self.brace_depth = 0
# Try to find the end of the class. This will be confused by things like:
# class A {
# } *x = { ...
#
# But it's still good enough for CheckSectionSpacing.
self.last_line = 0
depth = 0
for i in range(linenum, clean_lines.NumLines()):
line = clean_lines.lines[i]
depth += line.count('{') - line.count('}')
if not depth:
self.last_line = i
break
class _ClassState(object):
"""Holds the current state of the parse relating to class declarations.
It maintains a stack of _ClassInfos representing the parser's guess
as to the current nesting of class declarations. The innermost class
is at the top (back) of the stack. Typically, the stack will either
be empty or have exactly one entry.
"""
def __init__(self):
self.classinfo_stack = []
def CheckFinished(self, filename, error):
"""Checks that all classes have been completely parsed.
Call this when all lines in a file have been processed.
Args:
filename: The name of the current file.
error: The function to call with any errors found.
"""
if self.classinfo_stack:
# Note: This test can result in false positives if #ifdef constructs
# get in the way of brace matching. See the testBuildClass test in
# cpplint_unittest.py for an example of this.
error(filename, self.classinfo_stack[0].linenum, 'build/class', 5,
'Failed to find complete declaration of class %s' %
self.classinfo_stack[0].name)
def CheckForNonStandardConstructs(filename, clean_lines, linenum,
class_state, error):
"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2.
Complain about several constructs which gcc-2 accepts, but which are
not standard C++. Warning about these in lint is one way to ease the
transition to new compilers.
- put storage class first (e.g. "static const" instead of "const static").
- "%lld" instead of %qd" in printf-type functions.
- "%1$d" is non-standard in printf-type functions.
- "\%" is an undefined character escape sequence.
- text after #endif is not allowed.
- invalid inner-style forward declaration.
- >? and <? operators, and their >?= and <?= cousins.
- classes with virtual methods need virtual destructors (compiler warning
available, but not turned on yet.)
Additionally, check for constructor/destructor style violations and reference
members, as it is very convenient to do so while checking for
gcc-2 compliance.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
class_state: A _ClassState instance which maintains information about
the current stack of nested class declarations being parsed.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
"""
# Remove comments from the line, but leave in strings for now.
line = clean_lines.lines[linenum]
if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
error(filename, linenum, 'runtime/printf_format', 3,
'%q in format strings is deprecated. Use %ll instead.')
if Search(r'printf\s*\(.*".*%\d+\$', line):
error(filename, linenum, 'runtime/printf_format', 2,
'%N$ formats are unconventional. Try rewriting to avoid them.')
# Remove escaped backslashes before looking for undefined escapes.
line = line.replace('\\\\', '')
if Search(r'("|\').*\\(%|\[|\(|{)', line):
error(filename, linenum, 'build/printf_format', 3,
'%, [, (, and { are undefined character escapes. Unescape them.')
# For the rest, work with both comments and strings removed.
line = clean_lines.elided[linenum]
if Search(r'\b(const|volatile|void|char|short|int|long'
r'|float|double|signed|unsigned'
r'|schar|u?int8|u?int16|u?int32|u?int64)'
r'\s+(auto|register|static|extern|typedef)\b',
line):
error(filename, linenum, 'build/storage_class', 5,
'Storage class (static, extern, typedef, etc) should be first.')
if Match(r'\s*#\s*endif\s*[^/\s]+', line):
error(filename, linenum, 'build/endif_comment', 5,
'Uncommented text after #endif is non-standard. Use a comment.')
if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
error(filename, linenum, 'build/forward_decl', 5,
'Inner-style forward declarations are invalid. Remove this line.')
if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?',
line):
error(filename, linenum, 'build/deprecated', 3,
'>? and <? (max and min) operators are non-standard and deprecated.')
if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line):
# TODO(unknown): Could it be expanded safely to arbitrary references,
# without triggering too many false positives? The first
# attempt triggered 5 warnings for mostly benign code in the regtest, hence
# the restriction.
# Here's the original regexp, for the reference:
# type_name = r'\w+((\s*::\s*\w+)|(\s*<\s*\w+?\s*>))?'
# r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;'
error(filename, linenum, 'runtime/member_string_references', 2,
'const string& members are dangerous. It is much better to use '
'alternatives, such as pointers or simple constants.')
# Track class entry and exit, and attempt to find cases within the
# class declaration that don't meet the C++ style
# guidelines. Tracking is very dependent on the code matching Google
# style guidelines, but it seems to perform well enough in testing
# to be a worthwhile addition to the checks.
classinfo_stack = class_state.classinfo_stack
# Look for a class declaration. The regexp accounts for decorated classes
# such as in:
# class LOCKABLE API Object {
# };
class_decl_match = Match(
r'\s*(template\s*<[\w\s<>,:]*>\s*)?'
'(class|struct)\s+([A-Z_]+\s+)*(\w+(::\w+)*)', line)
if class_decl_match:
classinfo_stack.append(_ClassInfo(
class_decl_match.group(4), clean_lines, linenum))
# Everything else in this function uses the top of the stack if it's
# not empty.
if not classinfo_stack:
return
classinfo = classinfo_stack[-1]
# If the opening brace hasn't been seen look for it and also
# parent class declarations.
if not classinfo.seen_open_brace:
# If the line has a ';' in it, assume it's a forward declaration or
# a single-line class declaration, which we won't process.
if line.find(';') != -1:
classinfo_stack.pop()
return
classinfo.seen_open_brace = (line.find('{') != -1)
# Look for a bare ':'
if Search('(^|[^:]):($|[^:])', line):
classinfo.is_derived = True
if not classinfo.seen_open_brace:
return # Everything else in this function is for after open brace
# The class may have been declared with namespace or classname qualifiers.
# The constructor and destructor will not have those qualifiers.
base_classname = classinfo.name.split('::')[-1]
# Look for single-argument constructors that aren't marked explicit.
# Technically a valid construct, but against style.
args = Match(r'\s+(?:inline\s+)?%s\s*\(([^,()]+)\)'
% re.escape(base_classname),
line)
if (args and
args.group(1) != 'void' and
not Match(r'(const\s+)?%s\s*(?:<\w+>\s*)?&' % re.escape(base_classname),
args.group(1).strip())):
error(filename, linenum, 'runtime/explicit', 5,
'Single-argument constructors should be marked explicit.')
# Look for methods declared virtual.
if Search(r'\bvirtual\b', line):
classinfo.virtual_method_linenumber = linenum
# Only look for a destructor declaration on the same line. It would
# be extremely unlikely for the destructor declaration to occupy
# more than one line.
if Search(r'~%s\s*\(' % base_classname, line):
classinfo.has_virtual_destructor = True
# Look for class end.
brace_depth = classinfo.brace_depth
brace_depth = brace_depth + line.count('{') - line.count('}')
if brace_depth <= 0:
classinfo = classinfo_stack.pop()
# Try to detect missing virtual destructor declarations.
# For now, only warn if a non-derived class with virtual methods lacks
# a virtual destructor. This is to make it less likely that people will
# declare derived virtual destructors without declaring the base
# destructor virtual.
if ((classinfo.virtual_method_linenumber is not None) and
(not classinfo.has_virtual_destructor) and
(not classinfo.is_derived)): # Only warn for base classes
error(filename, classinfo.linenum, 'runtime/virtual', 4,
'The class %s probably needs a virtual destructor due to '
'having virtual method(s), one declared at line %d.'
% (classinfo.name, classinfo.virtual_method_linenumber))
else:
classinfo.brace_depth = brace_depth
def CheckSpacingForFunctionCall(filename, line, linenum, error):
"""Checks for the correctness of various spacing around function calls.
Args:
filename: The name of the current file.
line: The text of the line to check.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Since function calls often occur inside if/for/while/switch
# expressions - which have their own, more liberal conventions - we
# first see if we should be looking inside such an expression for a
# function call, to which we can apply more strict standards.
fncall = line # if there's no control flow construct, look at whole line
for pattern in (r'\bif\s*\((.*)\)\s*{',
r'\bfor\s*\((.*)\)\s*{',
r'\bwhile\s*\((.*)\)\s*[{;]',
r'\bswitch\s*\((.*)\)\s*{'):
match = Search(pattern, line)
if match:
fncall = match.group(1) # look inside the parens for function calls
break
# Except in if/for/while/switch, there should never be space
# immediately inside parens (eg "f( 3, 4 )"). We make an exception
# for nested parens ( (a+b) + c ). Likewise, there should never be
# a space before a ( when it's a function argument. I assume it's a
# function argument when the char before the whitespace is legal in
# a function name (alnum + _) and we're not starting a macro. Also ignore
# pointers and references to arrays and functions coz they're too tricky:
# we use a very simple way to recognize these:
# " (something)(maybe-something)" or
# " (something)(maybe-something," or
# " (something)[something]"
# Note that we assume the contents of [] to be short enough that
# they'll never need to wrap.
if ( # Ignore control structures.
not Search(r'\b(if|for|while|switch|return|delete)\b', fncall) and
# Ignore pointers/references to functions.
not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and
# Ignore pointers/references to arrays.
not Search(r' \([^)]+\)\[[^\]]+\]', fncall)):
if Search(r'\w\s*\(\s(?!\s*\\$)', fncall): # a ( used for a fn call
error(filename, linenum, 'whitespace/parens', 4,
'Extra space after ( in function call')
elif Search(r'\(\s+(?!(\s*\\)|\()', fncall):
error(filename, linenum, 'whitespace/parens', 2,
'Extra space after (')
if (Search(r'\w\s+\(', fncall) and
not Search(r'#\s*define|typedef', fncall)):
error(filename, linenum, 'whitespace/parens', 4,
'Extra space before ( in function call')
# If the ) is followed only by a newline or a { + newline, assume it's
# part of a control statement (if/while/etc), and don't complain
if Search(r'[^)]\s+\)\s*[^{\s]', fncall):
# If the closing parenthesis is preceded by only whitespaces,
# try to give a more descriptive error message.
if Search(r'^\s+\)', fncall):
error(filename, linenum, 'whitespace/parens', 2,
'Closing ) should be moved to the previous line')
else:
error(filename, linenum, 'whitespace/parens', 2,
'Extra space before )')
def IsBlankLine(line):
"""Returns true if the given line is blank.
We consider a line to be blank if the line is empty or consists of
only white spaces.
Args:
line: A line of a string.
Returns:
True, if the given line is blank.
"""
return not line or line.isspace()
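# Illustrative sketch (not part of the original cpplint): how IsBlankLine
# classifies a few typical inputs. The helper below is hypothetical and is
# never called by the linter itself.
def _IsBlankLineExamples():
  """Sketches expected IsBlankLine results on empty, whitespace, and code lines."""
  assert IsBlankLine('')
  assert IsBlankLine('   \t  ')
  assert not IsBlankLine('  int x;')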
def CheckForFunctionLengths(filename, clean_lines, linenum,
function_state, error):
"""Reports for long function bodies.
For an overview why this is done, see:
http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions
Uses a simplistic algorithm assuming other style guidelines
(especially spacing) are followed.
Only checks unindented functions, so class members are unchecked.
Trivial bodies are unchecked, so constructors with huge initializer lists
may be missed.
Blank/comment lines are not counted so as to avoid encouraging the removal
of vertical space and comments just to get through a lint check.
NOLINT *on the last line of a function* disables this check.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
function_state: Current function name and lines in body so far.
error: The function to call with any errors found.
"""
lines = clean_lines.lines
line = lines[linenum]
raw = clean_lines.raw_lines
raw_line = raw[linenum]
joined_line = ''
starting_func = False
regexp = r'(\w(\w|::|\*|\&|\s)*)\(' # decls * & space::name( ...
match_result = Match(regexp, line)
if match_result:
# If the name is all caps and underscores, figure it's a macro and
# ignore it, unless it's TEST or TEST_F.
function_name = match_result.group(1).split()[-1]
if function_name == 'TEST' or function_name == 'TEST_F' or (
not Match(r'[A-Z_]+$', function_name)):
starting_func = True
if starting_func:
body_found = False
for start_linenum in range(linenum, clean_lines.NumLines()):
start_line = lines[start_linenum]
joined_line += ' ' + start_line.lstrip()
if Search(r'(;|})', start_line): # Declarations and trivial functions
body_found = True
break # ... ignore
elif Search(r'{', start_line):
body_found = True
function = Search(r'((\w|:)*)\(', line).group(1)
if Match(r'TEST', function): # Handle TEST... macros
parameter_regexp = Search(r'(\(.*\))', joined_line)
if parameter_regexp: # Ignore bad syntax
function += parameter_regexp.group(1)
else:
function += '()'
function_state.Begin(function)
break
if not body_found:
# No body for the function (or evidence of a non-function) was found.
error(filename, linenum, 'readability/fn_size', 5,
'Lint failed to find start of function body.')
elif Match(r'^\}\s*$', line): # function end
function_state.Check(error, filename, linenum)
function_state.End()
elif not Match(r'^\s*$', line):
function_state.Count() # Count non-blank/non-comment lines.
_RE_PATTERN_TODO = re.compile(r'^//(\s*)TODO(\(.+?\))?:?(\s|$)?')
def CheckComment(comment, filename, linenum, error):
"""Checks for common mistakes in TODO comments.
Args:
comment: The text of the comment from the line in question.
filename: The name of the current file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
match = _RE_PATTERN_TODO.match(comment)
if match:
# One whitespace is correct; zero whitespace is handled elsewhere.
leading_whitespace = match.group(1)
if len(leading_whitespace) > 1:
error(filename, linenum, 'whitespace/todo', 2,
'Too many spaces before TODO')
username = match.group(2)
if not username:
error(filename, linenum, 'readability/todo', 2,
'Missing username in TODO; it should look like '
'"// TODO(my_username): Stuff."')
middle_whitespace = match.group(3)
# Comparisons made explicit for correctness -- pylint: disable-msg=C6403
if middle_whitespace != ' ' and middle_whitespace != '':
error(filename, linenum, 'whitespace/todo', 2,
'TODO(my_username) should be followed by a space')
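# Illustrative sketch (not part of the original cpplint): what CheckComment
# reports for a few TODO comments. The collector and helper below are
# hypothetical and never invoked by the linter itself.
def _CheckCommentExamples():
  """Collects the messages CheckComment emits for sample TODO comments."""
  messages = []
  def _collect(filename, linenum, category, confidence, message):
    messages.append((category, message))
  # A well-formed TODO: no message expected.
  CheckComment('// TODO(user): frobnicate', 'demo.cc', 1, _collect)
  # A missing username and too many leading spaces each add one message.
  CheckComment('// TODO: frobnicate', 'demo.cc', 2, _collect)
  CheckComment('//    TODO(user): frobnicate', 'demo.cc', 3, _collect)
  return messages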
def CheckSpacing(filename, clean_lines, linenum, error):
"""Checks for the correctness of various spacing issues in the code.
Things we check for: spaces around operators, spaces after
if/for/while/switch, no spaces around parens in function calls, two
spaces between code and comment, don't start a block with a blank
line, don't end a function with a blank line, don't add a blank line
after public/protected/private, don't have too many blank lines in a row.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
raw = clean_lines.raw_lines
line = raw[linenum]
# Before nixing comments, check if the line is blank for no good
# reason. This includes the first line after a block is opened, and
# blank lines at the end of a function (ie, right before a line like '}'
if IsBlankLine(line):
elided = clean_lines.elided
prev_line = elided[linenum - 1]
prevbrace = prev_line.rfind('{')
# TODO(unknown): Don't complain if line before blank line, and line after,
# both start with alnums and are indented the same amount.
# This ignores whitespace at the start of a namespace block
# because those are not usually indented.
if (prevbrace != -1 and prev_line[prevbrace:].find('}') == -1
and prev_line[:prevbrace].find('namespace') == -1):
# OK, we have a blank line at the start of a code block. Before we
# complain, we check if it is an exception to the rule: The previous
# non-empty line has the parameters of a function header that are indented
# 4 spaces (because they did not fit in a 80 column line when placed on
# the same line as the function name). We also check for the case where
# the previous line is indented 6 spaces, which may happen when the
# initializers of a constructor do not fit into a 80 column line.
exception = False
if Match(r' {6}\w', prev_line): # Initializer list?
# We are looking for the opening column of initializer list, which
# should be indented 4 spaces to cause 6 space indentation afterwards.
search_position = linenum-2
while (search_position >= 0
and Match(r' {6}\w', elided[search_position])):
search_position -= 1
exception = (search_position >= 0
and elided[search_position][:5] == ' :')
else:
# Search for the function arguments or an initializer list. We use a
# simple heuristic here: If the line is indented 4 spaces; and we have a
# closing paren, without the opening paren, followed by an opening brace
# or colon (for initializer lists) we assume that it is the last line of
# a function header. If we have a colon indented 4 spaces, it is an
# initializer list.
exception = (Match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)',
prev_line)
or Match(r' {4}:', prev_line))
if not exception:
error(filename, linenum, 'whitespace/blank_line', 2,
'Blank line at the start of a code block. Is this needed?')
# This doesn't ignore whitespace at the end of a namespace block
# because that is too hard without pairing open/close braces;
# however, a special exception is made for namespace closing
# brackets which have a comment containing "namespace".
#
# Also, ignore blank lines at the end of a block in a long if-else
# chain, like this:
# if (condition1) {
# // Something followed by a blank line
#
# } else if (condition2) {
# // Something else
# }
if linenum + 1 < clean_lines.NumLines():
next_line = raw[linenum + 1]
if (next_line
and Match(r'\s*}', next_line)
and next_line.find('namespace') == -1
and next_line.find('} else ') == -1):
error(filename, linenum, 'whitespace/blank_line', 3,
'Blank line at the end of a code block. Is this needed?')
matched = Match(r'\s*(public|protected|private):', prev_line)
if matched:
error(filename, linenum, 'whitespace/blank_line', 3,
'Do not leave a blank line after "%s:"' % matched.group(1))
# Next, we complain if there's a comment too near the text
commentpos = line.find('//')
if commentpos != -1:
# Check if the // may be in quotes. If so, ignore it
# Comparisons made explicit for clarity -- pylint: disable-msg=C6403
if (line.count('"', 0, commentpos) -
line.count('\\"', 0, commentpos)) % 2 == 0: # not in quotes
# Allow one space for new scopes, two spaces otherwise:
if (not Match(r'^\s*{ //', line) and
((commentpos >= 1 and
line[commentpos-1] not in string.whitespace) or
(commentpos >= 2 and
line[commentpos-2] not in string.whitespace))):
error(filename, linenum, 'whitespace/comments', 2,
'At least two spaces is best between code and comments')
# There should always be a space between the // and the comment
commentend = commentpos + 2
if commentend < len(line) and not line[commentend] == ' ':
# but some lines are exceptions -- e.g. if they're big
# comment delimiters like:
# //----------------------------------------------------------
# or are an empty C++ style Doxygen comment, like:
# ///
# or they begin with multiple slashes followed by a space:
# //////// Header comment
match = (Search(r'[=/-]{4,}\s*$', line[commentend:]) or
Search(r'^/$', line[commentend:]) or
Search(r'^/+ ', line[commentend:]))
if not match:
error(filename, linenum, 'whitespace/comments', 4,
'Should have a space between // and comment')
CheckComment(line[commentpos:], filename, linenum, error)
line = clean_lines.elided[linenum] # get rid of comments and strings
# Don't try to do spacing checks for operator methods
  line = re.sub(r'operator(==|!=|<|<<|<=|>=|>>|>)\(', r'operator\(', line)
# We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )".
# Otherwise not. Note we only check for non-spaces on *both* sides;
# sometimes people put non-spaces on one side when aligning ='s among
# many lines (not that this is behavior that I approve of...)
if Search(r'[\w.]=[\w.]', line) and not Search(r'\b(if|while) ', line):
error(filename, linenum, 'whitespace/operators', 4,
'Missing spaces around =')
# It's ok not to have spaces around binary operators like + - * /, but if
# there's too little whitespace, we get concerned. It's hard to tell,
# though, so we punt on this one for now. TODO.
# You should always have whitespace around binary operators.
# Alas, we can't test < or > because they're legitimately used sans spaces
# (a->b, vector<int> a). The only time we can tell is a < with no >, and
# only if it's not template params list spilling into the next line.
match = Search(r'[^<>=!\s](==|!=|<=|>=)[^<>=!\s]', line)
if not match:
# Note that while it seems that the '<[^<]*' term in the following
# regexp could be simplified to '<.*', which would indeed match
# the same class of strings, the [^<] means that searching for the
# regexp takes linear rather than quadratic time.
if not Search(r'<[^<]*,\s*$', line): # template params spill
match = Search(r'[^<>=!\s](<)[^<>=!\s]([^>]|->)*$', line)
if match:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around %s' % match.group(1))
# We allow no-spaces around << and >> when used like this: 10<<20, but
# not otherwise (particularly, not when used as streams)
match = Search(r'[^0-9\s](<<|>>)[^0-9\s]', line)
if match:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around %s' % match.group(1))
# There shouldn't be space around unary operators
match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
if match:
error(filename, linenum, 'whitespace/operators', 4,
'Extra space for operator %s' % match.group(1))
# A pet peeve of mine: no spaces after an if, while, switch, or for
match = Search(r' (if\(|for\(|while\(|switch\()', line)
if match:
error(filename, linenum, 'whitespace/parens', 5,
'Missing space before ( in %s' % match.group(1))
# For if/for/while/switch, the left and right parens should be
# consistent about how many spaces are inside the parens, and
# there should either be zero or one spaces inside the parens.
# We don't want: "if ( foo)" or "if ( foo )".
# Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed.
match = Search(r'\b(if|for|while|switch)\s*'
r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$',
line)
if match:
if len(match.group(2)) != len(match.group(4)):
if not (match.group(3) == ';' and
len(match.group(2)) == 1 + len(match.group(4)) or
not match.group(2) and Search(r'\bfor\s*\(.*; \)', line)):
error(filename, linenum, 'whitespace/parens', 5,
'Mismatching spaces inside () in %s' % match.group(1))
if not len(match.group(2)) in [0, 1]:
error(filename, linenum, 'whitespace/parens', 5,
'Should have zero or one spaces inside ( and ) in %s' %
match.group(1))
# You should always have a space after a comma (either as fn arg or operator)
if Search(r',[^\s]', line):
error(filename, linenum, 'whitespace/comma', 3,
'Missing space after ,')
# You should always have a space after a semicolon
# except for few corner cases
  # TODO(unknown): clarify if 'if (1) { return 1;}' requires one more
  # space after ;
if Search(r';[^\s};\\)/]', line):
error(filename, linenum, 'whitespace/semicolon', 3,
'Missing space after ;')
# Next we will look for issues with function calls.
CheckSpacingForFunctionCall(filename, line, linenum, error)
# Except after an opening paren, or after another opening brace (in case of
# an initializer list, for instance), you should have spaces before your
# braces. And since you should never have braces at the beginning of a line,
# this is an easy test.
if Search(r'[^ ({]{', line):
error(filename, linenum, 'whitespace/braces', 5,
'Missing space before {')
# Make sure '} else {' has spaces.
if Search(r'}else', line):
error(filename, linenum, 'whitespace/braces', 5,
'Missing space before else')
# You shouldn't have spaces before your brackets, except maybe after
# 'delete []' or 'new char * []'.
if Search(r'\w\s+\[', line) and not Search(r'delete\s+\[', line):
error(filename, linenum, 'whitespace/braces', 5,
'Extra space before [')
# You shouldn't have a space before a semicolon at the end of the line.
# There's a special case for "for" since the style guide allows space before
# the semicolon there.
if Search(r':\s*;\s*$', line):
error(filename, linenum, 'whitespace/semicolon', 5,
'Semicolon defining empty statement. Use { } instead.')
elif Search(r'^\s*;\s*$', line):
error(filename, linenum, 'whitespace/semicolon', 5,
'Line contains only semicolon. If this should be an empty statement, '
'use { } instead.')
elif (Search(r'\s+;\s*$', line) and
not Search(r'\bfor\b', line)):
error(filename, linenum, 'whitespace/semicolon', 5,
'Extra space before last semicolon. If this should be an empty '
'statement, use { } instead.')
def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error):
"""Checks for additional blank line issues related to sections.
Currently the only thing checked here is blank line before protected/private.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
class_info: A _ClassInfo objects.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Skip checks if the class is small, where small means 25 lines or less.
# 25 lines seems like a good cutoff since that's the usual height of
# terminals, and any class that can't fit in one screen can't really
# be considered "small".
#
# Also skip checks if we are on the first line. This accounts for
# classes that look like
# class Foo { public: ... };
#
# If we didn't find the end of the class, last_line would be zero,
# and the check will be skipped by the first condition.
if (class_info.last_line - class_info.linenum <= 24 or
linenum <= class_info.linenum):
return
matched = Match(r'\s*(public|protected|private):', clean_lines.lines[linenum])
if matched:
# Issue warning if the line before public/protected/private was
# not a blank line, but don't do this if the previous line contains
# "class" or "struct". This can happen two ways:
# - We are at the beginning of the class.
# - We are forward-declaring an inner class that is semantically
# private, but needed to be public for implementation reasons.
prev_line = clean_lines.lines[linenum - 1]
if (not IsBlankLine(prev_line) and
not Search(r'\b(class|struct)\b', prev_line)):
# Try a bit harder to find the beginning of the class. This is to
# account for multi-line base-specifier lists, e.g.:
# class Derived
# : public Base {
end_class_head = class_info.linenum
for i in range(class_info.linenum, linenum):
if Search(r'\{\s*$', clean_lines.lines[i]):
end_class_head = i
break
if end_class_head < linenum - 1:
error(filename, linenum, 'whitespace/blank_line', 3,
'"%s:" should be preceded by a blank line' % matched.group(1))
def GetPreviousNonBlankLine(clean_lines, linenum):
"""Return the most recent non-blank line and its line number.
Args:
clean_lines: A CleansedLines instance containing the file contents.
linenum: The number of the line to check.
Returns:
A tuple with two elements. The first element is the contents of the last
non-blank line before the current line, or the empty string if this is the
first non-blank line. The second is the line number of that line, or -1
if this is the first non-blank line.
"""
prevlinenum = linenum - 1
while prevlinenum >= 0:
prevline = clean_lines.elided[prevlinenum]
if not IsBlankLine(prevline): # if not a blank line...
return (prevline, prevlinenum)
prevlinenum -= 1
return ('', -1)
def CheckBraces(filename, clean_lines, linenum, error):
"""Looks for misplaced braces (e.g. at the end of line).
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum] # get rid of comments and strings
if Match(r'\s*{\s*$', line):
# We allow an open brace to start a line in the case where someone
# is using braces in a block to explicitly create a new scope,
# which is commonly used to control the lifetime of
# stack-allocated variables. We don't detect this perfectly: we
# just don't complain if the last non-whitespace character on the
# previous non-blank line is ';', ':', '{', or '}'.
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
if not Search(r'[;:}{]\s*$', prevline):
error(filename, linenum, 'whitespace/braces', 4,
'{ should almost always be at the end of the previous line')
# An else clause should be on the same line as the preceding closing brace.
if Match(r'\s*else\s*', line):
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
if Match(r'\s*}\s*$', prevline):
error(filename, linenum, 'whitespace/newline', 4,
'An else should appear on the same line as the preceding }')
# If braces come on one side of an else, they should be on both.
# However, we have to worry about "else if" that spans multiple lines!
if Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line):
if Search(r'}\s*else if([^{]*)$', line): # could be multi-line if
# find the ( after the if
pos = line.find('else if')
pos = line.find('(', pos)
if pos > 0:
(endline, _, endpos) = CloseExpression(clean_lines, linenum, pos)
if endline[endpos:].find('{') == -1: # must be brace after if
error(filename, linenum, 'readability/braces', 5,
'If an else has a brace on one side, it should have it on both')
else: # common case: else not followed by a multi-line if
error(filename, linenum, 'readability/braces', 5,
'If an else has a brace on one side, it should have it on both')
# Likewise, an else should never have the else clause on the same line
if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line):
error(filename, linenum, 'whitespace/newline', 4,
'Else clause should never be on same line as else (use 2 lines)')
# In the same way, a do/while should never be on one line
if Match(r'\s*do [^\s{]', line):
error(filename, linenum, 'whitespace/newline', 4,
'do/while clauses should not be on a single line')
# Braces shouldn't be followed by a ; unless they're defining a struct
# or initializing an array.
# We can't tell in general, but we can for some common cases.
prevlinenum = linenum
while True:
(prevline, prevlinenum) = GetPreviousNonBlankLine(clean_lines, prevlinenum)
if Match(r'\s+{.*}\s*;', line) and not prevline.count(';'):
line = prevline + line
else:
break
if (Search(r'{.*}\s*;', line) and
line.count('{') == line.count('}') and
not Search(r'struct|class|enum|\s*=\s*{', line)):
error(filename, linenum, 'readability/braces', 4,
"You don't need a ; after a }")
def ReplaceableCheck(operator, macro, line):
"""Determine whether a basic CHECK can be replaced with a more specific one.
For example suggest using CHECK_EQ instead of CHECK(a == b) and
similarly for CHECK_GE, CHECK_GT, CHECK_LE, CHECK_LT, CHECK_NE.
Args:
operator: The C++ operator used in the CHECK.
macro: The CHECK or EXPECT macro being called.
line: The current source line.
Returns:
True if the CHECK can be replaced with a more specific one.
"""
# This matches decimal and hex integers, strings, and chars (in that order).
match_constant = r'([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')'
# Expression to match two sides of the operator with something that
# looks like a literal, since CHECK(x == iterator) won't compile.
# This means we can't catch all the cases where a more specific
# CHECK is possible, but it's less annoying than dealing with
# extraneous warnings.
match_this = (r'\s*' + macro + r'\((\s*' +
match_constant + r'\s*' + operator + r'[^<>].*|'
r'.*[^<>]' + operator + r'\s*' + match_constant +
r'\s*\))')
# Don't complain about CHECK(x == NULL) or similar because
# CHECK_EQ(x, NULL) won't compile (requires a cast).
# Also, don't complain about more complex boolean expressions
# involving && or || such as CHECK(a == b || c == d).
return Match(match_this, line) and not Search(r'NULL|&&|\|\|', line)
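# Illustrative sketch (not part of the original cpplint): ReplaceableCheck only
# fires when one side of the comparison looks like a literal. The helper below
# is hypothetical and never called by the linter itself.
def _ReplaceableCheckExamples():
  """Sketches when a bare CHECK is considered replaceable by CHECK_EQ etc."""
  assert ReplaceableCheck('==', 'CHECK', 'CHECK(x == 42);')  # suggest CHECK_EQ
  assert not ReplaceableCheck('<', 'CHECK', 'CHECK(a < b);')  # no literal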
def CheckCheck(filename, clean_lines, linenum, error):
"""Checks the use of CHECK and EXPECT macros.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Decide the set of replacement macros that should be suggested
raw_lines = clean_lines.raw_lines
current_macro = ''
for macro in _CHECK_MACROS:
if raw_lines[linenum].find(macro) >= 0:
current_macro = macro
break
if not current_macro:
# Don't waste time here if line doesn't contain 'CHECK' or 'EXPECT'
return
line = clean_lines.elided[linenum] # get rid of comments and strings
# Encourage replacing plain CHECKs with CHECK_EQ/CHECK_NE/etc.
for operator in ['==', '!=', '>=', '>', '<=', '<']:
if ReplaceableCheck(operator, current_macro, line):
error(filename, linenum, 'readability/check', 2,
'Consider using %s instead of %s(a %s b)' % (
_CHECK_REPLACEMENT[current_macro][operator],
current_macro, operator))
break
def GetLineWidth(line):
"""Determines the width of the line in column positions.
Args:
line: A string, which may be a Unicode string.
Returns:
The width of the line in column positions, accounting for Unicode
combining characters and wide characters.
"""
if isinstance(line, TEXT_TYPE):
width = 0
for uc in unicodedata.normalize('NFC', line):
if unicodedata.east_asian_width(uc) in ('W', 'F'):
width += 2
elif not unicodedata.combining(uc):
width += 1
return width
else:
return len(line)
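# Illustrative sketch (not part of the original cpplint): GetLineWidth counts
# wide (east-asian) characters as two columns. The helper below is hypothetical
# and never called by the linter; it assumes TEXT_TYPE is the unicode text type
# defined earlier in this file.
def _GetLineWidthExamples():
  """Sketches column widths for ASCII and CJK text."""
  assert GetLineWidth('int x;') == 6
  assert GetLineWidth(u'\u4e2d\u6587') == 4  # two wide chars -> four columns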
def CheckStyle(filename, clean_lines, linenum, file_extension, class_state,
error):
"""Checks rules from the 'C++ style rules' section of cppguide.html.
Most of these rules are hard to test (naming, comment style), but we
do what we can. In particular we check for 2-space indents, line lengths,
tab usage, spaces inside code, etc.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
file_extension: The extension (without the dot) of the filename.
error: The function to call with any errors found.
"""
raw_lines = clean_lines.raw_lines
line = raw_lines[linenum]
if line.find('\t') != -1:
error(filename, linenum, 'whitespace/tab', 1,
'Tab found; better to use spaces')
# One or three blank spaces at the beginning of the line is weird; it's
# hard to reconcile that with 2-space indents.
# NOTE: here are the conditions rob pike used for his tests. Mine aren't
# as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces
# if(RLENGTH > 20) complain = 0;
# if(match($0, " +(error|private|public|protected):")) complain = 0;
# if(match(prev, "&& *$")) complain = 0;
# if(match(prev, "\\|\\| *$")) complain = 0;
# if(match(prev, "[\",=><] *$")) complain = 0;
# if(match($0, " <<")) complain = 0;
# if(match(prev, " +for \\(")) complain = 0;
# if(prevodd && match(prevprev, " +for \\(")) complain = 0;
initial_spaces = 0
cleansed_line = clean_lines.elided[linenum]
while initial_spaces < len(line) and line[initial_spaces] == ' ':
initial_spaces += 1
if line and line[-1].isspace():
error(filename, linenum, 'whitespace/end_of_line', 4,
'Line ends in whitespace. Consider deleting these extra spaces.')
# There are certain situations we allow one space, notably for labels
elif ((initial_spaces == 1 or initial_spaces == 3) and
not Match(r'\s*\w+\s*:\s*$', cleansed_line)):
error(filename, linenum, 'whitespace/indent', 3,
'Weird number of spaces at line-start. '
'Are you using a 2-space indent?')
# Labels should always be indented at least one space.
elif not initial_spaces and line[:2] != '//' and Search(r'[^:]:\s*$',
line):
error(filename, linenum, 'whitespace/labels', 4,
'Labels should always be indented at least one space. '
'If this is a member-initializer list in a constructor or '
'the base class list in a class definition, the colon should '
'be on the following line.')
# Check if the line is a header guard.
is_header_guard = False
if file_extension == 'h':
cppvar = GetHeaderGuardCPPVariable(filename)
if (line.startswith('#ifndef %s' % cppvar) or
line.startswith('#define %s' % cppvar) or
line.startswith('#endif // %s' % cppvar)):
is_header_guard = True
# #include lines and header guards can be long, since there's no clean way to
# split them.
#
# URLs can be long too. It's possible to split these, but it makes them
# harder to cut&paste.
#
# The "$Id:...$" comment may also get very long without it being the
  # developer's fault.
if (not line.startswith('#include') and not is_header_guard and
not Match(r'^\s*//.*http(s?)://\S*$', line) and
not Match(r'^// \$Id:.*#[0-9]+ \$$', line)):
line_width = GetLineWidth(line)
if line_width > 100:
error(filename, linenum, 'whitespace/line_length', 4,
'Lines should very rarely be longer than 100 characters')
elif line_width > 80:
error(filename, linenum, 'whitespace/line_length', 2,
'Lines should be <= 80 characters long')
if (cleansed_line.count(';') > 1 and
# for loops are allowed two ;'s (and may run over two lines).
cleansed_line.find('for') == -1 and
(GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or
GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and
# It's ok to have many commands in a switch case that fits in 1 line
not ((cleansed_line.find('case ') != -1 or
cleansed_line.find('default:') != -1) and
cleansed_line.find('break;') != -1)):
error(filename, linenum, 'whitespace/newline', 4,
'More than one command on the same line')
# Some more style checks
CheckBraces(filename, clean_lines, linenum, error)
CheckSpacing(filename, clean_lines, linenum, error)
CheckCheck(filename, clean_lines, linenum, error)
if class_state and class_state.classinfo_stack:
CheckSectionSpacing(filename, clean_lines,
class_state.classinfo_stack[-1], linenum, error)
_RE_PATTERN_INCLUDE_NEW_STYLE = re.compile(r'#include +"[^/]+\.h"')
_RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$')
# Matches the first component of a filename delimited by -s and _s. That is:
# _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo.cc').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo-bar_baz.cc').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo_bar-baz.cc').group(0) == 'foo'
_RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+')
def _DropCommonSuffixes(filename):
"""Drops common suffixes like _test.cc or -inl.h from filename.
For example:
>>> _DropCommonSuffixes('foo/foo-inl.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/bar/foo.cc')
'foo/bar/foo'
>>> _DropCommonSuffixes('foo/foo_internal.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/foo_unusualinternal.h')
'foo/foo_unusualinternal'
Args:
filename: The input filename.
Returns:
The filename with the common suffix removed.
"""
for suffix in ('test.cc', 'regtest.cc', 'unittest.cc',
'inl.h', 'impl.h', 'internal.h'):
if (filename.endswith(suffix) and len(filename) > len(suffix) and
filename[-len(suffix) - 1] in ('-', '_')):
return filename[:-len(suffix) - 1]
return os.path.splitext(filename)[0]
def _IsTestFilename(filename):
"""Determines if the given filename has a suffix that identifies it as a test.
Args:
filename: The input filename.
Returns:
True if 'filename' looks like a test, False otherwise.
"""
if (filename.endswith('_test.cc') or
filename.endswith('_unittest.cc') or
filename.endswith('_regtest.cc')):
return True
else:
return False
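# Illustrative sketch (not part of the original cpplint): which filenames
# _IsTestFilename treats as tests. The helper below is hypothetical and never
# called by the linter itself.
def _IsTestFilenameExamples():
  """Sketches the test-file suffixes that are recognized."""
  assert _IsTestFilename('foo_unittest.cc')
  assert _IsTestFilename('bar/baz_test.cc')
  assert not _IsTestFilename('foo.cc')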
def _ClassifyInclude(fileinfo, include, is_system):
"""Figures out what kind of header 'include' is.
Args:
fileinfo: The current file cpplint is running over. A FileInfo instance.
include: The path to a #included file.
is_system: True if the #include used <> rather than "".
Returns:
One of the _XXX_HEADER constants.
For example:
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True)
_C_SYS_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True)
_CPP_SYS_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False)
_LIKELY_MY_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'),
... 'bar/foo_other_ext.h', False)
_POSSIBLE_MY_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False)
_OTHER_HEADER
"""
# This is a list of all standard c++ header files, except
# those already checked for above.
is_stl_h = include in _STL_HEADERS
is_cpp_h = is_stl_h or include in _CPP_HEADERS
if is_system:
if is_cpp_h:
return _CPP_SYS_HEADER
else:
return _C_SYS_HEADER
# If the target file and the include we're checking share a
# basename when we drop common extensions, and the include
# lives in . , then it's likely to be owned by the target file.
target_dir, target_base = (
os.path.split(_DropCommonSuffixes(fileinfo.RepositoryName())))
include_dir, include_base = os.path.split(_DropCommonSuffixes(include))
if target_base == include_base and (
include_dir == target_dir or
include_dir == os.path.normpath(target_dir + '/../public')):
return _LIKELY_MY_HEADER
# If the target and include share some initial basename
# component, it's possible the target is implementing the
# include, so it's allowed to be first, but we'll never
# complain if it's not there.
target_first_component = _RE_FIRST_COMPONENT.match(target_base)
include_first_component = _RE_FIRST_COMPONENT.match(include_base)
if (target_first_component and include_first_component and
target_first_component.group(0) ==
include_first_component.group(0)):
return _POSSIBLE_MY_HEADER
return _OTHER_HEADER
def CheckIncludeLine(filename, clean_lines, linenum, include_state, error):
"""Check rules that are applicable to #include lines.
Strings on #include lines are NOT removed from elided line, to make
certain tasks easier. However, to prevent false positives, checks
applicable to #include lines in CheckLanguage must be put here.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
include_state: An _IncludeState instance in which the headers are inserted.
error: The function to call with any errors found.
"""
fileinfo = FileInfo(filename)
line = clean_lines.lines[linenum]
# "include" should use the new style "foo/bar.h" instead of just "bar.h"
if _RE_PATTERN_INCLUDE_NEW_STYLE.search(line):
error(filename, linenum, 'build/include', 4,
'Include the directory when naming .h files')
# we shouldn't include a file more than once. actually, there are a
# handful of instances where doing so is okay, but in general it's
# not.
match = _RE_PATTERN_INCLUDE.search(line)
if match:
include = match.group(2)
is_system = (match.group(1) == '<')
if include in include_state:
error(filename, linenum, 'build/include', 4,
'"%s" already included at %s:%s' %
(include, filename, include_state[include]))
else:
include_state[include] = linenum
# We want to ensure that headers appear in the right order:
# 1) for foo.cc, foo.h (preferred location)
# 2) c system files
# 3) cpp system files
# 4) for foo.cc, foo.h (deprecated location)
# 5) other google headers
#
# We classify each include statement as one of those 5 types
# using a number of techniques. The include_state object keeps
# track of the highest type seen, and complains if we see a
# lower type after that.
error_message = include_state.CheckNextIncludeOrder(
_ClassifyInclude(fileinfo, include, is_system))
if error_message:
error(filename, linenum, 'build/include_order', 4,
'%s. Should be: %s.h, c system, c++ system, other.' %
(error_message, fileinfo.BaseName()))
if not include_state.IsInAlphabeticalOrder(include):
error(filename, linenum, 'build/include_alpha', 4,
'Include "%s" not in alphabetical order' % include)
# Look for any of the stream classes that are part of standard C++.
match = _RE_PATTERN_INCLUDE.match(line)
if match:
include = match.group(2)
if Match(r'(f|ind|io|i|o|parse|pf|stdio|str|)?stream$', include):
# Many unit tests use cout, so we exempt them.
if not _IsTestFilename(filename):
error(filename, linenum, 'readability/streams', 3,
'Streams are highly discouraged.')
def _GetTextInside(text, start_pattern):
"""Retrieves all the text between matching open and close parentheses.
Given a string of lines and a regular expression string, retrieve all the text
following the expression and between opening punctuation symbols like
  (, [, or {, and the matching close-punctuation symbol. This properly handles
  nested occurrences of the punctuation, so for text like
printf(a(), b(c()));
a call to _GetTextInside(text, r'printf\(') will return 'a(), b(c())'.
  start_pattern must match a string ending with an opening punctuation symbol.
Args:
text: The lines to extract text. Its comments and strings must be elided.
It can be single line and can span multiple lines.
start_pattern: The regexp string indicating where to start extracting
the text.
Returns:
The extracted text.
None if either the opening string or ending punctuation could not be found.
"""
# TODO(sugawarayu): Audit cpplint.py to see what places could be profitably
  # rewritten to use _GetTextInside (i.e., places that currently use inferior
  # regexp matching).
# Give opening punctuations to get the matching close-punctuations.
matching_punctuation = {'(': ')', '{': '}', '[': ']'}
closing_punctuation = set(itervalues(matching_punctuation))
# Find the position to start extracting text.
match = re.search(start_pattern, text, re.M)
if not match: # start_pattern not found in text.
return None
start_position = match.end(0)
  assert start_position > 0, (
      'start_pattern must end with an opening punctuation.')
  assert text[start_position - 1] in matching_punctuation, (
      'start_pattern must end with an opening punctuation.')
# Stack of closing punctuations we expect to have in text after position.
punctuation_stack = [matching_punctuation[text[start_position - 1]]]
position = start_position
while punctuation_stack and position < len(text):
if text[position] == punctuation_stack[-1]:
punctuation_stack.pop()
elif text[position] in closing_punctuation:
# A closing punctuation without matching opening punctuations.
return None
elif text[position] in matching_punctuation:
punctuation_stack.append(matching_punctuation[text[position]])
position += 1
if punctuation_stack:
# Opening punctuations left without matching close-punctuations.
return None
# punctuations match.
return text[start_position:position - 1]
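# Illustrative sketch (not part of the original cpplint): _GetTextInside
# balances nested punctuation. The helper below is hypothetical and never
# called by the linter itself.
def _GetTextInsideExamples():
  """Sketches extraction of balanced text after an opening punctuation."""
  assert _GetTextInside('printf(a(), b(c()));', r'printf\(') == 'a(), b(c())'
  assert _GetTextInside('foo[bar(1, 2)]', r'foo\[') == 'bar(1, 2)'
  assert _GetTextInside('no call here', r'printf\(') is None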
def CheckLanguage(filename, clean_lines, linenum, file_extension, include_state,
error):
"""Checks rules from the 'C++ language rules' section of cppguide.html.
Some of these rules are hard to test (function overloading, using
uint32 inappropriately), but we do the best we can.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
file_extension: The extension (without the dot) of the filename.
include_state: An _IncludeState instance in which the headers are inserted.
error: The function to call with any errors found.
"""
# If the line is empty or consists of entirely a comment, no need to
# check it.
line = clean_lines.elided[linenum]
if not line:
return
match = _RE_PATTERN_INCLUDE.search(line)
if match:
CheckIncludeLine(filename, clean_lines, linenum, include_state, error)
return
# Create an extended_line, which is the concatenation of the current and
# next lines, for more effective checking of code that may span more than one
# line.
if linenum + 1 < clean_lines.NumLines():
extended_line = line + clean_lines.elided[linenum + 1]
else:
extended_line = line
# Make Windows paths like Unix.
fullname = os.path.abspath(filename).replace('\\', '/')
# TODO(unknown): figure out if they're using default arguments in fn proto.
# Check for non-const references in functions. This is tricky because &
  # is also used to take the address of something. We allow <> for templates
  # (ignoring whatever is between the angle brackets) and : for classes.
# These are complicated re's. They try to capture the following:
# paren (for fn-prototype start), typename, &, varname. For the const
# version, we're willing for const to be before typename or after
# Don't check the implementation on same line.
fnline = line.split('{', 1)[0]
if (len(re.findall(r'\([^()]*\b(?:[\w:]|<[^()]*>)+(\s?&|&\s?)\w+', fnline)) >
len(re.findall(r'\([^()]*\bconst\s+(?:typename\s+)?(?:struct\s+)?'
r'(?:[\w:]|<[^()]*>)+(\s?&|&\s?)\w+', fnline)) +
len(re.findall(r'\([^()]*\b(?:[\w:]|<[^()]*>)+\s+const(\s?&|&\s?)[\w]+',
fnline))):
# We allow non-const references in a few standard places, like functions
# called "swap()" or iostream operators like "<<" or ">>".
if not Search(
r'(swap|Swap|operator[<>][<>])\s*\(\s*(?:[\w:]|<.*>)+\s*&',
fnline):
error(filename, linenum, 'runtime/references', 2,
'Is this a non-const reference? '
'If so, make const or use a pointer.')
  # Check to see if they're using a conversion function cast.
# I just try to capture the most common basic types, though there are more.
# Parameterless conversion functions, such as bool(), are allowed as they are
# probably a member operator declaration or default constructor.
match = Search(
r'(\bnew\s+)?\b' # Grab 'new' operator, if it's there
r'(int|float|double|bool|char|int32|uint32|int64|uint64)\([^)]', line)
if match:
# gMock methods are defined using some variant of MOCK_METHODx(name, type)
# where type may be float(), int(string), etc. Without context they are
# virtually indistinguishable from int(x) casts. Likewise, gMock's
# MockCallback takes a template parameter of the form return_type(arg_type),
# which looks much like the cast we're trying to detect.
if (match.group(1) is None and # If new operator, then this isn't a cast
not (Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line) or
Match(r'^\s*MockCallback<.*>', line))):
error(filename, linenum, 'readability/casting', 4,
'Using deprecated casting style. '
'Use static_cast<%s>(...) instead' %
match.group(2))
CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum],
'static_cast',
r'\((int|float|double|bool|char|u?int(16|32|64))\)', error)
# This doesn't catch all cases. Consider (const char * const)"hello".
#
# (char *) "foo" should always be a const_cast (reinterpret_cast won't
# compile).
if CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum],
'const_cast', r'\((char\s?\*+\s?)\)\s*"', error):
pass
else:
# Check pointer casts for other than string constants
CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum],
'reinterpret_cast', r'\((\w+\s?\*+\s?)\)', error)
# In addition, we look for people taking the address of a cast. This
# is dangerous -- casts can assign to temporaries, so the pointer doesn't
# point where you think.
if Search(
r'(&\([^)]+\)[\w(])|(&(static|dynamic|reinterpret)_cast\b)', line):
error(filename, linenum, 'runtime/casting', 4,
('Are you taking an address of a cast? '
'This is dangerous: could be a temp var. '
'Take the address before doing the cast, rather than after'))
# Check for people declaring static/global STL strings at the top level.
# This is dangerous because the C++ language does not guarantee that
# globals with constructors are initialized before the first access.
match = Match(
r'((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\b(.*)',
line)
# Make sure it's not a function.
# Function template specialization looks like: "string foo<Type>(...".
# Class template definitions look like: "string Foo<Type>::Method(...".
if match and not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)?\s*\(([^"]|$)',
match.group(3)):
error(filename, linenum, 'runtime/string', 4,
'For a static/global string constant, use a C style string instead: '
'"%schar %s[]".' %
(match.group(1), match.group(2)))
# Check that we're not using RTTI outside of testing code.
if Search(r'\bdynamic_cast<', line) and not _IsTestFilename(filename):
error(filename, linenum, 'runtime/rtti', 5,
'Do not use dynamic_cast<>. If you need to cast within a class '
"hierarchy, use static_cast<> to upcast. Google doesn't support "
'RTTI.')
if Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line):
error(filename, linenum, 'runtime/init', 4,
'You seem to be initializing a member variable with itself.')
if file_extension == 'h':
# TODO(unknown): check that 1-arg constructors are explicit.
# How to tell it's a constructor?
# (handled in CheckForNonStandardConstructs for now)
# TODO(unknown): check that classes have DISALLOW_EVIL_CONSTRUCTORS
# (level 1 error)
pass
# Check if people are using the verboten C basic types. The only exception
# we regularly allow is "unsigned short port" for port.
if Search(r'\bshort port\b', line):
if not Search(r'\bunsigned short port\b', line):
error(filename, linenum, 'runtime/int', 4,
'Use "unsigned short" for ports, not "short"')
else:
match = Search(r'\b(short|long(?! +double)|long long)\b', line)
if match:
error(filename, linenum, 'runtime/int', 4,
'Use int16/int64/etc, rather than the C type %s' % match.group(1))
# When snprintf is used, the second argument shouldn't be a literal.
match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line)
if match and match.group(2) != '0':
# If 2nd arg is zero, snprintf is used to calculate size.
error(filename, linenum, 'runtime/printf', 3,
'If you can, use sizeof(%s) instead of %s as the 2nd arg '
'to snprintf.' % (match.group(1), match.group(2)))
# Check if some verboten C functions are being used.
if Search(r'\bsprintf\b', line):
error(filename, linenum, 'runtime/printf', 5,
'Never use sprintf. Use snprintf instead.')
match = Search(r'\b(strcpy|strcat)\b', line)
if match:
error(filename, linenum, 'runtime/printf', 4,
'Almost always, snprintf is better than %s' % match.group(1))
if Search(r'\bsscanf\b', line):
error(filename, linenum, 'runtime/printf', 1,
'sscanf can be ok, but is slow and can overflow buffers.')
# Check if some verboten operator overloading is going on
# TODO(unknown): catch out-of-line unary operator&:
# class X {};
# int operator&(const X& x) { return 42; } // unary operator&
# The trick is it's hard to tell apart from binary operator&:
# class Y { int operator&(const Y& x) { return 23; } }; // binary operator&
if Search(r'\boperator\s*&\s*\(\s*\)', line):
error(filename, linenum, 'runtime/operator', 4,
'Unary operator& is dangerous. Do not use it.')
# Check for suspicious usage of "if" like
# } if (a == b) {
if Search(r'\}\s*if\s*\(', line):
error(filename, linenum, 'readability/braces', 4,
'Did you mean "else if"? If not, start a new line for "if".')
# Check for potential format string bugs like printf(foo).
# We constrain the pattern not to pick things like DocidForPrintf(foo).
# Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())
# TODO(sugawarayu): Catch the following case. Need to change the calling
# convention of the whole function to process multiple line to handle it.
# printf(
# boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line);
printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(')
if printf_args:
match = Match(r'([\w.\->()]+)$', printf_args)
if match:
function_name = re.search(r'\b((?:string)?printf)\s*\(',
line, re.I).group(1)
error(filename, linenum, 'runtime/printf', 4,
'Potential format string bug. Do %s("%%s", %s) instead.'
% (function_name, match.group(1)))
# Check for potential memset bugs like memset(buf, sizeof(buf), 0).
match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)):
error(filename, linenum, 'runtime/memset', 4,
'Did you mean "memset(%s, 0, %s)"?'
% (match.group(1), match.group(2)))
if Search(r'\busing namespace\b', line):
error(filename, linenum, 'build/namespaces', 5,
'Do not use namespace using-directives. '
'Use using-declarations instead.')
# Detect variable-length arrays.
match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
if (match and match.group(2) != 'return' and match.group(2) != 'delete' and
match.group(3).find(']') == -1):
# Split the size using space and arithmetic operators as delimiters.
# If any of the resulting tokens are not compile time constants then
# report the error.
    tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>', match.group(3))
is_const = True
skip_next = False
for tok in tokens:
if skip_next:
skip_next = False
continue
if Search(r'sizeof\(.+\)', tok): continue
if Search(r'arraysize\(\w+\)', tok): continue
tok = tok.lstrip('(')
tok = tok.rstrip(')')
if not tok: continue
if Match(r'\d+', tok): continue
if Match(r'0[xX][0-9a-fA-F]+', tok): continue
if Match(r'k[A-Z0-9]\w*', tok): continue
if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue
if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue
# A catch all for tricky sizeof cases, including 'sizeof expression',
# 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)'
# requires skipping the next token because we split on ' ' and '*'.
if tok.startswith('sizeof'):
skip_next = True
continue
is_const = False
break
if not is_const:
error(filename, linenum, 'runtime/arrays', 1,
'Do not use variable-length arrays. Use an appropriately named '
"('k' followed by CamelCase) compile-time constant for the size.")
# If DISALLOW_EVIL_CONSTRUCTORS, DISALLOW_COPY_AND_ASSIGN, or
# DISALLOW_IMPLICIT_CONSTRUCTORS is present, then it should be the last thing
# in the class declaration.
match = Match(
(r'\s*'
r'(DISALLOW_(EVIL_CONSTRUCTORS|COPY_AND_ASSIGN|IMPLICIT_CONSTRUCTORS))'
r'\(.*\);$'),
line)
if match and linenum + 1 < clean_lines.NumLines():
next_line = clean_lines.elided[linenum + 1]
# We allow some, but not all, declarations of variables to be present
# in the statement that defines the class. The [\w\*,\s]* fragment of
# the regular expression below allows users to declare instances of
# the class or pointers to instances, but not less common types such
# as function pointers or arrays. It's a tradeoff between allowing
# reasonable code and avoiding trying to parse more C++ using regexps.
if not Search(r'^\s*}[\w\*,\s]*;', next_line):
error(filename, linenum, 'readability/constructors', 3,
match.group(1) + ' should be the last thing in the class')
# Check for use of unnamed namespaces in header files. Registration
# macros are typically OK, so we allow use of "namespace {" on lines
# that end with backslashes.
if (file_extension == 'h'
and Search(r'\bnamespace\s*{', line)
and line[-1] != '\\'):
error(filename, linenum, 'build/namespaces', 4,
'Do not use unnamed namespaces in header files. See '
'http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
' for more information.')
def CheckCStyleCast(filename, linenum, line, raw_line, cast_type, pattern,
error):
"""Checks for a C-style cast by looking for the pattern.
This also handles sizeof(type) warnings, due to similarity of content.
Args:
filename: The name of the current file.
linenum: The number of the line to check.
line: The line of code to check.
raw_line: The raw line of code to check, with comments.
cast_type: The string for the C++ cast to recommend. This is either
reinterpret_cast, static_cast, or const_cast, depending.
pattern: The regular expression used to find C-style casts.
error: The function to call with any errors found.
Returns:
True if an error was emitted.
False otherwise.
"""
match = Search(pattern, line)
if not match:
return False
# e.g., sizeof(int)
sizeof_match = Match(r'.*sizeof\s*$', line[0:match.start(1) - 1])
if sizeof_match:
error(filename, linenum, 'runtime/sizeof', 1,
'Using sizeof(type). Use sizeof(varname) instead if possible')
return True
remainder = line[match.end(0):]
# The close paren is for function pointers as arguments to a function.
# eg, void foo(void (*bar)(int));
# The semicolon check is a more basic function check; also possibly a
# function pointer typedef.
# eg, void foo(int); or void foo(int) const;
# The equals check is for function pointer assignment.
# eg, void *(*foo)(int) = ...
# The > is for MockCallback<...> ...
#
# Right now, this will only catch cases where there's a single argument, and
# it's unnamed. It should probably be expanded to check for multiple
# arguments with some unnamed.
function_match = Match(r'\s*(\)|=|(const)?\s*(;|\{|throw\(\)|>))', remainder)
if function_match:
if (not function_match.group(3) or
function_match.group(3) == ';' or
('MockCallback<' not in raw_line and
'/*' not in raw_line)):
error(filename, linenum, 'readability/function', 3,
'All parameters should be named in a function')
return True
# At this point, all that should be left is actual casts.
error(filename, linenum, 'readability/casting', 4,
'Using C-style cast. Use %s<%s>(...) instead' %
(cast_type, match.group(1)))
return True
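# Illustrative sketch (not part of the original cpplint): CheckCStyleCast
# flagging a plain (int) cast. The collector, helper, and the trimmed pattern
# below are hypothetical and never used by the linter itself.
def _CheckCStyleCastExamples():
  """Collects the message emitted for a simple C-style integer cast."""
  messages = []
  def _collect(filename, linenum, category, confidence, message):
    messages.append((category, message))
  line = 'int y = (int)x;'
  emitted = CheckCStyleCast('demo.cc', 1, line, line, 'static_cast',
                            r'\((int|float|double|bool|char)\)', _collect)
  return emitted, messages  # expected: (True, [('readability/casting', ...)])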
_HEADERS_CONTAINING_TEMPLATES = (
('<deque>', ('deque',)),
('<functional>', ('unary_function', 'binary_function',
'plus', 'minus', 'multiplies', 'divides', 'modulus',
'negate',
'equal_to', 'not_equal_to', 'greater', 'less',
'greater_equal', 'less_equal',
'logical_and', 'logical_or', 'logical_not',
'unary_negate', 'not1', 'binary_negate', 'not2',
'bind1st', 'bind2nd',
'pointer_to_unary_function',
'pointer_to_binary_function',
'ptr_fun',
'mem_fun_t', 'mem_fun', 'mem_fun1_t', 'mem_fun1_ref_t',
'mem_fun_ref_t',
'const_mem_fun_t', 'const_mem_fun1_t',
'const_mem_fun_ref_t', 'const_mem_fun1_ref_t',
'mem_fun_ref',
)),
('<limits>', ('numeric_limits',)),
('<list>', ('list',)),
('<map>', ('map', 'multimap',)),
('<memory>', ('allocator',)),
('<queue>', ('queue', 'priority_queue',)),
('<set>', ('set', 'multiset',)),
('<stack>', ('stack',)),
('<string>', ('char_traits', 'basic_string',)),
('<utility>', ('pair',)),
('<vector>', ('vector',)),
# gcc extensions.
# Note: std::hash is their hash, ::hash is our hash
('<hash_map>', ('hash_map', 'hash_multimap',)),
('<hash_set>', ('hash_set', 'hash_multiset',)),
('<slist>', ('slist',)),
)
_RE_PATTERN_STRING = re.compile(r'\bstring\b')
_re_pattern_algorithm_header = []
for _template in ('copy', 'max', 'min', 'min_element', 'sort', 'swap',
'transform'):
# Match max<type>(..., ...), max(..., ...), but not foo->max, foo.max or
# type::max().
_re_pattern_algorithm_header.append(
(re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'),
_template,
'<algorithm>'))
_re_pattern_templates = []
for _header, _templates in _HEADERS_CONTAINING_TEMPLATES:
for _template in _templates:
_re_pattern_templates.append(
(re.compile(r'(\<|\b)' + _template + r'\s*\<'),
_template + '<>',
_header))
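# Illustrative sketch (not part of the original cpplint): the generated
# patterns map template usage back to the header that declares it. The helper
# below is hypothetical and never called by the linter itself.
def _TemplatePatternExamples():
  """Sketches how a line of code is matched to a required header."""
  line = 'std::map<int, int> lookup;'
  needed = [header for pattern, template, header in _re_pattern_templates
            if pattern.search(line)]
  assert '<map>' in needed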
def FilesBelongToSameModule(filename_cc, filename_h):
"""Check if these two filenames belong to the same module.
  The concept of a 'module' here is as follows:
foo.h, foo-inl.h, foo.cc, foo_test.cc and foo_unittest.cc belong to the
same 'module' if they are in the same directory.
some/path/public/xyzzy and some/path/internal/xyzzy are also considered
to belong to the same module here.
If the filename_cc contains a longer path than the filename_h, for example,
'/absolute/path/to/base/sysinfo.cc', and this file would include
'base/sysinfo.h', this function also produces the prefix needed to open the
header. This is used by the caller of this function to more robustly open the
header file. We don't have access to the real include paths in this context,
so we need this guesswork here.
Known bugs: tools/base/bar.cc and base/bar.h belong to the same module
according to this implementation. Because of this, this function gives
some false positives. This should be sufficiently rare in practice.
Args:
filename_cc: is the path for the .cc file
filename_h: is the path for the header path
Returns:
Tuple with a bool and a string:
bool: True if filename_cc and filename_h belong to the same module.
string: the additional prefix needed to open the header file.
"""
if not filename_cc.endswith('.cc'):
return (False, '')
filename_cc = filename_cc[:-len('.cc')]
if filename_cc.endswith('_unittest'):
filename_cc = filename_cc[:-len('_unittest')]
elif filename_cc.endswith('_test'):
filename_cc = filename_cc[:-len('_test')]
filename_cc = filename_cc.replace('/public/', '/')
filename_cc = filename_cc.replace('/internal/', '/')
if not filename_h.endswith('.h'):
return (False, '')
filename_h = filename_h[:-len('.h')]
if filename_h.endswith('-inl'):
filename_h = filename_h[:-len('-inl')]
filename_h = filename_h.replace('/public/', '/')
filename_h = filename_h.replace('/internal/', '/')
files_belong_to_same_module = filename_cc.endswith(filename_h)
common_path = ''
if files_belong_to_same_module:
common_path = filename_cc[:-len(filename_h)]
return files_belong_to_same_module, common_path
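# Illustrative sketch (not part of the original cpplint): the module-matching
# heuristic described in the docstring above. The helper below is hypothetical
# and never called by the linter itself.
def _FilesBelongToSameModuleExamples():
  """Sketches matching of .cc files to their headers, with the common prefix."""
  assert FilesBelongToSameModule('a/b/foo.cc', 'a/b/foo.h') == (True, '')
  assert FilesBelongToSameModule('/abs/path/to/base/sysinfo.cc',
                                 'base/sysinfo.h') == (True, '/abs/path/to/')
  assert FilesBelongToSameModule('a/b/foo.cc', 'a/b/bar.h') == (False, '')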
def UpdateIncludeState(filename, include_state, io=codecs):
"""Fill up the include_state with new includes found from the file.
Args:
filename: the name of the header to read.
include_state: an _IncludeState instance in which the headers are inserted.
io: The io factory to use to read the file. Provided for testability.
Returns:
    True if a header was successfully added. False otherwise.
"""
headerfile = None
try:
headerfile = io.open(filename, 'r', 'utf8', 'replace')
except IOError:
return False
linenum = 0
for line in headerfile:
linenum += 1
clean_line = CleanseComments(line)
match = _RE_PATTERN_INCLUDE.search(clean_line)
if match:
include = match.group(2)
# The value formatting is cute, but not really used right now.
# What matters here is that the key is in include_state.
include_state.setdefault(include, '%s:%d' % (filename, linenum))
return True
def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error,
io=codecs):
"""Reports for missing stl includes.
This function will output warnings to make sure you are including the headers
necessary for the stl containers and functions that you use. We only give one
reason to include a header. For example, if you use both equal_to<> and
less<> in a .h file, only one (the latter in the file) of these will be
  reported as a reason to include the <functional> header.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
include_state: An _IncludeState instance.
error: The function to call with any errors found.
io: The IO factory to use to read the header file. Provided for unittest
injection.
"""
required = {} # A map of header name to linenumber and the template entity.
# Example of required: { '<functional>': (1219, 'less<>') }
for linenum in range(clean_lines.NumLines()):
line = clean_lines.elided[linenum]
if not line or line[0] == '#':
continue
# String is special -- it is a non-templatized type in STL.
matched = _RE_PATTERN_STRING.search(line)
if matched:
# Don't warn about strings in non-STL namespaces:
# (We check only the first match per line; good enough.)
prefix = line[:matched.start()]
if prefix.endswith('std::') or not prefix.endswith('::'):
required['<string>'] = (linenum, 'string')
for pattern, template, header in _re_pattern_algorithm_header:
if pattern.search(line):
required[header] = (linenum, template)
    # The following check is just a speed-up; no semantics are changed.
    if '<' not in line:  # Reduces CPU time by skipping such lines.
continue
for pattern, template, header in _re_pattern_templates:
if pattern.search(line):
required[header] = (linenum, template)
# The policy is that if you #include something in foo.h you don't need to
# include it again in foo.cc. Here, we will look at possible includes.
# Let's copy the include_state so it is only messed up within this function.
include_state = include_state.copy()
  # Did we find the header for this file (if any) and successfully load it?
header_found = False
# Use the absolute path so that matching works properly.
abs_filename = FileInfo(filename).FullName()
# For Emacs's flymake.
# If cpplint is invoked from Emacs's flymake, a temporary file is generated
# by flymake and that file name might end with '_flymake.cc'. In that case,
# restore original file name here so that the corresponding header file can be
# found.
# e.g. If the file name is 'foo_flymake.cc', we should search for 'foo.h'
# instead of 'foo_flymake.h'
abs_filename = re.sub(r'_flymake\.cc$', '.cc', abs_filename)
# include_state is modified during iteration, so we iterate over a copy of
# the keys.
header_keys = list(include_state.keys())
for header in header_keys:
(same_module, common_path) = FilesBelongToSameModule(abs_filename, header)
fullpath = common_path + header
if same_module and UpdateIncludeState(fullpath, include_state, io):
header_found = True
# If we can't find the header file for a .cc, assume it's because we don't
# know where to look. In that case we'll give up as we're not sure they
# didn't include it in the .h file.
# TODO(unknown): Do a better job of finding .h files so we are confident that
# not having the .h file means there isn't one.
if filename.endswith('.cc') and not header_found:
return
# All the lines have been processed, report the errors found.
for required_header_unstripped in required:
template = required[required_header_unstripped][1]
if required_header_unstripped.strip('<>"') not in include_state:
error(filename, required[required_header_unstripped][0],
'build/include_what_you_use', 4,
'Add #include ' + required_header_unstripped + ' for ' + template)
_RE_PATTERN_EXPLICIT_MAKEPAIR = re.compile(r'\bmake_pair\s*<')
def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error):
"""Check that make_pair's template arguments are deduced.
G++ 4.6 in C++0x mode fails badly if make_pair's template arguments are
specified explicitly, and such use isn't intended in any case.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
raw = clean_lines.raw_lines
line = raw[linenum]
match = _RE_PATTERN_EXPLICIT_MAKEPAIR.search(line)
if match:
error(filename, linenum, 'build/explicit_make_pair',
4, # 4 = high confidence
'Omit template arguments from make_pair OR use pair directly OR'
' if appropriate, construct a pair directly')
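# A minimal sketch (not part of cpplint itself) of how the check above fires;
# it assumes CleansedLines accepts a plain list of source lines and that
# `collect` is a caller-supplied callback with the usual (filename, linenum,
# category, confidence, message) error signature used throughout this file:
#
#   def collect(fname, linenum, category, confidence, message):
#     print('%s:%d: %s [%s] [%d]' % (fname, linenum, message, category, confidence))
#
#   clean = CleansedLines(['p = make_pair<int, int>(1, 2);'])
#   CheckMakePairUsesDeduction('foo.cc', clean, 0, collect)
#   # -> reports build/explicit_make_pair, suggesting the template args be omitted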
def ProcessLine(filename, file_extension,
clean_lines, line, include_state, function_state,
class_state, error, extra_check_functions=[]):
"""Processes a single line in the file.
Args:
filename: Filename of the file that is being processed.
file_extension: The extension (dot not included) of the file.
clean_lines: An array of strings, each representing a line of the file,
with comments stripped.
line: Number of line being processed.
include_state: An _IncludeState instance in which the headers are inserted.
function_state: A _FunctionState instance which counts function lines, etc.
class_state: A _ClassState instance which maintains information about
the current stack of nested class declarations being parsed.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
"""
raw_lines = clean_lines.raw_lines
ParseNolintSuppressions(filename, raw_lines[line], line, error)
CheckForFunctionLengths(filename, clean_lines, line, function_state, error)
CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error)
CheckStyle(filename, clean_lines, line, file_extension, class_state, error)
CheckLanguage(filename, clean_lines, line, file_extension, include_state,
error)
CheckForNonStandardConstructs(filename, clean_lines, line,
class_state, error)
CheckPosixThreading(filename, clean_lines, line, error)
CheckInvalidIncrement(filename, clean_lines, line, error)
CheckMakePairUsesDeduction(filename, clean_lines, line, error)
for check_fn in extra_check_functions:
check_fn(filename, clean_lines, line, error)
def ProcessFileData(filename, file_extension, lines, error,
extra_check_functions=[]):
"""Performs lint checks and reports any errors to the given error function.
Args:
filename: Filename of the file that is being processed.
file_extension: The extension (dot not included) of the file.
lines: An array of strings, each representing a line of the file, with the
last element being empty if the file is terminated with a newline.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
"""
lines = (['// marker so line numbers and indices both start at 1'] + lines +
['// marker so line numbers end in a known way'])
include_state = _IncludeState()
function_state = _FunctionState()
class_state = _ClassState()
ResetNolintSuppressions()
CheckForCopyright(filename, lines, error)
if file_extension == 'h':
CheckForHeaderGuard(filename, lines, error)
RemoveMultiLineComments(filename, lines, error)
clean_lines = CleansedLines(lines)
for line in range(clean_lines.NumLines()):
ProcessLine(filename, file_extension, clean_lines, line,
include_state, function_state, class_state, error,
extra_check_functions)
class_state.CheckFinished(filename, error)
CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error)
# We check here rather than inside ProcessLine so that we see raw
# lines rather than "cleaned" lines.
CheckForUnicodeReplacementCharacters(filename, lines, error)
CheckForNewlineAtEOF(filename, lines, error)
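# A hypothetical driver for ProcessFileData using a collecting error callback
# instead of the module-level Error function (names below are illustrative and
# bypass the filter/NOLINT handling normally done inside Error):
#
#   collected = []
#   def collect(fname, linenum, category, confidence, message):
#     collected.append((linenum, category, message))
#
#   source_lines = codecs.open('foo.cc', 'r', 'utf8', 'replace').read().split('\n')
#   ProcessFileData('foo.cc', 'cc', source_lines, collect)
#   # `collected` now holds (line, category, message) tuples for every finding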
def ProcessFile(filename, vlevel, extra_check_functions=[]):
"""Does google-lint on a single file.
Args:
filename: The name of the file to parse.
vlevel: The level of errors to report. Every error of confidence
>= verbose_level will be reported. 0 is a good default.
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
"""
_SetVerboseLevel(vlevel)
try:
# Support the UNIX convention of using "-" for stdin. Note that
# we are not opening the file with universal newline support
# (which codecs doesn't support anyway), so the resulting lines do
# contain trailing '\r' characters if we are reading a file that
# has CRLF endings.
# If after the split a trailing '\r' is present, it is removed
# below. If it is not expected to be present (i.e. os.linesep !=
# '\r\n' as in Windows), a warning is issued below if this file
# is processed.
if filename == '-':
lines = codecs.StreamReaderWriter(sys.stdin,
codecs.getreader('utf8'),
codecs.getwriter('utf8'),
'replace').read().split('\n')
else:
lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n')
carriage_return_found = False
# Remove trailing '\r'.
for linenum in range(len(lines)):
if lines[linenum].endswith('\r'):
lines[linenum] = lines[linenum].rstrip('\r')
carriage_return_found = True
except IOError:
sys.stderr.write(
"Skipping input '%s': Can't open for reading\n" % filename)
return
# Note, if no dot is found, this will give the entire filename as the ext.
file_extension = filename[filename.rfind('.') + 1:]
# When reading from stdin, the extension is unknown, so no cpplint tests
# should rely on the extension.
if (filename != '-' and file_extension not in EXTENSIONS):
sys.stderr.write('Ignoring %s; extension not in %s\n' % (filename, EXTENSIONS))
else:
ProcessFileData(filename, file_extension, lines, Error,
extra_check_functions)
if carriage_return_found and os.linesep != '\r\n':
# Use 0 for linenum since outputting only one error for potentially
# several lines.
Error(filename, 0, 'whitespace/newline', 1,
          'One or more unexpected \\r (^M) found; '
'better to use only a \\n')
sys.stderr.write('Done processing %s\n' % filename)
def PrintUsage(message):
"""Prints a brief usage string and exits, optionally with an error message.
Args:
message: The optional error message.
"""
sys.stderr.write(_USAGE)
if message:
sys.exit('\nFATAL ERROR: ' + message)
else:
sys.exit(1)
def PrintCategories():
"""Prints a list of all the error-categories used by error messages.
These are the categories used to filter messages via --filter.
"""
sys.stderr.write(''.join(' %s\n' % cat for cat in _ERROR_CATEGORIES))
sys.exit(0)
def ParseArguments(args):
"""Parses the command line arguments.
This may set the output format and verbosity level as side-effects.
Args:
args: The command line arguments:
Returns:
The list of filenames to lint.
"""
try:
(opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=',
'counting=',
'filter='])
except getopt.GetoptError:
PrintUsage('Invalid arguments.')
verbosity = _VerboseLevel()
output_format = _OutputFormat()
filters = ''
counting_style = ''
for (opt, val) in opts:
if opt == '--help':
PrintUsage(None)
elif opt == '--output':
      if val not in ('emacs', 'vs7'):
PrintUsage('The only allowed output formats are emacs and vs7.')
output_format = val
elif opt == '--verbose':
verbosity = int(val)
elif opt == '--filter':
filters = val
if not filters:
PrintCategories()
elif opt == '--counting':
if val not in ('total', 'toplevel', 'detailed'):
PrintUsage('Valid counting options are total, toplevel, and detailed')
counting_style = val
if not filenames:
PrintUsage('No files were specified.')
_SetOutputFormat(output_format)
_SetVerboseLevel(verbosity)
_SetFilters(filters)
_SetCountingStyle(counting_style)
return filenames
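# Example invocations accepted by ParseArguments (the filter categories shown
# are illustrative; valid names come from _ERROR_CATEGORIES):
#
#   python cpplint.py foo.cc foo.h
#   python cpplint.py --output=vs7 --verbose=3 --counting=detailed foo.cc
#   python cpplint.py --filter=-whitespace,+whitespace/braces foo.cc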
def main():
filenames = ParseArguments(sys.argv[1:])
backup_err = sys.stderr
try:
# Change stderr to write with replacement characters so we don't die
# if we try to print something containing non-ASCII characters.
sys.stderr = codecs.StreamReader(sys.stderr,
'replace')
_cpplint_state.ResetErrorCounts()
for filename in filenames:
ProcessFile(filename, _cpplint_state.verbose_level)
_cpplint_state.PrintErrorCounts()
finally:
sys.stderr = backup_err
sys.exit(_cpplint_state.error_count > 0)
if __name__ == '__main__':
main()
| gpl-3.0 |
alexrudnick/terere | dependencies/AntiMorfo-1.2/antimorfo/morpho/morphology.py | 2 | 35105 | """
This file is part of L3Morpho.
L3Morpho is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
L3Morpho is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with L3Morpho. If not, see <http://www.gnu.org/licenses/>.
--------------------------------------------------------------------
Author: Michael Gasser <[email protected]>
Morphological processing.
Morphology and POSMorphology objects.
Analysis, generation.
Loading, composing, saving FSTs.
"""
from .fst import *
## Default punctuation characters
PUNCTUATION = r'[“‘”’–—…¿¡•:;/\\,<>?.!%$()[\]{}|#@&*\-_+=\"\'\`^~]'
## Default alphabetic characters
CHARACTERS = r'[a-zA-Z]'
class Morphology(dict):
"""A dict of POSMorphology dicts, one for each POS class that has bound morphology."""
version = 1.3
complex = 0
simple = 1
def __init__(self, fsh=None, pos_morphs=[],
punctuation=PUNCTUATION, characters=CHARACTERS):
dict.__init__(self)
if fsh:
self.set_fsh(*fsh)
else:
self.fsh = None
self.pos = []
for pos_morph in pos_morphs:
label = pos_morph[0]
posmorph = POSMorphology(*pos_morph)
self[label] = posmorph
posmorph.morphology = self
self.pos.append(label)
# Function that simplifies orthography
self.simplify = None
# Function that converts phonological to orthographic representation
self.orthographize = None
# Function that returns trivially analyzable forms
self.triv_anal = None
# Function that converts (POS, root, citation, FS) to a string
self.anal2string = None
# Pair of lists of unanalyzable words: (complex, simple)
self.words = [[], []]
self.words_phon = [{}, {}]
self.seg_units = []
self.language = None
# Dictionary of preanalyzed words (varying POS)
self.analyzed = {}
self.analyzed_phon = {}
# FST for making forms phonetic
self.phon_fst = None
self.directory = ''
self.punctuation = punctuation
# Make punctuation regular expression objects and substitution string
self.init_punc(characters, punctuation)
def init_punc(self, chars, punc):
'''Make punctuation regular expression objects and substitution string.'''
self.punc_after_re = re.compile(r'(' + chars + r')(' + punc + r')', re.U)
self.punc_before_re = re.compile(r'(' + punc + r')(' + chars + r')', re.U)
self.punc_sub = r'\1 \2'
def sep_punc(self, text):
"""Separate punctuation from words."""
text = self.punc_after_re.sub(self.punc_sub, text)
text = self.punc_before_re.sub(self.punc_sub, text)
return text
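  # Illustrative only, assuming the default PUNCTUATION and CHARACTERS
  # patterns:
  #
  #   morphology.sep_punc('hola, mundo!')  # -> 'hola , mundo !'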
def is_word(self, word, simple=False, ortho=True):
"""Is word an unanalyzable word?"""
if ortho and word in self.punctuation:
return word
if ortho and not self.words:
return None
if not ortho and not self.words_phon:
return None
if ortho:
word_rec = self.words[Morphology.simple if simple else Morphology.complex]
return word in word_rec and word
else:
word_rec = self.words_phon[Morphology.simple if simple else Morphology.complex]
return word_rec.get(word, False)
def set_words(self, filename='words.lex', ortho=True, simplify=False):
'''Set the list/dict of unanalyzed words, reading them in from a file, one per line.'''
if not ortho:
filename = 'words_phon.lex'
path = os.path.join(self.directory, filename)
position = Morphology.simple if simplify else Morphology.complex
if os.path.exists(path):
file = open(path, encoding='utf8')
if ortho:
# Read in the words as a list
self.words[position] = [w.strip() for w in file]
else:
# Read in ortho:phon pairs as a dict
self.words_phon[position] = dict([w.split() for w in file])
file.close()
else:
self.words = []
self.words_phon = []
def get_analyzed(self, word):
'''Get the pre-analyzed FS for word.'''
return self.analyzed.get(word)
def set_analyzed(self, filename='analyzed.lex', ortho=True, verbose=False):
'''Set the dict of analyzed words, reading them in from a file, one per line.'''
if not ortho:
filename = 'analyzed_phon.lex'
path = os.path.join(self.directory, filename)
if os.path.exists(path):
file = open(path, encoding='utf8')
if verbose:
print('Storing pre-analyzed forms')
if ortho:
for line in file:
# Word and FS separated by two spaces
word, anal = line.split(' ')
fs = FSSet.parse(anal.strip())
self.analyzed[word] = fs
else:
for line in file:
# Word and FS separated by two spaces
word, phon, anal = line.split(' ')
fs = FSSet.parse(anal.strip())
self.analyzed_phon[word] = (phon, fs)
file.close()
def set_fsh(self, *label_fs):
"""Set the Feature Structure Hierarchy for this language's morphology."""
self.fsh = FSHier()
self.fsh.parse(label_fs)
def trivial_anal(self, form):
"""Attempt to trivially analyze form."""
return self.triv_anal and self.triv_anal(form)
def anal(self, form, pos, to_dict=True, preproc=False, guess=False, phon=False, segment=False,
trace=False, tracefeat=''):
return self[pos].anal(form, to_dict=to_dict, preproc=preproc, guess=guess, phon=phon, segment=segment,
trace=trace, tracefeat=tracefeat)
def gen(self, form, features, pos, from_dict=True, postproc=False, guess=False, phon=False, segment=False,
trace=False):
return self[pos].gen(form, features, from_dict=from_dict, postproc=postproc,
guess=guess, phon=phon, segment=segment, trace=trace)
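  # Hypothetical round trip through the per-POS FSTs; the POS label, word
  # forms and feature string below are invented for illustration and depend
  # entirely on the loaded language data:
  #
  #   analyses = morphology.anal('vitabu', 'n', to_dict=False)   # [[root, FSSet], ...]
  #   wordforms = morphology.gen('kitabu', '[num=pl]', 'n', from_dict=False)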
def load_fst(self, label, generate=False, create_fst=True, save=False, verbose=False):
"""Load an FST that is not associated with a particular POS."""
path = os.path.join(self.directory, label + '.cas')
casc = fst = None
if verbose: print('Looking for cascade at', path)
if os.path.exists(path):
# Load each of the FSTs in the cascade and compose them
if verbose: print('Loading cascade...')
casc = FSTCascade.load(path, seg_units=self.seg_units,
create_networks=True)
      # create_fst is False in case we just want to load the individual FSTs.
if create_fst:
if verbose:
print("Composing FST")
fst = casc.compose(backwards=True, trace=verbose, relabel=True)
if generate:
fst = fst.inverted()
if save:
FST.write(fst, filename=os.path.join(self.directory, label + '.fst'), shelve=False)
return fst
return casc
def restore_fst(self, label):
'''Return the FST with label.'''
cas_path = os.path.join(self.directory, label + '.cas')
cascade = None
if os.path.exists(cas_path):
cascade = FSTCascade.load(cas_path,
seg_units=self.seg_units, create_networks=True,
verbose=False)
if cascade != None:
# Look for the full, explicit FST
fst_file = label + '.fst'
fst_path = os.path.join(self.directory, fst_file)
if os.path.exists(fst_path):
return FST.restore_parse(self.directory, fst_file, cascade=cascade,
weighting=UNIFICATION_SR,
seg_units=self.seg_units,
create_weights=True)
def load_phon_fst(self, save=True, verbose=True):
"""Load the phon FST if there is one."""
cascade = FSTCascade.load(os.path.join(self.directory, 'phon.cas'),
seg_units=self.seg_units, create_networks=True,
verbose=verbose)
if cascade:
fst = cascade.compose(backwards=True, trace=verbose)
if fst:
fst = fst.inverted()
if save:
FST.write(fst, filename=os.path.join(self.directory, 'phon.fst'), shelve=False)
self.phon_fst = fst
return fst
def ortho2phon(self, word):
'''Attempt to convert a word to its phonetic form. (Assumes word is already romanized.)'''
if word.isdigit():
# word consists only of numbers
return [word]
if self.words_phon:
words = self.words_phon[Morphology.complex]
if not isinstance(words, dict):
print('Words dict is not loaded!')
return
phon = words.get(word, '')
if phon:
return [phon]
elif word in self.analyzed_phon:
form, fss = self.analyzed_phon[word]
return [form]
def phonetic(self, form):
'''Make a form phonetic, calling the phon FST on it.'''
fst = self.phon_fst
if fst:
phoneticized = fst.transduce(form, seg_units=self.seg_units)
if phoneticized:
return phoneticized[0][0]
return form
class POSMorphology:
"""Lists of MorphCats and GramCats, anal and gen FSTs for a particular POS class."""
# Indices for different FSTs in self.fsts
# Top level
anal_i = 0
gen_i = 1
# Indices within sublists
guess_i = 1
simp_i = 2
phon_i = 3
guessphon_i = 4
seg_i = 5
def __init__(self, pos, morphcats=[], gramfeats=[], changefeats=[]):
# A string representing part of speech
self.pos = pos
# Weight constraint on FST arcs
self.wc = None if pos == 'all' else FSSet('[pos=' + pos + ']')
# The string used as type in FSs
self.type = '%' + pos
# Ordered lists of morphcats and gramcats
self.morphcats = [MorphCat(m[0], m[1:]) for m in morphcats] if morphcats else morphcats
self.gramfeats = gramfeats
# List of changeable features (not used for AfSem)
self.changefeats = changefeats
# FSTs: [[anal, anal0, anal_S, anal_P, anal0_P, anal_Seg],
# [gen, gen0, gen_S, gen_P, gen0_P, (gen_Seg)]]
self.fsts = [[None, None, None, None, None, None], [None, None, None, None, None, None]]
# FST cascade
self.casc = None
self.casc_inv = None
self.morphology = None
# Default FS for generation
self.defaultFS = '[]'
# Default FS for citation
self.citationFS = '[]'
# Dictionary of FS implications
self.FS_implic = {}
## Functions
self.anal_to_dict = lambda root, fs: {'root': root}
self.dict_to_anal = lambda root, dct: ['', FSSet('[]')]
# Generate citation form
self.citation = None
# Analysis to string
self.anal2string = None
# Postprocess (roots might be treated specially)
self.postprocess = None
# Pair of dicts of common and irregular analyzed words: (complex, simple)
self.analyzed = ({}, {})
self.analyzed_phon = ({}, {})
# Dict of possible grammatical features and their values
self.features = {}
# List of morpheme labels
self.morphs = []
# List of most "interesting" features
self.sig_features = []
# Defective roots
self.defective = []
def get_fst(self, generate=False, guess=False, simplified=False, phon=False, segment=False):
"""The FST satisfying the parameters."""
analgen = self.fsts[self.gen_i if generate else self.anal_i]
if guess:
if phon:
fst = analgen[self.guessphon_i]
else:
fst = analgen[self.guess_i]
elif simplified:
fst = analgen[self.simp_i]
elif phon:
fst = analgen[self.phon_i]
elif segment:
fst = analgen[self.seg_i]
else:
fst = analgen[0] or analgen[self.guess_i] or analgen[self.simp_i]
return fst
def set_fst(self, fst, generate=False, guess=False, simplified=False,
phon=False, segment=False):
"""Assign the FST satisfying the parameters."""
index2 = 0
if simplified:
index2 = self.simp_i
elif guess:
if phon:
index2 = self.guessphon_i
else:
index2 = self.guess_i
elif phon:
index2 = self.phon_i
elif segment:
index2 = self.seg_i
self.fsts[self.gen_i if generate else self.anal_i][index2] = fst
def fst_name(self, generate=False, guess=False, simplified=False,
phon=False, segment=False):
"""Make a name for the FST satisfying the parameters."""
pos = self.pos
if guess:
pos += '0'
if phon:
pos += 'P'
elif simplified:
pos += '_S'
elif phon:
pos += 'P'
elif segment:
pos += '+'
if generate:
pos += 'G'
return pos
def get_analyzed(self, word, simple=False):
"""Stored analysis of word if there is one."""
if self.analyzed:
return self.analyzed[Morphology.simple if simple else Morphology.complex].get(word, None)
def set_analyzed(self, filename='analyzed.lex', ortho=True, simplify=True, verbose=False):
'''Set the dict of analyzed words, reading them in from a file, one per line.'''
if not ortho:
filename = 'analyzed_phon.lex'
path = os.path.join(self.morphology.directory, self.pos + '_' + filename)
if os.path.exists(path):
file = open(path)
if verbose:
print('Storing irregular pre-analyzed forms:', self.pos)
for line in file:
# For ortho=True, each line is
# word root FSS
# For ortho=False, each line is
# word phon root FSS
split_line = line.partition(' ')
word = split_line[0]
if not ortho:
split_line = split_line[2].strip().partition(' ')
phon = split_line[0]
split_anal = split_line[2].strip().partition(' ')
root = split_anal[0]
fs = split_anal[2]
if word and fs:
if not root:
root = word
fs = FSSet.parse(fs)
if ortho:
self.analyzed[Morphology.complex][word] = [root, fs]
else:
self.analyzed_phon[Morphology.complex][word] = [phon, root, fs]
if simplify:
word = self.morphology.simplify(word)
root = self.morphology.simplify(root)
if ortho:
self.analyzed[Morphology.simple][word] = [root, fs]
else:
self.analyzed_phon[Morphology.simple][word] = [phon, root, fs]
file.close()
def get_features(self):
'''Get the dict of grammatical features and values, generating it if {}.'''
if not self.features:
fst = self.get_fst()
if fst:
self.features = fst.get_features()
return self.features
def has_cas(self, generate=False, simplified=False, guess=False,
phon=False, segment=False):
"""Is there a cascade file for the given FST features?"""
name = self.fst_name(generate=generate, simplified=simplified,
guess=guess, phon=phon, segment=segment)
path = os.path.join(self.morphology.directory, name + '.cas')
return os.path.exists(path)
def load_fst(self, compose=False, subcasc=None, generate=False, gen=False,
recreate=False, create_fst=True,
unshelve=True, create_weights=False, guess=False,
simplified=False, phon=False, segment=False,
invert=False,
relabel=True, verbose=False):
'''Load FST; if compose is False, search for saved FST in file and use that if it exists.
If guess is true, create the lexiconless guesser FST.'''
fst = None
if verbose:
s1 = '\nAttempting to load {0} FST for {1} {2}{3}{4}'
print(s1.format(('GENERATION' if generate else 'ANALYSIS'),
self.morphology.language.label, self.pos,
(' (GUESSER)' if guess else ''),
(' (SEGMENTED)' if segment else '')))
if not compose and not recreate:
# Load a composed FST encompassing everything in the cascade
fst = FST.restore(self.pos, self.morphology.directory, seg_units=self.morphology.seg_units,
unshelve=unshelve, create_weights=create_weights, generate=generate,
empty=guess, phon=phon, segment=segment, simplified=simplified, verbose=verbose)
if fst:
self.set_fst(fst, generate, guess, simplified, phon=phon, segment=segment)
if verbose: print('... loaded')
if not self.get_fst(generate, guess, simplified, phon=phon, segment=segment) or recreate:
name = self.fst_name(generate, guess, simplified, phon=phon, segment=segment)
path = os.path.join(self.morphology.directory, name + '.cas')
if verbose: print('Looking for cascade at', path, 'subcasc', subcasc)
if os.path.exists(path):
# Load each of the FSTs in the cascade and compose them
if verbose: print('Recreating...')
self.casc = FSTCascade.load(path,
seg_units=self.morphology.seg_units,
create_networks=True, subcasc=subcasc,
weight_constraint=self.wc, verbose=verbose)
if self.morphology.fsh:
self.casc.set_init_weight(FeatStruct('[' + self.type + ']', fsh=self.morphology.fsh))
if gen:
self.casc_inv = self.casc.inverted()
if verbose:
print("Composing analysis FST")
        # create_fst is False in case we just want to load the individual FSTs.
if create_fst:
fst = self.casc.compose(backwards=True, trace=verbose, subcasc=subcasc,
relabel=relabel)
if invert:
fst = fst.inverted()
self.set_fst(fst, generate, guess, simplified, phon=phon, segment=segment)
self.casc.append(fst)
elif verbose:
print(' No cascade exists', end=' ')
if gen: print()
if gen:
if not self.load_fst(compose=compose, generate=True, gen=False,
guess=guess, simplified=simplified, phon=phon, segment=segment,
invert=True, verbose=verbose):
# Explicit generation FST not found, so invert the analysis FST
if verbose: print("... attempting to invert analysis FST")
if fst:
self.set_fst(fst.inverted(), True, guess, simplified,
phon=phon, segment=segment)
if self.get_fst(generate, guess, simplified, phon=phon, segment=segment):
# FST found one way or another
return True
def load_gen_fst(self, guess=False, simplified=False, phon=False, segment=False, verbose=False):
'''Create gen_fst(0) by inverting an existing anal_fst(0) or loading and inverting a generation FST.'''
if not self.get_fst(True, guess, simplified, phon=phon, segment=segment):
print('LOADING GEN FST FOR', self.morphology.language.label, self.pos, ('(guess)' if guess else ''))
name = self.fst_name(True, guess, simplified, phon=phon, segment=segment)
# First try to load the explicit generation FST
gen = self.load_fst(generate=True, guess=guess, simplified=simplified,
phon=phon, segment=segment,
invert=True)
if gen:
self.set_fst(gen, True, guess, simplified, phon=phon, segment=segment)
else:
# The corresponding analysis FST
anal = self.get_fst(False, guess, simplified, phon=phon, segment=segment)
if anal:
self.set_fst(anal.inverted(), True, guess, simplified, phon=phon, segment=segment)
if self.casc:
self.casc_inv = self.casc.inverted()
self.casc_inv.reverse()
def save_fst(self, generate=False, guess=False, simplified=False,
phon=False, segment=False,
features=True, shelve=False):
'''Save FST in a file.'''
fname = self.fst_name(generate=generate, guess=guess, simplified=simplified,
phon=phon, segment=segment)
extension = '.fst'
if shelve:
extension = '.shf'
fst = self.get_fst(generate=generate, guess=guess, simplified=simplified,
phon=phon, segment=segment)
directory = self.morphology.directory if not shelve else os.path.join(self.morphology.directory, 'Shf')
FST.write(fst, filename=os.path.join(directory, fname + extension), shelve=shelve,
features=features, exclude_features=['t', 'm'])
def unsave_fst(self, fst_file=True, shelve=False):
'''Get rid of saved FSTs.'''
if fst_file:
os.remove(os.path.join(self.morphology.directory, self.pos + '.fst'))
if shelve:
os.remove(os.path.join(self.morphology.directory, self.pos + '.shf'))
def anal(self, form, to_dict=False, preproc=False,
guess=False, simplified=False, phon=False, segment=False,
timeit=False, trace=False, tracefeat=''):
"""Analyze form."""
fst = self.get_fst(generate=False, guess=guess, phon=phon, segment=segment)
if guess:
if phon:
fst = self.fsts[self.anal_i][self.guessphon_i]
else:
fst = self.fsts[self.anal_i][self.guess_i]
elif simplified:
fst = self.fsts[self.anal_i][self.simp_i]
elif phon:
fst = self.fsts[self.anal_i][self.phon_i]
elif segment:
fst = self.fsts[self.anal_i][self.seg_i]
else:
fst = self.fsts[self.anal_i][0] or self.fsts[self.anal_i][self.guess_i] or self.fsts[self.anal_i][self.simp_i]
if fst:
if preproc:
# For languages with non-roman orthographies
form = self.morphology.language.preprocess(form)
# If result is same as form and guess is True, reject
anals = fst.transduce(form, seg_units=self.morphology.seg_units, reject_same=guess,
trace=trace, tracefeat=tracefeat, timeit=timeit)
if to_dict:
anals = self.anals_to_dicts(anals)
return anals
elif trace:
print('No analysis FST loaded for', self.pos)
def gen(self, root, features=None, from_dict=False, postproc=False, update_feats=None,
guess=False, simplified=False, phon=False, segment=False,
fst=None,
timeit=False, trace=False):
"""Generate word from root and features."""
features = features or self.defaultFS
if update_feats:
features = self.update_FS(FeatStruct(features), update_feats)
if not features:
return []
fst = fst or self.get_fst(generate=True, guess=guess, simplified=simplified,
phon=phon, segment=segment)
if from_dict:
# Features is a dictionary; it may contain the root if it's not specified
anal = self.dict_to_anal(root, features)
root = anal[0]
features = anal[1]
else:
features = FSSet.cast(features)
# print("Features")
# print(features)
if fst:
gens = fst.transduce(root, features, seg_units=self.morphology.seg_units, trace=trace, timeit=timeit)
if postproc:
# For languages with non-roman orthographies
for gen in gens:
# Replace the wordforms with postprocessed versions
gen[0] = self.morphology.language.postprocess(gen[0])
return gens
elif trace:
print('No generation FST loaded')
def anals_to_dicts(self, analyses):
'''Convert list of analyses to list of dicts.'''
dicts = []
for anal in analyses:
root = anal[0]
for fs in anal[1]:
dicts.append(self.anal_to_dict(root, fs))
return dicts
def anal_to_gram(self, anal, gen_root=None):
"""Convert an analysis into a list of lists of morphs and grams."""
gram = []
for a in anal:
# A single root, possibly multiple fss
root = gen_root or a[0]
# FeatStruct set
for fs in a[1]:
gram.append((self.fs_to_morphs(root, fs),
self.fs_to_feats(root, fs),
a[0]))
return gram
def postproc(self, analysis):
    '''Postprocess analysis (mutating it) according to the postprocess attribute.'''
if self.postprocess:
return self.postprocess(analysis)
else:
return analysis
def update_FS(self, fs, features, top=True):
"""Add or modify features (a FS or string) in fs."""
fs = fs.copy()
# First make sure features is a FeatStruct
if isinstance(features, str):
features = FeatStruct(features)
for key, value in features.items():
# Make True any features that are implied by key
implications = self.FS_implic.get(key, [])
# All of the implications associated with key
for implic in implications:
# Implications that are not represented as lists just mean
# to make that feature True
# (Make sure the feature doesn't have an explicit value)
if not isinstance(implic, list) and not isinstance(implic, tuple) \
and implic not in features:
fs.update({implic: True})
# Complex feature in features
if isinstance(value, FeatStruct):
# Recursively update value with value in fs for key
if key not in fs:
return []
value = self.update_FS(fs.get(key), value, top=False)
# And make True any features that must be True in value
for implic in implications:
if isinstance(implic, list):
for imp in implic:
# Make sure the feature doesn't have an explicit value
if imp not in features:
value.update({imp: True})
fs.update({key: value})
# Finally check all of the key, value pairs in self.FS_implic for
# which the key is a tuple: (feat, value)
if top:
for key, implics in self.FS_implic.items():
if isinstance(key, tuple):
# See whether this tuple represents the value of a feature
# in features
if features.get(key[0], 'missing') == key[1]:
# If so, apply the constraints, as long as they're not
# overridden by an explicit value in features
for f, v in implics:
# If v is a list, then make the value of the listed
# item in the list in fs[f] True
if isinstance(v, list):
if f in features and v[0] in features[f]:
continue
fs[f][v[0]] = True
            # If v is a tuple, then make the value of the item
# in the tuple False
elif isinstance(v, tuple):
if f in features and v[0] in features[f]:
continue
fs[f][v[0]] = False
elif f not in features:
# Otherwise treat f as feature, v as value in fs
fs[f] = v
return fs
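  # Hedged illustration of update_FS on a POSMorphology instance `posmorph`;
  # the feature names are invented, and the FS_implic implications only come
  # into play if the language defines them:
  #
  #   base = FeatStruct('[tns=prs]')
  #   updated = posmorph.update_FS(base, '[tns=pst]')
  #   # -> a copy of base with tns overridden to pst; base itself is unchanged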
def ortho2phon(self, form, guess=False):
"""Convert orthographic input to phonetic form."""
output = {}
analyzed = self.analyzed_phon[Morphology.complex].get(form, None)
if analyzed:
word, root, anals = analyzed
output[word] = [[self.pos, root, None, anal] for anal in anals]
return output
gen_fst = self.get_fst(generate=True, guess=guess, phon=True)
if not gen_fst:
return
analyses = self.anal(form, guess=guess)
if analyses:
for root, anals in analyses:
for anal in anals:
out = self.gen(root, features=anal, phon=True, fst=gen_fst)
for o in out:
word = o[0]
output[word] = output.get(word, []) + [(self.pos, root, None, anal)]
return output
def anal1(self, input, label='', casc_label='', index=0, load=False):
"""Analyze input in a single sub-FST, given its label or index in the cascade."""
return self._proc1(input, label=label, casc_label=casc_label, index=index, load=load,
anal=True)
def gen1(self, input, feats=None, fss=None, label='', casc_label='', index=0, load=False):
"""Generate a form for an input and update features or FSS in a single sub-FST,
given its label or index in the cascade."""
return self._proc1(input, feats=feats, fss=fss, label=label, casc_label=casc_label,
index=index, load=load, anal=False)
def _proc1(self, input, feats=None, fss='', label='', casc_label='', index=0,
load=False, anal=True):
"""Process a form and input features or FSS in a single sub-FST, given its label
or index in the cascade.
@param input: input to FST (wordform or root)
@type input: string
@param feats: features to add to default for generation
@type feats: string of bracketed feature-value pairs
@param fss: feature structure set (alternative to feats)
@type fss: FSSet
@param label: name of FST
@type label: string
@param casc_label: name of alternative cascade (without .cas)
@type casc_label: string
@param index: index of FST in FSTCascade
@type index: int
@param load: whether to (re)load the default cascade
@type load: boolean
@param anal: whether to do analysis as opposed to generation
@type anal: boolean
@return analyses or wordforms
@rtype list: [[string, FSSet]...]
"""
if casc_label:
casc = FSTCascade.load(os.path.join(self.morphology.directory, casc_label + '.cas'),
seg_units = self.morphology.seg_units,
create_networks=True,
weight_constraint=self.wc, verbose=True)
else:
if load:
name = self.fst_name(not anal, False, False)
self.casc = FSTCascade.load(os.path.join(self.morphology.directory, name + '.cas'),
seg_units = self.morphology.seg_units,
create_networks=True,
weight_constraint=self.wc, verbose=True)
casc = self.casc
if label:
# Find the FST with the particular label
fst = None
i = 0
      while not fst and i < len(casc):
f = casc[i]
if f.label == label:
fst = f
i += 1
else:
fst = casc[index]
if not anal and not fss:
features = FeatStruct(self.defaultFS)
if feats:
features = self.update_FS(features, feats)
fss = FSSet.cast(features)
print(('Analyzing' if anal else 'Generating'), input, 'with FST', fst.label)
if not anal:
fst = fst.inverted()
return fst.transduce(input, fss, seg_units=self.morphology.seg_units,
timeout=20)
def __str__(self):
'''Print name.'''
return self.pos + '_morphology'
def __repr__(self):
'''Print name.'''
return self.pos + '_morphology'
class MorphCat(list):
"""A list of morphs, default first."""
def __init__(self, label, *morphs):
list.__init__(self, morphs)
self.label = label
self.default = morphs[0] if morphs else '0'
class GramCat(list):
"""A list of grams."""
def __init__(self, label, *grams):
list.__init__(self, grams)
self.label = label
| gpl-3.0 |
def-/commandergenius | project/jni/python/src/Lib/plat-freebsd6/IN.py | 172 | 12416 | # Generated by h2py from /usr/include/netinet/in.h
# Included from sys/cdefs.h
__GNUCLIKE_ASM = 3
__GNUCLIKE_ASM = 2
__GNUCLIKE___TYPEOF = 1
__GNUCLIKE___OFFSETOF = 1
__GNUCLIKE___SECTION = 1
__GNUCLIKE_ATTRIBUTE_MODE_DI = 1
__GNUCLIKE_CTOR_SECTION_HANDLING = 1
__GNUCLIKE_BUILTIN_CONSTANT_P = 1
__GNUCLIKE_BUILTIN_VARARGS = 1
__GNUCLIKE_BUILTIN_STDARG = 1
__GNUCLIKE_BUILTIN_VAALIST = 1
__GNUC_VA_LIST_COMPATIBILITY = 1
__GNUCLIKE_BUILTIN_NEXT_ARG = 1
__GNUCLIKE_BUILTIN_MEMCPY = 1
__CC_SUPPORTS_INLINE = 1
__CC_SUPPORTS___INLINE = 1
__CC_SUPPORTS___INLINE__ = 1
__CC_SUPPORTS___FUNC__ = 1
__CC_SUPPORTS_WARNING = 1
__CC_SUPPORTS_VARADIC_XXX = 1
__CC_SUPPORTS_DYNAMIC_ARRAY_INIT = 1
__CC_INT_IS_32BIT = 1
def __P(protos): return protos
def __STRING(x): return #x
def __XSTRING(x): return __STRING(x)
def __P(protos): return ()
def __STRING(x): return "x"
def __aligned(x): return __attribute__((__aligned__(x)))
def __section(x): return __attribute__((__section__(x)))
def __aligned(x): return __attribute__((__aligned__(x)))
def __section(x): return __attribute__((__section__(x)))
def __nonnull(x): return __attribute__((__nonnull__(x)))
def __predict_true(exp): return __builtin_expect((exp), 1)
def __predict_false(exp): return __builtin_expect((exp), 0)
def __predict_true(exp): return (exp)
def __predict_false(exp): return (exp)
def __format_arg(fmtarg): return __attribute__((__format_arg__ (fmtarg)))
def __FBSDID(s): return __IDSTRING(__CONCAT(__rcsid_,__LINE__),s)
def __RCSID(s): return __IDSTRING(__CONCAT(__rcsid_,__LINE__),s)
def __RCSID_SOURCE(s): return __IDSTRING(__CONCAT(__rcsid_source_,__LINE__),s)
def __SCCSID(s): return __IDSTRING(__CONCAT(__sccsid_,__LINE__),s)
def __COPYRIGHT(s): return __IDSTRING(__CONCAT(__copyright_,__LINE__),s)
_POSIX_C_SOURCE = 199009
_POSIX_C_SOURCE = 199209
__XSI_VISIBLE = 600
_POSIX_C_SOURCE = 200112
__XSI_VISIBLE = 500
_POSIX_C_SOURCE = 199506
_POSIX_C_SOURCE = 198808
__POSIX_VISIBLE = 200112
__ISO_C_VISIBLE = 1999
__POSIX_VISIBLE = 199506
__ISO_C_VISIBLE = 1990
__POSIX_VISIBLE = 199309
__ISO_C_VISIBLE = 1990
__POSIX_VISIBLE = 199209
__ISO_C_VISIBLE = 1990
__POSIX_VISIBLE = 199009
__ISO_C_VISIBLE = 1990
__POSIX_VISIBLE = 198808
__ISO_C_VISIBLE = 0
__POSIX_VISIBLE = 0
__XSI_VISIBLE = 0
__BSD_VISIBLE = 0
__ISO_C_VISIBLE = 1990
__POSIX_VISIBLE = 0
__XSI_VISIBLE = 0
__BSD_VISIBLE = 0
__ISO_C_VISIBLE = 1999
__POSIX_VISIBLE = 200112
__XSI_VISIBLE = 600
__BSD_VISIBLE = 1
__ISO_C_VISIBLE = 1999
# Included from sys/_types.h
# Included from machine/_types.h
# Included from machine/endian.h
_QUAD_HIGHWORD = 1
_QUAD_LOWWORD = 0
_LITTLE_ENDIAN = 1234
_BIG_ENDIAN = 4321
_PDP_ENDIAN = 3412
_BYTE_ORDER = _LITTLE_ENDIAN
LITTLE_ENDIAN = _LITTLE_ENDIAN
BIG_ENDIAN = _BIG_ENDIAN
PDP_ENDIAN = _PDP_ENDIAN
BYTE_ORDER = _BYTE_ORDER
def __word_swap_int_var(x): return \
def __word_swap_int_const(x): return \
def __word_swap_int(x): return __word_swap_int_var(x)
def __byte_swap_int_var(x): return \
def __byte_swap_int_const(x): return \
def __byte_swap_int(x): return __byte_swap_int_var(x)
def __byte_swap_long_var(x): return \
def __byte_swap_long_const(x): return \
def __byte_swap_long(x): return __byte_swap_long_var(x)
def __byte_swap_word_var(x): return \
def __byte_swap_word_const(x): return \
def __byte_swap_word(x): return __byte_swap_word_var(x)
def __htonl(x): return __bswap32(x)
def __htons(x): return __bswap16(x)
def __ntohl(x): return __bswap32(x)
def __ntohs(x): return __bswap16(x)
IPPROTO_IP = 0
IPPROTO_ICMP = 1
IPPROTO_TCP = 6
IPPROTO_UDP = 17
def htonl(x): return __htonl(x)
def htons(x): return __htons(x)
def ntohl(x): return __ntohl(x)
def ntohs(x): return __ntohs(x)
IPPROTO_RAW = 255
INET_ADDRSTRLEN = 16
IPPROTO_HOPOPTS = 0
IPPROTO_IGMP = 2
IPPROTO_GGP = 3
IPPROTO_IPV4 = 4
IPPROTO_IPIP = IPPROTO_IPV4
IPPROTO_ST = 7
IPPROTO_EGP = 8
IPPROTO_PIGP = 9
IPPROTO_RCCMON = 10
IPPROTO_NVPII = 11
IPPROTO_PUP = 12
IPPROTO_ARGUS = 13
IPPROTO_EMCON = 14
IPPROTO_XNET = 15
IPPROTO_CHAOS = 16
IPPROTO_MUX = 18
IPPROTO_MEAS = 19
IPPROTO_HMP = 20
IPPROTO_PRM = 21
IPPROTO_IDP = 22
IPPROTO_TRUNK1 = 23
IPPROTO_TRUNK2 = 24
IPPROTO_LEAF1 = 25
IPPROTO_LEAF2 = 26
IPPROTO_RDP = 27
IPPROTO_IRTP = 28
IPPROTO_TP = 29
IPPROTO_BLT = 30
IPPROTO_NSP = 31
IPPROTO_INP = 32
IPPROTO_SEP = 33
IPPROTO_3PC = 34
IPPROTO_IDPR = 35
IPPROTO_XTP = 36
IPPROTO_DDP = 37
IPPROTO_CMTP = 38
IPPROTO_TPXX = 39
IPPROTO_IL = 40
IPPROTO_IPV6 = 41
IPPROTO_SDRP = 42
IPPROTO_ROUTING = 43
IPPROTO_FRAGMENT = 44
IPPROTO_IDRP = 45
IPPROTO_RSVP = 46
IPPROTO_GRE = 47
IPPROTO_MHRP = 48
IPPROTO_BHA = 49
IPPROTO_ESP = 50
IPPROTO_AH = 51
IPPROTO_INLSP = 52
IPPROTO_SWIPE = 53
IPPROTO_NHRP = 54
IPPROTO_MOBILE = 55
IPPROTO_TLSP = 56
IPPROTO_SKIP = 57
IPPROTO_ICMPV6 = 58
IPPROTO_NONE = 59
IPPROTO_DSTOPTS = 60
IPPROTO_AHIP = 61
IPPROTO_CFTP = 62
IPPROTO_HELLO = 63
IPPROTO_SATEXPAK = 64
IPPROTO_KRYPTOLAN = 65
IPPROTO_RVD = 66
IPPROTO_IPPC = 67
IPPROTO_ADFS = 68
IPPROTO_SATMON = 69
IPPROTO_VISA = 70
IPPROTO_IPCV = 71
IPPROTO_CPNX = 72
IPPROTO_CPHB = 73
IPPROTO_WSN = 74
IPPROTO_PVP = 75
IPPROTO_BRSATMON = 76
IPPROTO_ND = 77
IPPROTO_WBMON = 78
IPPROTO_WBEXPAK = 79
IPPROTO_EON = 80
IPPROTO_VMTP = 81
IPPROTO_SVMTP = 82
IPPROTO_VINES = 83
IPPROTO_TTP = 84
IPPROTO_IGP = 85
IPPROTO_DGP = 86
IPPROTO_TCF = 87
IPPROTO_IGRP = 88
IPPROTO_OSPFIGP = 89
IPPROTO_SRPC = 90
IPPROTO_LARP = 91
IPPROTO_MTP = 92
IPPROTO_AX25 = 93
IPPROTO_IPEIP = 94
IPPROTO_MICP = 95
IPPROTO_SCCSP = 96
IPPROTO_ETHERIP = 97
IPPROTO_ENCAP = 98
IPPROTO_APES = 99
IPPROTO_GMTP = 100
IPPROTO_IPCOMP = 108
IPPROTO_SCTP = 132
IPPROTO_PIM = 103
IPPROTO_CARP = 112
IPPROTO_PGM = 113
IPPROTO_PFSYNC = 240
IPPROTO_OLD_DIVERT = 254
IPPROTO_MAX = 256
IPPROTO_DONE = 257
IPPROTO_DIVERT = 258
IPPROTO_SPACER = 32767
IPPORT_RESERVED = 1024
IPPORT_HIFIRSTAUTO = 49152
IPPORT_HILASTAUTO = 65535
IPPORT_RESERVEDSTART = 600
IPPORT_MAX = 65535
def IN_CLASSA(i): return (((u_int32_t)(i) & 0x80000000) == 0)
IN_CLASSA_NET = 0xff000000
IN_CLASSA_NSHIFT = 24
IN_CLASSA_HOST = 0x00ffffff
IN_CLASSA_MAX = 128
def IN_CLASSB(i): return (((u_int32_t)(i) & 0xc0000000) == 0x80000000)
IN_CLASSB_NET = 0xffff0000
IN_CLASSB_NSHIFT = 16
IN_CLASSB_HOST = 0x0000ffff
IN_CLASSB_MAX = 65536
def IN_CLASSC(i): return (((u_int32_t)(i) & 0xe0000000) == 0xc0000000)
IN_CLASSC_NET = 0xffffff00
IN_CLASSC_NSHIFT = 8
IN_CLASSC_HOST = 0x000000ff
def IN_CLASSD(i): return (((u_int32_t)(i) & 0xf0000000) == 0xe0000000)
IN_CLASSD_NET = 0xf0000000
IN_CLASSD_NSHIFT = 28
IN_CLASSD_HOST = 0x0fffffff
def IN_MULTICAST(i): return IN_CLASSD(i)
def IN_EXPERIMENTAL(i): return (((u_int32_t)(i) & 0xf0000000) == 0xf0000000)
def IN_BADCLASS(i): return (((u_int32_t)(i) & 0xf0000000) == 0xf0000000)
INADDR_NONE = 0xffffffff
IN_LOOPBACKNET = 127
IP_OPTIONS = 1
IP_HDRINCL = 2
IP_TOS = 3
IP_TTL = 4
IP_RECVOPTS = 5
IP_RECVRETOPTS = 6
IP_RECVDSTADDR = 7
IP_SENDSRCADDR = IP_RECVDSTADDR
IP_RETOPTS = 8
IP_MULTICAST_IF = 9
IP_MULTICAST_TTL = 10
IP_MULTICAST_LOOP = 11
IP_ADD_MEMBERSHIP = 12
IP_DROP_MEMBERSHIP = 13
IP_MULTICAST_VIF = 14
IP_RSVP_ON = 15
IP_RSVP_OFF = 16
IP_RSVP_VIF_ON = 17
IP_RSVP_VIF_OFF = 18
IP_PORTRANGE = 19
IP_RECVIF = 20
IP_IPSEC_POLICY = 21
IP_FAITH = 22
IP_ONESBCAST = 23
IP_FW_TABLE_ADD = 40
IP_FW_TABLE_DEL = 41
IP_FW_TABLE_FLUSH = 42
IP_FW_TABLE_GETSIZE = 43
IP_FW_TABLE_LIST = 44
IP_FW_ADD = 50
IP_FW_DEL = 51
IP_FW_FLUSH = 52
IP_FW_ZERO = 53
IP_FW_GET = 54
IP_FW_RESETLOG = 55
IP_DUMMYNET_CONFIGURE = 60
IP_DUMMYNET_DEL = 61
IP_DUMMYNET_FLUSH = 62
IP_DUMMYNET_GET = 64
IP_RECVTTL = 65
IP_MINTTL = 66
IP_DONTFRAG = 67
IP_DEFAULT_MULTICAST_TTL = 1
IP_DEFAULT_MULTICAST_LOOP = 1
IP_MAX_MEMBERSHIPS = 20
IP_PORTRANGE_DEFAULT = 0
IP_PORTRANGE_HIGH = 1
IP_PORTRANGE_LOW = 2
IPPROTO_MAXID = (IPPROTO_AH + 1)
IPCTL_FORWARDING = 1
IPCTL_SENDREDIRECTS = 2
IPCTL_DEFTTL = 3
IPCTL_DEFMTU = 4
IPCTL_RTEXPIRE = 5
IPCTL_RTMINEXPIRE = 6
IPCTL_RTMAXCACHE = 7
IPCTL_SOURCEROUTE = 8
IPCTL_DIRECTEDBROADCAST = 9
IPCTL_INTRQMAXLEN = 10
IPCTL_INTRQDROPS = 11
IPCTL_STATS = 12
IPCTL_ACCEPTSOURCEROUTE = 13
IPCTL_FASTFORWARDING = 14
IPCTL_KEEPFAITH = 15
IPCTL_GIF_TTL = 16
IPCTL_MAXID = 17
def in_nullhost(x): return ((x).s_addr == INADDR_ANY)
# Included from netinet6/in6.h
__KAME_VERSION = "FreeBSD"
IPV6PORT_RESERVED = 1024
IPV6PORT_ANONMIN = 49152
IPV6PORT_ANONMAX = 65535
IPV6PORT_RESERVEDMIN = 600
IPV6PORT_RESERVEDMAX = (IPV6PORT_RESERVED-1)
INET6_ADDRSTRLEN = 46
IPV6_ADDR_INT32_ONE = 1
IPV6_ADDR_INT32_TWO = 2
IPV6_ADDR_INT32_MNL = 0xff010000
IPV6_ADDR_INT32_MLL = 0xff020000
IPV6_ADDR_INT32_SMP = 0x0000ffff
IPV6_ADDR_INT16_ULL = 0xfe80
IPV6_ADDR_INT16_USL = 0xfec0
IPV6_ADDR_INT16_MLL = 0xff02
IPV6_ADDR_INT32_ONE = 0x01000000
IPV6_ADDR_INT32_TWO = 0x02000000
IPV6_ADDR_INT32_MNL = 0x000001ff
IPV6_ADDR_INT32_MLL = 0x000002ff
IPV6_ADDR_INT32_SMP = 0xffff0000
IPV6_ADDR_INT16_ULL = 0x80fe
IPV6_ADDR_INT16_USL = 0xc0fe
IPV6_ADDR_INT16_MLL = 0x02ff
def IN6_IS_ADDR_UNSPECIFIED(a): return \
def IN6_IS_ADDR_LOOPBACK(a): return \
def IN6_IS_ADDR_V4COMPAT(a): return \
def IN6_IS_ADDR_V4MAPPED(a): return \
IPV6_ADDR_SCOPE_NODELOCAL = 0x01
IPV6_ADDR_SCOPE_INTFACELOCAL = 0x01
IPV6_ADDR_SCOPE_LINKLOCAL = 0x02
IPV6_ADDR_SCOPE_SITELOCAL = 0x05
IPV6_ADDR_SCOPE_ORGLOCAL = 0x08
IPV6_ADDR_SCOPE_GLOBAL = 0x0e
__IPV6_ADDR_SCOPE_NODELOCAL = 0x01
__IPV6_ADDR_SCOPE_INTFACELOCAL = 0x01
__IPV6_ADDR_SCOPE_LINKLOCAL = 0x02
__IPV6_ADDR_SCOPE_SITELOCAL = 0x05
__IPV6_ADDR_SCOPE_ORGLOCAL = 0x08
__IPV6_ADDR_SCOPE_GLOBAL = 0x0e
def IN6_IS_ADDR_LINKLOCAL(a): return \
def IN6_IS_ADDR_SITELOCAL(a): return \
def IN6_IS_ADDR_MC_NODELOCAL(a): return \
def IN6_IS_ADDR_MC_INTFACELOCAL(a): return \
def IN6_IS_ADDR_MC_LINKLOCAL(a): return \
def IN6_IS_ADDR_MC_SITELOCAL(a): return \
def IN6_IS_ADDR_MC_ORGLOCAL(a): return \
def IN6_IS_ADDR_MC_GLOBAL(a): return \
def IN6_IS_ADDR_MC_NODELOCAL(a): return \
def IN6_IS_ADDR_MC_LINKLOCAL(a): return \
def IN6_IS_ADDR_MC_SITELOCAL(a): return \
def IN6_IS_ADDR_MC_ORGLOCAL(a): return \
def IN6_IS_ADDR_MC_GLOBAL(a): return \
def IN6_IS_SCOPE_LINKLOCAL(a): return \
def IFA6_IS_DEPRECATED(a): return \
def IFA6_IS_INVALID(a): return \
IPV6_OPTIONS = 1
IPV6_RECVOPTS = 5
IPV6_RECVRETOPTS = 6
IPV6_RECVDSTADDR = 7
IPV6_RETOPTS = 8
IPV6_SOCKOPT_RESERVED1 = 3
IPV6_UNICAST_HOPS = 4
IPV6_MULTICAST_IF = 9
IPV6_MULTICAST_HOPS = 10
IPV6_MULTICAST_LOOP = 11
IPV6_JOIN_GROUP = 12
IPV6_LEAVE_GROUP = 13
IPV6_PORTRANGE = 14
ICMP6_FILTER = 18
IPV6_2292PKTINFO = 19
IPV6_2292HOPLIMIT = 20
IPV6_2292NEXTHOP = 21
IPV6_2292HOPOPTS = 22
IPV6_2292DSTOPTS = 23
IPV6_2292RTHDR = 24
IPV6_2292PKTOPTIONS = 25
IPV6_CHECKSUM = 26
IPV6_V6ONLY = 27
IPV6_BINDV6ONLY = IPV6_V6ONLY
IPV6_IPSEC_POLICY = 28
IPV6_FAITH = 29
IPV6_FW_ADD = 30
IPV6_FW_DEL = 31
IPV6_FW_FLUSH = 32
IPV6_FW_ZERO = 33
IPV6_FW_GET = 34
IPV6_RTHDRDSTOPTS = 35
IPV6_RECVPKTINFO = 36
IPV6_RECVHOPLIMIT = 37
IPV6_RECVRTHDR = 38
IPV6_RECVHOPOPTS = 39
IPV6_RECVDSTOPTS = 40
IPV6_RECVRTHDRDSTOPTS = 41
IPV6_USE_MIN_MTU = 42
IPV6_RECVPATHMTU = 43
IPV6_PATHMTU = 44
IPV6_REACHCONF = 45
IPV6_PKTINFO = 46
IPV6_HOPLIMIT = 47
IPV6_NEXTHOP = 48
IPV6_HOPOPTS = 49
IPV6_DSTOPTS = 50
IPV6_RTHDR = 51
IPV6_PKTOPTIONS = 52
IPV6_RECVTCLASS = 57
IPV6_AUTOFLOWLABEL = 59
IPV6_TCLASS = 61
IPV6_DONTFRAG = 62
IPV6_PREFER_TEMPADDR = 63
IPV6_RTHDR_LOOSE = 0
IPV6_RTHDR_STRICT = 1
IPV6_RTHDR_TYPE_0 = 0
IPV6_DEFAULT_MULTICAST_HOPS = 1
IPV6_DEFAULT_MULTICAST_LOOP = 1
IPV6_PORTRANGE_DEFAULT = 0
IPV6_PORTRANGE_HIGH = 1
IPV6_PORTRANGE_LOW = 2
IPV6PROTO_MAXID = (IPPROTO_PIM + 1)
IPV6CTL_FORWARDING = 1
IPV6CTL_SENDREDIRECTS = 2
IPV6CTL_DEFHLIM = 3
IPV6CTL_DEFMTU = 4
IPV6CTL_FORWSRCRT = 5
IPV6CTL_STATS = 6
IPV6CTL_MRTSTATS = 7
IPV6CTL_MRTPROTO = 8
IPV6CTL_MAXFRAGPACKETS = 9
IPV6CTL_SOURCECHECK = 10
IPV6CTL_SOURCECHECK_LOGINT = 11
IPV6CTL_ACCEPT_RTADV = 12
IPV6CTL_KEEPFAITH = 13
IPV6CTL_LOG_INTERVAL = 14
IPV6CTL_HDRNESTLIMIT = 15
IPV6CTL_DAD_COUNT = 16
IPV6CTL_AUTO_FLOWLABEL = 17
IPV6CTL_DEFMCASTHLIM = 18
IPV6CTL_GIF_HLIM = 19
IPV6CTL_KAME_VERSION = 20
IPV6CTL_USE_DEPRECATED = 21
IPV6CTL_RR_PRUNE = 22
IPV6CTL_MAPPED_ADDR = 23
IPV6CTL_V6ONLY = 24
IPV6CTL_RTEXPIRE = 25
IPV6CTL_RTMINEXPIRE = 26
IPV6CTL_RTMAXCACHE = 27
IPV6CTL_USETEMPADDR = 32
IPV6CTL_TEMPPLTIME = 33
IPV6CTL_TEMPVLTIME = 34
IPV6CTL_AUTO_LINKLOCAL = 35
IPV6CTL_RIP6STATS = 36
IPV6CTL_PREFER_TEMPADDR = 37
IPV6CTL_ADDRCTLPOLICY = 38
IPV6CTL_USE_DEFAULTZONE = 39
IPV6CTL_MAXFRAGS = 41
IPV6CTL_IFQ = 42
IPV6CTL_ISATAPRTR = 43
IPV6CTL_MCAST_PMTU = 44
IPV6CTL_STEALTH = 45
IPV6CTL_MAXID = 46
| lgpl-2.1 |
cloudera/hue | desktop/core/ext-py/SQLAlchemy-1.3.17/lib/sqlalchemy/orm/session.py | 2 | 131284 | # orm/session.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Provides the Session class and related utilities."""
import itertools
import sys
import weakref
from . import attributes
from . import exc
from . import identity
from . import loading
from . import persistence
from . import query
from . import state as statelib
from .base import _class_to_mapper
from .base import _none_set
from .base import _state_mapper
from .base import instance_str
from .base import object_mapper
from .base import object_state
from .base import state_str
from .deprecated_interfaces import SessionExtension
from .unitofwork import UOWTransaction
from .. import engine
from .. import exc as sa_exc
from .. import sql
from .. import util
from ..inspection import inspect
from ..sql import expression
from ..sql import util as sql_util
__all__ = ["Session", "SessionTransaction", "SessionExtension", "sessionmaker"]
_sessions = weakref.WeakValueDictionary()
"""Weak-referencing dictionary of :class:`.Session` objects.
"""
def _state_session(state):
"""Given an :class:`.InstanceState`, return the :class:`.Session`
associated, if any.
"""
if state.session_id:
try:
return _sessions[state.session_id]
except KeyError:
pass
return None
class _SessionClassMethods(object):
"""Class-level methods for :class:`.Session`, :class:`.sessionmaker`."""
@classmethod
@util.deprecated(
"1.3",
"The :meth:`.Session.close_all` method is deprecated and will be "
"removed in a future release. Please refer to "
":func:`.session.close_all_sessions`.",
)
def close_all(cls):
"""Close *all* sessions in memory."""
close_all_sessions()
@classmethod
@util.dependencies("sqlalchemy.orm.util")
def identity_key(cls, orm_util, *args, **kwargs):
"""Return an identity key.
This is an alias of :func:`.util.identity_key`.
"""
return orm_util.identity_key(*args, **kwargs)
@classmethod
def object_session(cls, instance):
"""Return the :class:`.Session` to which an object belongs.
This is an alias of :func:`.object_session`.
"""
return object_session(instance)
ACTIVE = util.symbol("ACTIVE")
PREPARED = util.symbol("PREPARED")
COMMITTED = util.symbol("COMMITTED")
DEACTIVE = util.symbol("DEACTIVE")
CLOSED = util.symbol("CLOSED")
class SessionTransaction(object):
"""A :class:`.Session`-level transaction.
:class:`.SessionTransaction` is a mostly behind-the-scenes object
not normally referenced directly by application code. It coordinates
among multiple :class:`_engine.Connection` objects, maintaining a database
transaction for each one individually, committing or rolling them
back all at once. It also provides optional two-phase commit behavior
which can augment this coordination operation.
The :attr:`.Session.transaction` attribute of :class:`.Session`
refers to the current :class:`.SessionTransaction` object in use, if any.
The :attr:`.SessionTransaction.parent` attribute refers to the parent
:class:`.SessionTransaction` in the stack of :class:`.SessionTransaction`
objects. If this attribute is ``None``, then this is the top of the stack.
If non-``None``, then this :class:`.SessionTransaction` refers either
to a so-called "subtransaction" or a "nested" transaction. A
"subtransaction" is a scoping concept that demarcates an inner portion
of the outermost "real" transaction. A nested transaction, which
is indicated when the :attr:`.SessionTransaction.nested`
attribute is also True, indicates that this :class:`.SessionTransaction`
corresponds to a SAVEPOINT.
**Life Cycle**
A :class:`.SessionTransaction` is associated with a :class:`.Session`
in its default mode of ``autocommit=False`` immediately, associated
with no database connections. As the :class:`.Session` is called upon
to emit SQL on behalf of various :class:`_engine.Engine` or
:class:`_engine.Connection`
objects, a corresponding :class:`_engine.Connection` and associated
:class:`.Transaction` is added to a collection within the
:class:`.SessionTransaction` object, becoming one of the
connection/transaction pairs maintained by the
:class:`.SessionTransaction`. The start of a :class:`.SessionTransaction`
can be tracked using the :meth:`.SessionEvents.after_transaction_create`
event.
The lifespan of the :class:`.SessionTransaction` ends when the
:meth:`.Session.commit`, :meth:`.Session.rollback` or
:meth:`.Session.close` methods are called. At this point, the
:class:`.SessionTransaction` removes its association with its parent
:class:`.Session`. A :class:`.Session` that is in ``autocommit=False``
mode will create a new :class:`.SessionTransaction` to replace it
immediately, whereas a :class:`.Session` that's in ``autocommit=True``
mode will remain without a :class:`.SessionTransaction` until the
:meth:`.Session.begin` method is called. The end of a
:class:`.SessionTransaction` can be tracked using the
:meth:`.SessionEvents.after_transaction_end` event.
**Nesting and Subtransactions**
Another detail of :class:`.SessionTransaction` behavior is that it is
capable of "nesting". This means that the :meth:`.Session.begin` method
can be called while an existing :class:`.SessionTransaction` is already
present, producing a new :class:`.SessionTransaction` that temporarily
replaces the parent :class:`.SessionTransaction`. When a
:class:`.SessionTransaction` is produced as nested, it assigns itself to
the :attr:`.Session.transaction` attribute, and it additionally will assign
    the previous :class:`.SessionTransaction` to its :attr:`.SessionTransaction.parent`
attribute. The behavior is effectively a
stack, where :attr:`.Session.transaction` refers to the current head of
the stack, and the :attr:`.SessionTransaction.parent` attribute allows
traversal up the stack until :attr:`.SessionTransaction.parent` is
``None``, indicating the top of the stack.
When the scope of :class:`.SessionTransaction` is ended via
:meth:`.Session.commit` or :meth:`.Session.rollback`, it restores its
parent :class:`.SessionTransaction` back onto the
:attr:`.Session.transaction` attribute.
The purpose of this stack is to allow nesting of
:meth:`.Session.rollback` or :meth:`.Session.commit` calls in context
with various flavors of :meth:`.Session.begin`. This nesting behavior
applies to when :meth:`.Session.begin_nested` is used to emit a
SAVEPOINT transaction, and is also used to produce a so-called
"subtransaction" which allows a block of code to use a
begin/rollback/commit sequence regardless of whether or not its enclosing
code block has begun a transaction. The :meth:`.flush` method, whether
called explicitly or via autoflush, is the primary consumer of the
"subtransaction" feature, in that it wishes to guarantee that it works
    within a transaction block regardless of whether or not the
:class:`.Session` is in transactional mode when the method is called.
Note that the flush process that occurs within the "autoflush" feature
as well as when the :meth:`.Session.flush` method is used **always**
creates a :class:`.SessionTransaction` object. This object is normally
a subtransaction, unless the :class:`.Session` is in autocommit mode
and no transaction exists at all, in which case it's the outermost
transaction. Any event-handling logic or other inspection logic
needs to take into account whether a :class:`.SessionTransaction`
is the outermost transaction, a subtransaction, or a "nested" / SAVEPOINT
transaction.
.. seealso::
:meth:`.Session.rollback`
:meth:`.Session.commit`
:meth:`.Session.begin`
:meth:`.Session.begin_nested`
:attr:`.Session.is_active`
:meth:`.SessionEvents.after_transaction_create`
:meth:`.SessionEvents.after_transaction_end`
:meth:`.SessionEvents.after_commit`
:meth:`.SessionEvents.after_rollback`
:meth:`.SessionEvents.after_soft_rollback`
"""
_rollback_exception = None
def __init__(self, session, parent=None, nested=False):
self.session = session
self._connections = {}
self._parent = parent
self.nested = nested
self._state = ACTIVE
if not parent and nested:
raise sa_exc.InvalidRequestError(
"Can't start a SAVEPOINT transaction when no existing "
"transaction is in progress"
)
if self.session._enable_transaction_accounting:
self._take_snapshot()
self.session.dispatch.after_transaction_create(self.session, self)
@property
def parent(self):
"""The parent :class:`.SessionTransaction` of this
:class:`.SessionTransaction`.
If this attribute is ``None``, indicates this
:class:`.SessionTransaction` is at the top of the stack, and
corresponds to a real "COMMIT"/"ROLLBACK"
block. If non-``None``, then this is either a "subtransaction"
or a "nested" / SAVEPOINT transaction. If the
:attr:`.SessionTransaction.nested` attribute is ``True``, then
this is a SAVEPOINT, and if ``False``, indicates this is a subtransaction.
.. versionadded:: 1.0.16 - use ._parent for previous versions
"""
return self._parent
nested = False
"""Indicates if this is a nested, or SAVEPOINT, transaction.
When :attr:`.SessionTransaction.nested` is True, it is expected
that :attr:`.SessionTransaction.parent` will be present (non-``None``) as well.
"""
@property
def is_active(self):
return self.session is not None and self._state is ACTIVE
def _assert_active(
self,
prepared_ok=False,
rollback_ok=False,
deactive_ok=False,
closed_msg="This transaction is closed",
):
if self._state is COMMITTED:
raise sa_exc.InvalidRequestError(
"This session is in 'committed' state; no further "
"SQL can be emitted within this transaction."
)
elif self._state is PREPARED:
if not prepared_ok:
raise sa_exc.InvalidRequestError(
"This session is in 'prepared' state; no further "
"SQL can be emitted within this transaction."
)
elif self._state is DEACTIVE:
if not deactive_ok and not rollback_ok:
if self._rollback_exception:
raise sa_exc.InvalidRequestError(
"This Session's transaction has been rolled back "
"due to a previous exception during flush."
" To begin a new transaction with this Session, "
"first issue Session.rollback()."
" Original exception was: %s"
% self._rollback_exception,
code="7s2a",
)
elif not deactive_ok:
raise sa_exc.InvalidRequestError(
"This session is in 'inactive' state, due to the "
"SQL transaction being rolled back; no further "
"SQL can be emitted within this transaction."
)
elif self._state is CLOSED:
raise sa_exc.ResourceClosedError(closed_msg)
@property
def _is_transaction_boundary(self):
return self.nested or not self._parent
def connection(self, bindkey, execution_options=None, **kwargs):
self._assert_active()
bind = self.session.get_bind(bindkey, **kwargs)
return self._connection_for_bind(bind, execution_options)
def _begin(self, nested=False):
self._assert_active()
return SessionTransaction(self.session, self, nested=nested)
def _iterate_self_and_parents(self, upto=None):
current = self
result = ()
while current:
result += (current,)
if current._parent is upto:
break
elif current._parent is None:
raise sa_exc.InvalidRequestError(
"Transaction %s is not on the active transaction list"
% (upto)
)
else:
current = current._parent
return result
def _take_snapshot(self):
if not self._is_transaction_boundary:
self._new = self._parent._new
self._deleted = self._parent._deleted
self._dirty = self._parent._dirty
self._key_switches = self._parent._key_switches
return
if not self.session._flushing:
self.session.flush()
self._new = weakref.WeakKeyDictionary()
self._deleted = weakref.WeakKeyDictionary()
self._dirty = weakref.WeakKeyDictionary()
self._key_switches = weakref.WeakKeyDictionary()
def _restore_snapshot(self, dirty_only=False):
"""Restore the restoration state taken before a transaction began.
Corresponds to a rollback.
"""
assert self._is_transaction_boundary
to_expunge = set(self._new).union(self.session._new)
self.session._expunge_states(to_expunge, to_transient=True)
for s, (oldkey, newkey) in self._key_switches.items():
# we probably can do this conditionally based on
# if we expunged or not, but safe_discard does that anyway
self.session.identity_map.safe_discard(s)
# restore the old key
s.key = oldkey
# now restore the object, but only if we didn't expunge
if s not in to_expunge:
self.session.identity_map.replace(s)
for s in set(self._deleted).union(self.session._deleted):
self.session._update_impl(s, revert_deletion=True)
assert not self.session._deleted
for s in self.session.identity_map.all_states():
if not dirty_only or s.modified or s in self._dirty:
s._expire(s.dict, self.session.identity_map._modified)
def _remove_snapshot(self):
"""Remove the restoration state taken before a transaction began.
Corresponds to a commit.
"""
assert self._is_transaction_boundary
if not self.nested and self.session.expire_on_commit:
for s in self.session.identity_map.all_states():
s._expire(s.dict, self.session.identity_map._modified)
statelib.InstanceState._detach_states(
list(self._deleted), self.session
)
self._deleted.clear()
elif self.nested:
self._parent._new.update(self._new)
self._parent._dirty.update(self._dirty)
self._parent._deleted.update(self._deleted)
self._parent._key_switches.update(self._key_switches)
def _connection_for_bind(self, bind, execution_options):
self._assert_active()
if bind in self._connections:
if execution_options:
util.warn(
"Connection is already established for the "
"given bind; execution_options ignored"
)
return self._connections[bind][0]
local_connect = False
if self._parent:
conn = self._parent._connection_for_bind(bind, execution_options)
if not self.nested:
return conn
else:
if isinstance(bind, engine.Connection):
conn = bind
if conn.engine in self._connections:
raise sa_exc.InvalidRequestError(
"Session already has a Connection associated for the "
"given Connection's Engine"
)
else:
conn = bind._contextual_connect()
local_connect = True
try:
if execution_options:
conn = conn.execution_options(**execution_options)
if self.session.twophase and self._parent is None:
transaction = conn.begin_twophase()
elif self.nested:
transaction = conn.begin_nested()
else:
transaction = conn.begin()
except:
# connection will not be associated with this Session;
# close it immediately so that it isn't closed under GC
if local_connect:
conn.close()
raise
else:
self._connections[conn] = self._connections[conn.engine] = (
conn,
transaction,
conn is not bind,
)
self.session.dispatch.after_begin(self.session, self, conn)
return conn
def prepare(self):
if self._parent is not None or not self.session.twophase:
raise sa_exc.InvalidRequestError(
"'twophase' mode not enabled, or not root transaction; "
"can't prepare."
)
self._prepare_impl()
def _prepare_impl(self):
self._assert_active()
if self._parent is None or self.nested:
self.session.dispatch.before_commit(self.session)
stx = self.session.transaction
if stx is not self:
for subtransaction in stx._iterate_self_and_parents(upto=self):
subtransaction.commit()
if not self.session._flushing:
for _flush_guard in range(100):
if self.session._is_clean():
break
self.session.flush()
else:
raise exc.FlushError(
"Over 100 subsequent flushes have occurred within "
"session.commit() - is an after_flush() hook "
"creating new objects?"
)
if self._parent is None and self.session.twophase:
try:
for t in set(self._connections.values()):
t[1].prepare()
except:
with util.safe_reraise():
self.rollback()
self._state = PREPARED
def commit(self):
self._assert_active(prepared_ok=True)
if self._state is not PREPARED:
self._prepare_impl()
if self._parent is None or self.nested:
for t in set(self._connections.values()):
t[1].commit()
self._state = COMMITTED
self.session.dispatch.after_commit(self.session)
if self.session._enable_transaction_accounting:
self._remove_snapshot()
self.close()
return self._parent
def rollback(self, _capture_exception=False):
self._assert_active(prepared_ok=True, rollback_ok=True)
stx = self.session.transaction
if stx is not self:
for subtransaction in stx._iterate_self_and_parents(upto=self):
subtransaction.close()
boundary = self
rollback_err = None
if self._state in (ACTIVE, PREPARED):
for transaction in self._iterate_self_and_parents():
if transaction._parent is None or transaction.nested:
try:
for t in set(transaction._connections.values()):
t[1].rollback()
transaction._state = DEACTIVE
self.session.dispatch.after_rollback(self.session)
except:
rollback_err = sys.exc_info()
finally:
transaction._state = DEACTIVE
if self.session._enable_transaction_accounting:
transaction._restore_snapshot(
dirty_only=transaction.nested
)
boundary = transaction
break
else:
transaction._state = DEACTIVE
sess = self.session
if (
not rollback_err
and sess._enable_transaction_accounting
and not sess._is_clean()
):
# if items were added, deleted, or mutated
# here, we need to re-restore the snapshot
util.warn(
"Session's state has been changed on "
"a non-active transaction - this state "
"will be discarded."
)
boundary._restore_snapshot(dirty_only=boundary.nested)
self.close()
if self._parent and _capture_exception:
self._parent._rollback_exception = sys.exc_info()[1]
if rollback_err:
util.raise_(rollback_err[1], with_traceback=rollback_err[2])
sess.dispatch.after_soft_rollback(sess, self)
return self._parent
def close(self, invalidate=False):
self.session.transaction = self._parent
if self._parent is None:
for connection, transaction, autoclose in set(
self._connections.values()
):
if invalidate:
connection.invalidate()
if autoclose:
connection.close()
else:
transaction.close()
self._state = CLOSED
self.session.dispatch.after_transaction_end(self.session, self)
if self._parent is None:
if not self.session.autocommit:
self.session.begin()
self.session = None
self._connections = None
def __enter__(self):
return self
def __exit__(self, type_, value, traceback):
self._assert_active(deactive_ok=True, prepared_ok=True)
if self.session.transaction is None:
return
if type_ is None:
try:
self.commit()
except:
with util.safe_reraise():
self.rollback()
else:
self.rollback()
class Session(_SessionClassMethods):
"""Manages persistence operations for ORM-mapped objects.
The Session's usage paradigm is described at :doc:`/orm/session`.
"""
public_methods = (
"__contains__",
"__iter__",
"add",
"add_all",
"begin",
"begin_nested",
"close",
"commit",
"connection",
"delete",
"execute",
"expire",
"expire_all",
"expunge",
"expunge_all",
"flush",
"get_bind",
"is_modified",
"bulk_save_objects",
"bulk_insert_mappings",
"bulk_update_mappings",
"merge",
"query",
"refresh",
"rollback",
"scalar",
)
@util.deprecated_params(
weak_identity_map=(
"1.0",
"The :paramref:`.Session.weak_identity_map` parameter as well as "
"the strong-referencing identity map are deprecated, and will be "
"removed in a future release. For the use case where objects "
"present in a :class:`.Session` need to be automatically strong "
"referenced, see the recipe at "
":ref:`session_referencing_behavior` for an event-based approach "
"to maintaining strong identity references. ",
),
_enable_transaction_accounting=(
"0.7",
"The :paramref:`.Session._enable_transaction_accounting` "
"parameter is deprecated and will be removed in a future release.",
),
extension=(
"0.7",
":class:`.SessionExtension` is deprecated in favor of the "
":class:`.SessionEvents` listener interface. The "
":paramref:`.Session.extension` parameter will be "
"removed in a future release.",
),
)
def __init__(
self,
bind=None,
autoflush=True,
expire_on_commit=True,
_enable_transaction_accounting=True,
autocommit=False,
twophase=False,
weak_identity_map=None,
binds=None,
extension=None,
enable_baked_queries=True,
info=None,
query_cls=None,
):
r"""Construct a new Session.
See also the :class:`.sessionmaker` function which is used to
generate a :class:`.Session`-producing callable with a given
set of arguments.
:param autocommit:
.. warning::
The autocommit flag is **not for general use**, and if it is
used, queries should only be invoked within the span of a
:meth:`.Session.begin` / :meth:`.Session.commit` pair. Executing
queries outside of a demarcated transaction is a legacy mode
of usage, and can in some cases lead to concurrent connection
checkouts.
Defaults to ``False``. When ``True``, the
:class:`.Session` does not keep a persistent transaction running,
and will acquire connections from the engine on an as-needed basis,
returning them immediately after their use. Flushes will begin and
commit (or possibly roll back) their own transaction if no
transaction is present. When using this mode, the
:meth:`.Session.begin` method is used to explicitly start
transactions.
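A minimal sketch of this legacy pattern (``Session`` stands for a
:class:`.sessionmaker` configured with ``autocommit=True``;
``SomeClass`` is a placeholder mapped class)::
    session = Session()          # assumed sessionmaker with autocommit=True
    session.begin()
    try:
        obj = session.query(SomeClass).first()   # SomeClass is hypothetical
        obj.data = "new value"
        session.commit()
    except:
        session.rollback()
        raise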
.. seealso::
:ref:`session_autocommit`
:param autoflush: When ``True``, all query operations will issue a
:meth:`~.Session.flush` call to this ``Session`` before proceeding.
This is a convenience feature so that :meth:`~.Session.flush` need
not be called repeatedly in order for database queries to retrieve
results. It's typical that ``autoflush`` is used in conjunction
with ``autocommit=False``. In this scenario, explicit calls to
:meth:`~.Session.flush` are rarely needed; you usually only need to
call :meth:`~.Session.commit` (which flushes) to finalize changes.
:param bind: An optional :class:`_engine.Engine` or
:class:`_engine.Connection` to
which this ``Session`` should be bound. When specified, all SQL
operations performed by this session will execute via this
connectable.
:param binds: A dictionary which may specify any number of
:class:`_engine.Engine` or :class:`_engine.Connection`
objects as the source of
connectivity for SQL operations on a per-entity basis. The keys
of the dictionary consist of any series of mapped classes,
arbitrary Python classes that are bases for mapped classes,
:class:`_schema.Table` objects and :class:`_orm.Mapper` objects.
The
values of the dictionary are then instances of
:class:`_engine.Engine`
or less commonly :class:`_engine.Connection` objects.
Operations which
proceed relative to a particular mapped class will consult this
dictionary for the closest matching entity in order to determine
which :class:`_engine.Engine` should be used for a particular SQL
operation. The complete heuristics for resolution are
described at :meth:`.Session.get_bind`. Usage looks like::
Session = sessionmaker(binds={
SomeMappedClass: create_engine('postgresql://engine1'),
SomeDeclarativeBase: create_engine('postgresql://engine2'),
some_mapper: create_engine('postgresql://engine3'),
some_table: create_engine('postgresql://engine4'),
})
.. seealso::
:ref:`session_partitioning`
:meth:`.Session.bind_mapper`
:meth:`.Session.bind_table`
:meth:`.Session.get_bind`
:param \class_: Specify an alternate class other than
``sqlalchemy.orm.session.Session`` which should be used by the
returned class. This is the only argument that is local to the
:class:`.sessionmaker` function, and is not sent directly to the
constructor for ``Session``.
:param enable_baked_queries: defaults to ``True``. A flag consumed
by the :mod:`sqlalchemy.ext.baked` extension to determine if
"baked queries" should be cached, as is the normal operation
of this extension. When set to ``False``, all caching is disabled,
including baked queries defined by the calling application as
well as those used internally. Setting this flag to ``False``
can significantly reduce memory use; however, it will also degrade
performance for those areas that make use of baked queries
(such as relationship loaders). Additionally, baked query
logic in the calling application or potentially within the ORM
that may be malfunctioning due to cache key collisions or similar
can be flagged by observing if this flag resolves the issue.
.. versionadded:: 1.2
:param _enable_transaction_accounting: A
legacy-only flag which when ``False`` disables *all* 0.5-style
object accounting on transaction boundaries.
:param expire_on_commit: Defaults to ``True``. When ``True``, all
instances will be fully expired after each :meth:`~.commit`,
so that all attribute/object access subsequent to a completed
transaction will load from the most recent database state.
:param extension: An optional
:class:`~.SessionExtension` instance, or a list
of such instances, which will receive pre- and post- commit and
flush events, as well as a post-rollback event.
:param info: optional dictionary of arbitrary data to be associated
with this :class:`.Session`. Is available via the
:attr:`.Session.info` attribute. Note the dictionary is copied at
construction time so that modifications to the per-
:class:`.Session` dictionary will be local to that
:class:`.Session`.
.. versionadded:: 0.9.0
:param query_cls: Class which should be used to create new Query
objects, as returned by the :meth:`~.Session.query` method.
Defaults to :class:`_query.Query`.
:param twophase: When ``True``, all transactions will be started as
a "two phase" transaction, i.e. using the "two phase" semantics
of the database in use along with an XID. During a
:meth:`~.commit`, after :meth:`~.flush` has been issued for all
attached databases, the :meth:`~.TwoPhaseTransaction.prepare`
method on each database's :class:`.TwoPhaseTransaction` will be
called. This allows each database to roll back the entire
transaction, before each transaction is committed.
:param weak_identity_map: Defaults to ``True`` - when set to
``False``, objects placed in the :class:`.Session` will be
strongly referenced until explicitly removed or the
:class:`.Session` is closed.
"""
if weak_identity_map in (True, None):
self._identity_cls = identity.WeakInstanceDict
else:
self._identity_cls = identity.StrongInstanceDict
self.identity_map = self._identity_cls()
self._new = {} # InstanceState->object, strong refs object
self._deleted = {} # same
self.bind = bind
self.__binds = {}
self._flushing = False
self._warn_on_events = False
self.transaction = None
self.hash_key = _new_sessionid()
self.autoflush = autoflush
self.autocommit = autocommit
self.expire_on_commit = expire_on_commit
self.enable_baked_queries = enable_baked_queries
self._enable_transaction_accounting = _enable_transaction_accounting
self.twophase = twophase
self._query_cls = query_cls if query_cls else query.Query
if info:
self.info.update(info)
if extension:
for ext in util.to_list(extension):
SessionExtension._adapt_listener(self, ext)
if binds is not None:
for key, bind in binds.items():
self._add_bind(key, bind)
if not self.autocommit:
self.begin()
_sessions[self.hash_key] = self
connection_callable = None
transaction = None
"""The current active or inactive :class:`.SessionTransaction`."""
@util.memoized_property
def info(self):
"""A user-modifiable dictionary.
The initial value of this dictionary can be populated using the
``info`` argument to the :class:`.Session` constructor or
:class:`.sessionmaker` constructor or factory methods. The dictionary
here is always local to this :class:`.Session` and can be modified
independently of all other :class:`.Session` objects.
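E.g. (a sketch; the keys shown are arbitrary application data)::
    session = Session(info={"request_id": "abc-123"})   # keys are placeholders
    session.info["user"] = "some_user"
    assert session.info["request_id"] == "abc-123"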
.. versionadded:: 0.9.0
"""
return {}
def begin(self, subtransactions=False, nested=False):
"""Begin a transaction on this :class:`.Session`.
.. warning::
The :meth:`.Session.begin` method is part of a larger pattern
of use with the :class:`.Session` known as **autocommit mode**.
This is essentially a **legacy mode of use** and is
not necessary for new applications. The :class:`.Session`
normally handles the work of "begin" transparently, which in
turn relies upon the Python DBAPI to transparently "begin"
transactions; there is **no need to explicitly begin transactions**
when using modern :class:`.Session` programming patterns.
In its default mode of ``autocommit=False``, the
:class:`.Session` does all of its work within
the context of a transaction, so as soon as you call
:meth:`.Session.commit`, the next transaction is implicitly
started when the next database operation is invoked. See
:ref:`session_autocommit` for further background.
The method will raise an error if this :class:`.Session` is already
inside of a transaction, unless
:paramref:`~.Session.begin.subtransactions` or
:paramref:`~.Session.begin.nested` are specified. A "subtransaction"
is essentially a code embedding pattern that does not affect the
transactional state of the database connection unless a rollback is
emitted, in which case the whole transaction is rolled back. For
documentation on subtransactions, please see
:ref:`session_subtransactions`.
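As an illustration of the subtransaction pattern (a sketch only;
``SomeObject`` and the enclosing functions are placeholders, not part
of this module)::
    def method_a(session):
        session.begin(subtransactions=True)
        try:
            method_b(session)
            session.commit()     # commits only if this is the outermost block
        except:
            session.rollback()
            raise
    def method_b(session):
        session.begin(subtransactions=True)
        try:
            session.add(SomeObject())   # SomeObject is a placeholder mapped class
            session.commit()            # here, closes only the subtransaction
        except:
            session.rollback()
            raise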
:param subtransactions: if True, indicates that this
:meth:`~.Session.begin` can create a "subtransaction".
:param nested: if True, begins a SAVEPOINT transaction and is
equivalent to calling :meth:`~.Session.begin_nested`. For
documentation on SAVEPOINT transactions, please see
:ref:`session_begin_nested`.
:return: the :class:`.SessionTransaction` object. Note that
:class:`.SessionTransaction`
acts as a Python context manager, allowing :meth:`.Session.begin`
to be used in a "with" block. See :ref:`session_autocommit` for
an example.
.. seealso::
:ref:`session_autocommit`
:meth:`.Session.begin_nested`
"""
if self.transaction is not None:
if subtransactions or nested:
self.transaction = self.transaction._begin(nested=nested)
else:
raise sa_exc.InvalidRequestError(
"A transaction is already begun. Use "
"subtransactions=True to allow subtransactions."
)
else:
self.transaction = SessionTransaction(self, nested=nested)
return self.transaction # needed for __enter__/__exit__ hook
def begin_nested(self):
"""Begin a "nested" transaction on this Session, e.g. SAVEPOINT.
The target database(s) and associated drivers must support SQL
SAVEPOINT for this method to function correctly.
For documentation on SAVEPOINT
transactions, please see :ref:`session_begin_nested`.
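E.g. (a sketch; ``u1``, ``u2``, ``u3`` are assumed to be instances of
some mapped class)::
    session.add(u1)
    session.add(u2)
    session.begin_nested()     # establish a SAVEPOINT
    session.add(u3)
    session.rollback()         # rolls back u3 only; u1 and u2 remain pending
    session.commit()           # commits u1 and u2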
:return: the :class:`.SessionTransaction` object. Note that
:class:`.SessionTransaction` acts as a context manager, allowing
:meth:`.Session.begin_nested` to be used in a "with" block.
See :ref:`session_begin_nested` for a usage example.
.. seealso::
:ref:`session_begin_nested`
:ref:`pysqlite_serializable` - special workarounds required
with the SQLite driver in order for SAVEPOINT to work
correctly.
"""
return self.begin(nested=True)
def rollback(self):
"""Rollback the current transaction in progress.
If no transaction is in progress, this method is a pass-through.
This method rolls back the current transaction or nested transaction
regardless of subtransactions being in effect. All subtransactions up
to the first real transaction are closed. Subtransactions occur when
:meth:`.begin` is called multiple times.
.. seealso::
:ref:`session_rollback`
"""
if self.transaction is None:
pass
else:
self.transaction.rollback()
def commit(self):
"""Flush pending changes and commit the current transaction.
If no transaction is in progress, this method raises an
:exc:`~sqlalchemy.exc.InvalidRequestError`.
By default, the :class:`.Session` also expires all database
loaded state on all ORM-managed attributes after transaction commit.
This is so that subsequent operations load the most recent
data from the database. This behavior can be disabled using
the ``expire_on_commit=False`` option to :class:`.sessionmaker` or
the :class:`.Session` constructor.
If a subtransaction is in effect (which occurs when begin() is called
multiple times), the subtransaction will be closed, and the next call
to ``commit()`` will operate on the enclosing transaction.
When using the :class:`.Session` in its default mode of
``autocommit=False``, a new transaction will
be begun immediately after the commit, but note that the newly begun
transaction does *not* use any connection resources until the first
SQL is actually emitted.
.. seealso::
:ref:`session_committing`
"""
if self.transaction is None:
if not self.autocommit:
self.begin()
else:
raise sa_exc.InvalidRequestError("No transaction is begun.")
self.transaction.commit()
def prepare(self):
"""Prepare the current transaction in progress for two phase commit.
If no transaction is in progress, this method raises an
:exc:`~sqlalchemy.exc.InvalidRequestError`.
Only root transactions of two phase sessions can be prepared. If the
current transaction is not such, an
:exc:`~sqlalchemy.exc.InvalidRequestError` is raised.
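A rough sketch of an explicit two-phase flow (``ClassA``, ``ClassB``,
``engine_one`` and ``engine_two`` are placeholders; :meth:`~.commit`
performs the prepare step implicitly if it has not already been called)::
    session = Session(twophase=True,
                      binds={ClassA: engine_one, ClassB: engine_two})
    # ... add or modify objects mapped to both databases ...
    session.prepare()   # flushes, then prepares each participating transaction
    session.commit()    # completes the two-phase commit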
"""
if self.transaction is None:
if not self.autocommit:
self.begin()
else:
raise sa_exc.InvalidRequestError("No transaction is begun.")
self.transaction.prepare()
def connection(
self,
mapper=None,
clause=None,
bind=None,
close_with_result=False,
execution_options=None,
**kw
):
r"""Return a :class:`_engine.Connection` object corresponding to this
:class:`.Session` object's transactional state.
If this :class:`.Session` is configured with ``autocommit=False``,
either the :class:`_engine.Connection` corresponding to the current
transaction is returned, or if no transaction is in progress, a new
one is begun and the :class:`_engine.Connection`
returned (note that no
transactional state is established with the DBAPI until the first
SQL statement is emitted).
Alternatively, if this :class:`.Session` is configured with
``autocommit=True``, an ad-hoc :class:`_engine.Connection` is returned
using :meth:`_engine.Engine.connect` on the underlying
:class:`_engine.Engine`.
Ambiguity in multi-bind or unbound :class:`.Session` objects can be
resolved through any of the optional keyword arguments. This
ultimately makes use of the :meth:`.get_bind` method for resolution.
:param bind:
Optional :class:`_engine.Engine` to be used as the bind. If
this engine is already involved in an ongoing transaction,
that connection will be used. This argument takes precedence
over ``mapper``, ``clause``.
:param mapper:
Optional :func:`.mapper` mapped class, used to identify
the appropriate bind. This argument takes precedence over
``clause``.
:param clause:
A :class:`_expression.ClauseElement` (i.e.
:func:`_expression.select`,
:func:`_expression.text`,
etc.) which will be used to locate a bind, if a bind
cannot otherwise be identified.
:param close_with_result: Passed to :meth:`_engine.Engine.connect`,
indicating the :class:`_engine.Connection` should be considered
"single use", automatically closing when the first result set is
closed. This flag only has an effect if this :class:`.Session` is
configured with ``autocommit=True`` and does not already have a
transaction in progress.
:param execution_options: a dictionary of execution options that will
be passed to :meth:`_engine.Connection.execution_options`, **when the
connection is first procured only**. If the connection is already
present within the :class:`.Session`, a warning is emitted and
the arguments are ignored.
.. versionadded:: 0.9.9
.. seealso::
:ref:`session_transaction_isolation`
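For example, a transaction-level isolation setting can be suggested
when the connection is first procured (a sketch; the available
isolation level names depend on the backend in use)::
    session.connection(
        execution_options={"isolation_level": "SERIALIZABLE"})
    # ... all work in this transaction now uses the connection above ...
    session.commit()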
:param \**kw:
Additional keyword arguments are sent to :meth:`get_bind()`,
allowing additional arguments to be passed to custom
implementations of :meth:`get_bind`.
"""
if bind is None:
bind = self.get_bind(mapper, clause=clause, **kw)
return self._connection_for_bind(
bind,
close_with_result=close_with_result,
execution_options=execution_options,
)
def _connection_for_bind(self, engine, execution_options=None, **kw):
if self.transaction is not None:
return self.transaction._connection_for_bind(
engine, execution_options
)
else:
conn = engine._contextual_connect(**kw)
if execution_options:
conn = conn.execution_options(**execution_options)
return conn
def execute(self, clause, params=None, mapper=None, bind=None, **kw):
r"""Execute a SQL expression construct or string statement within
the current transaction.
Returns a :class:`_engine.ResultProxy` representing
results of the statement execution, in the same manner as that of an
:class:`_engine.Engine` or
:class:`_engine.Connection`.
E.g.::
result = session.execute(
user_table.select().where(user_table.c.id == 5)
)
:meth:`~.Session.execute` accepts any executable clause construct,
such as :func:`_expression.select`,
:func:`_expression.insert`,
:func:`_expression.update`,
:func:`_expression.delete`, and
:func:`_expression.text`. Plain SQL strings can be passed
as well, which in the case of :meth:`.Session.execute` only
will be interpreted the same as if it were passed via a
:func:`_expression.text` construct. That is, the following usage::
result = session.execute(
"SELECT * FROM user WHERE id=:param",
{"param":5}
)
is equivalent to::
from sqlalchemy import text
result = session.execute(
text("SELECT * FROM user WHERE id=:param"),
{"param":5}
)
The second positional argument to :meth:`.Session.execute` is an
optional parameter set. Similar to that of
:meth:`_engine.Connection.execute`, whether this is passed as a single
dictionary, or a sequence of dictionaries, determines whether the DBAPI
cursor's ``execute()`` or ``executemany()`` is used to execute the
statement. An INSERT construct may be invoked for a single row::
result = session.execute(
users.insert(), {"id": 7, "name": "somename"})
or for multiple rows::
result = session.execute(users.insert(), [
{"id": 7, "name": "somename7"},
{"id": 8, "name": "somename8"},
{"id": 9, "name": "somename9"}
])
The statement is executed within the current transactional context of
this :class:`.Session`. The :class:`_engine.Connection`
which is used
to execute the statement can also be acquired directly by
calling the :meth:`.Session.connection` method. Both methods use
a rule-based resolution scheme in order to determine the
:class:`_engine.Connection`,
which in the average case is derived directly
from the "bind" of the :class:`.Session` itself, and in other cases
can be based on the :func:`.mapper`
and :class:`_schema.Table` objects passed to the method; see the
documentation for :meth:`.Session.get_bind` for a full description of
this scheme.
The :meth:`.Session.execute` method does *not* invoke autoflush.
The :class:`_engine.ResultProxy` returned by the
:meth:`.Session.execute`
method is returned with the "close_with_result" flag set to true;
the significance of this flag is that if this :class:`.Session` is
autocommitting and does not have a transaction-dedicated
:class:`_engine.Connection` available, a temporary
:class:`_engine.Connection` is
established for the statement execution, which is closed (meaning,
returned to the connection pool) when the :class:`_engine.ResultProxy`
has
consumed all available data. This applies *only* when the
:class:`.Session` is configured with autocommit=True and no
transaction has been started.
:param clause:
An executable statement (i.e. an :class:`.Executable` expression
such as :func:`_expression.select`) or string SQL statement
to be executed.
:param params:
Optional dictionary, or list of dictionaries, containing
bound parameter values. If a single dictionary, single-row
execution occurs; if a list of dictionaries, an
"executemany" will be invoked. The keys in each dictionary
must correspond to parameter names present in the statement.
:param mapper:
Optional :func:`.mapper` or mapped class, used to identify
the appropriate bind. This argument takes precedence over
``clause`` when locating a bind. See :meth:`.Session.get_bind`
for more details.
:param bind:
Optional :class:`_engine.Engine` to be used as the bind. If
this engine is already involved in an ongoing transaction,
that connection will be used. This argument takes
precedence over ``mapper`` and ``clause`` when locating
a bind.
:param \**kw:
Additional keyword arguments are sent to :meth:`.Session.get_bind()`
to allow extensibility of "bind" schemes.
.. seealso::
:ref:`sqlexpression_toplevel` - Tutorial on using Core SQL
constructs.
:ref:`connections_toplevel` - Further information on direct
statement execution.
:meth:`_engine.Connection.execute`
- core level statement execution
method, which is :meth:`.Session.execute` ultimately uses
in order to execute the statement.
"""
clause = expression._literal_as_text(
clause, allow_coercion_to_text=True
)
if bind is None:
bind = self.get_bind(mapper, clause=clause, **kw)
return self._connection_for_bind(bind, close_with_result=True).execute(
clause, params or {}
)
def scalar(self, clause, params=None, mapper=None, bind=None, **kw):
"""Like :meth:`~.Session.execute` but return a scalar result."""
return self.execute(
clause, params=params, mapper=mapper, bind=bind, **kw
).scalar()
def close(self):
"""Close this Session.
This clears all items and ends any transaction in progress.
If this session were created with ``autocommit=False``, a new
transaction is immediately begun. Note that this new transaction does
not use any connection resources until they are first needed.
"""
self._close_impl(invalidate=False)
def invalidate(self):
"""Close this Session, using connection invalidation.
This is a variant of :meth:`.Session.close` that will additionally
ensure that the :meth:`_engine.Connection.invalidate`
method will be called
on all :class:`_engine.Connection` objects. This can be called when
the database is known to be in a state where the connections are
no longer safe to be used.
E.g.::
try:
sess = Session()
sess.add(User())
sess.commit()
except gevent.Timeout:
sess.invalidate()
raise
except:
sess.rollback()
raise
This clears all items and ends any transaction in progress.
If this session were created with ``autocommit=False``, a new
transaction is immediately begun. Note that this new transaction does
not use any connection resources until they are first needed.
.. versionadded:: 0.9.9
"""
self._close_impl(invalidate=True)
def _close_impl(self, invalidate):
self.expunge_all()
if self.transaction is not None:
for transaction in self.transaction._iterate_self_and_parents():
transaction.close(invalidate)
def expunge_all(self):
"""Remove all object instances from this ``Session``.
This is equivalent to calling ``expunge(obj)`` on all objects in this
``Session``.
"""
all_states = self.identity_map.all_states() + list(self._new)
self.identity_map = self._identity_cls()
self._new = {}
self._deleted = {}
statelib.InstanceState._detach_states(all_states, self)
def _add_bind(self, key, bind):
try:
insp = inspect(key)
except sa_exc.NoInspectionAvailable as err:
if not isinstance(key, type):
util.raise_(
sa_exc.ArgumentError(
"Not an acceptable bind target: %s" % key
),
replace_context=err,
)
else:
self.__binds[key] = bind
else:
if insp.is_selectable:
self.__binds[insp] = bind
elif insp.is_mapper:
self.__binds[insp.class_] = bind
for selectable in insp._all_tables:
self.__binds[selectable] = bind
else:
raise sa_exc.ArgumentError(
"Not an acceptable bind target: %s" % key
)
def bind_mapper(self, mapper, bind):
"""Associate a :class:`_orm.Mapper` or arbitrary Python class with a
"bind", e.g. an :class:`_engine.Engine` or :class:`_engine.Connection`
.
The given entity is added to a lookup used by the
:meth:`.Session.get_bind` method.
:param mapper: a :class:`_orm.Mapper` object,
or an instance of a mapped
class, or any Python class that is the base of a set of mapped
classes.
:param bind: an :class:`_engine.Engine` or :class:`_engine.Connection`
object.
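E.g. (``User`` and ``user_engine`` are assumed to be defined by the
calling application)::
    session.bind_mapper(User, user_engine)
    # queries and flushes against User now resolve to user_engine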
.. seealso::
:ref:`session_partitioning`
:paramref:`.Session.binds`
:meth:`.Session.bind_table`
"""
self._add_bind(mapper, bind)
def bind_table(self, table, bind):
"""Associate a :class:`_schema.Table` with a "bind", e.g. an
:class:`_engine.Engine`
or :class:`_engine.Connection`.
The given :class:`_schema.Table` is added to a lookup used by the
:meth:`.Session.get_bind` method.
:param table: a :class:`_schema.Table` object,
which is typically the target
of an ORM mapping, or is present within a selectable that is
mapped.
:param bind: an :class:`_engine.Engine` or :class:`_engine.Connection`
object.
.. seealso::
:ref:`session_partitioning`
:paramref:`.Session.binds`
:meth:`.Session.bind_mapper`
"""
self._add_bind(table, bind)
def get_bind(self, mapper=None, clause=None):
"""Return a "bind" to which this :class:`.Session` is bound.
The "bind" is usually an instance of :class:`_engine.Engine`,
except in the case where the :class:`.Session` has been
explicitly bound directly to a :class:`_engine.Connection`.
For a multiply-bound or unbound :class:`.Session`, the
``mapper`` or ``clause`` arguments are used to determine the
appropriate bind to return.
Note that the "mapper" argument is usually present
when :meth:`.Session.get_bind` is called via an ORM
operation such as a :meth:`.Session.query`, each
individual INSERT/UPDATE/DELETE operation within a
:meth:`.Session.flush` call, etc.
The order of resolution is:
1. if mapper given and session.binds is present,
locate a bind based first on the mapper in use, then
on the mapped class in use, then on any base classes that are
present in the ``__mro__`` of the mapped class, from more specific
superclasses to more general.
2. if clause given and session.binds is present,
locate a bind based on :class:`_schema.Table` objects
found in the given clause present in session.binds.
3. if session.bind is present, return that.
4. if clause given, attempt to return a bind
linked to the :class:`_schema.MetaData` ultimately
associated with the clause.
5. if mapper given, attempt to return a bind
linked to the :class:`_schema.MetaData` ultimately
associated with the :class:`_schema.Table` or other
selectable to which the mapper is mapped.
6. No bind can be found, :exc:`~sqlalchemy.exc.UnboundExecutionError`
is raised.
Note that the :meth:`.Session.get_bind` method can be overridden on
a user-defined subclass of :class:`.Session` to provide any kind
of bind resolution scheme. See the example at
:ref:`session_custom_partitioning`.
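A rough sketch of such an override (``engines`` is an assumed dictionary
of :class:`_engine.Engine` objects; the routing rule and class names shown
are arbitrary placeholders)::
    class RoutingSession(Session):
        def get_bind(self, mapper=None, clause=None):
            # route read-only entities to a replica, everything else to primary
            if mapper is not None and issubclass(mapper.class_, ReadOnlyBase):
                return engines["replica"]
            else:
                return engines["primary"]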
:param mapper:
Optional :func:`.mapper` mapped class or instance of
:class:`_orm.Mapper`. The bind can be derived from a
:class:`_orm.Mapper`
first by consulting the "binds" map associated with this
:class:`.Session`, and secondly by consulting the
:class:`_schema.MetaData`
associated with the :class:`_schema.Table` to which the
:class:`_orm.Mapper`
is mapped for a bind.
:param clause:
A :class:`_expression.ClauseElement` (i.e.
:func:`_expression.select`,
:func:`_expression.text`,
etc.). If the ``mapper`` argument is not present or could not
produce a bind, the given expression construct will be searched
for a bound element, typically a :class:`_schema.Table`
associated with
bound :class:`_schema.MetaData`.
.. seealso::
:ref:`session_partitioning`
:paramref:`.Session.binds`
:meth:`.Session.bind_mapper`
:meth:`.Session.bind_table`
"""
if mapper is clause is None:
if self.bind:
return self.bind
else:
raise sa_exc.UnboundExecutionError(
"This session is not bound to a single Engine or "
"Connection, and no context was provided to locate "
"a binding."
)
if mapper is not None:
try:
mapper = inspect(mapper)
except sa_exc.NoInspectionAvailable as err:
if isinstance(mapper, type):
util.raise_(
exc.UnmappedClassError(mapper), replace_context=err,
)
else:
raise
if self.__binds:
if mapper:
for cls in mapper.class_.__mro__:
if cls in self.__binds:
return self.__binds[cls]
if clause is None:
clause = mapper.persist_selectable
if clause is not None:
for t in sql_util.find_tables(clause, include_crud=True):
if t in self.__binds:
return self.__binds[t]
if self.bind:
return self.bind
if isinstance(clause, sql.expression.ClauseElement) and clause.bind:
return clause.bind
if mapper and mapper.persist_selectable.bind:
return mapper.persist_selectable.bind
context = []
if mapper is not None:
context.append("mapper %s" % mapper)
if clause is not None:
context.append("SQL expression")
raise sa_exc.UnboundExecutionError(
"Could not locate a bind configured on %s or this Session"
% (", ".join(context))
)
def query(self, *entities, **kwargs):
"""Return a new :class:`_query.Query` object corresponding to this
:class:`.Session`."""
return self._query_cls(entities, self, **kwargs)
@property
@util.contextmanager
def no_autoflush(self):
"""Return a context manager that disables autoflush.
e.g.::
with session.no_autoflush:
some_object = SomeClass()
session.add(some_object)
# won't autoflush
some_object.related_thing = session.query(SomeRelated).first()
Operations that proceed within the ``with:`` block
will not be subject to flushes occurring upon query
access. This is useful when initializing a series
of objects which involve existing database queries,
where the uncompleted object should not yet be flushed.
"""
autoflush = self.autoflush
self.autoflush = False
try:
yield self
finally:
self.autoflush = autoflush
def _autoflush(self):
if self.autoflush and not self._flushing:
try:
self.flush()
except sa_exc.StatementError as e:
# note we are reraising StatementError as opposed to
# raising FlushError with "chaining" to remain compatible
# with code that catches StatementError, IntegrityError,
# etc.
e.add_detail(
"raised as a result of Query-invoked autoflush; "
"consider using a session.no_autoflush block if this "
"flush is occurring prematurely"
)
util.raise_(e, with_traceback=sys.exc_info()[2])
def refresh(
self,
instance,
attribute_names=None,
with_for_update=None,
lockmode=None,
):
"""Expire and refresh the attributes on the given instance.
A query will be issued to the database and all attributes will be
refreshed with their current database value.
Lazy-loaded relational attributes will remain lazily loaded, so that
the instance-wide refresh operation will be followed immediately by
the lazy load of that attribute.
Eagerly-loaded relational attributes will eagerly load within the
single refresh operation.
Note that a highly isolated transaction will return the same values as
were previously read in that same transaction, regardless of changes
in database state outside of that transaction - usage of
:meth:`~Session.refresh` usually only makes sense if non-ORM SQL
statements were emitted in the ongoing transaction, or if autocommit
mode is turned on.
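E.g. (a sketch; ``obj`` and the attribute names are placeholders)::
    session.refresh(obj)                          # reload all attributes
    session.refresh(obj, ["name", "status"])      # reload a subset only
    session.refresh(obj, with_for_update=True)    # SELECT ... FOR UPDATE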
:param attribute_names: optional. An iterable collection of
string attribute names indicating a subset of attributes to
be refreshed.
:param with_for_update: optional boolean ``True`` indicating FOR UPDATE
should be used, or may be a dictionary containing flags to
indicate a more specific set of FOR UPDATE flags for the SELECT;
flags should match the parameters of
:meth:`_query.Query.with_for_update`.
Supersedes the :paramref:`.Session.refresh.lockmode` parameter.
.. versionadded:: 1.2
:param lockmode: Passed to the :class:`~sqlalchemy.orm.query.Query`
as used by :meth:`~sqlalchemy.orm.query.Query.with_lockmode`.
Superseded by :paramref:`.Session.refresh.with_for_update`.
.. seealso::
:ref:`session_expire` - introductory material
:meth:`.Session.expire`
:meth:`.Session.expire_all`
"""
try:
state = attributes.instance_state(instance)
except exc.NO_STATE as err:
util.raise_(
exc.UnmappedInstanceError(instance), replace_context=err,
)
self._expire_state(state, attribute_names)
if with_for_update == {}:
raise sa_exc.ArgumentError(
"with_for_update should be the boolean value "
"True, or a dictionary with options. "
"A blank dictionary is ambiguous."
)
if lockmode:
with_for_update = query.LockmodeArg.parse_legacy_query(lockmode)
elif with_for_update is not None:
if with_for_update is True:
with_for_update = query.LockmodeArg()
elif with_for_update:
with_for_update = query.LockmodeArg(**with_for_update)
else:
with_for_update = None
if (
loading.load_on_ident(
self.query(object_mapper(instance)),
state.key,
refresh_state=state,
with_for_update=with_for_update,
only_load_props=attribute_names,
)
is None
):
raise sa_exc.InvalidRequestError(
"Could not refresh instance '%s'" % instance_str(instance)
)
def expire_all(self):
"""Expires all persistent instances within this Session.
When any attribute on a persistent instance is next accessed,
a query will be issued using the
:class:`.Session` object's current transactional context in order to
load all expired attributes for the given instance. Note that
a highly isolated transaction will return the same values as were
previously read in that same transaction, regardless of changes
in database state outside of that transaction.
To expire individual objects and individual attributes
on those objects, use :meth:`Session.expire`.
The :class:`.Session` object's default behavior is to
expire all state whenever the :meth:`Session.rollback`
or :meth:`Session.commit` methods are called, so that new
state can be loaded for the new transaction. For this reason,
calling :meth:`Session.expire_all` should not be needed when
autocommit is ``False``, assuming the transaction is isolated.
.. seealso::
:ref:`session_expire` - introductory material
:meth:`.Session.expire`
:meth:`.Session.refresh`
"""
for state in self.identity_map.all_states():
state._expire(state.dict, self.identity_map._modified)
def expire(self, instance, attribute_names=None):
"""Expire the attributes on an instance.
Marks the attributes of an instance as out of date. When an expired
attribute is next accessed, a query will be issued to the
:class:`.Session` object's current transactional context in order to
load all expired attributes for the given instance. Note that
a highly isolated transaction will return the same values as were
previously read in that same transaction, regardless of changes
in database state outside of that transaction.
To expire all objects in the :class:`.Session` simultaneously,
use :meth:`Session.expire_all`.
The :class:`.Session` object's default behavior is to
expire all state whenever the :meth:`Session.rollback`
or :meth:`Session.commit` methods are called, so that new
state can be loaded for the new transaction. For this reason,
calling :meth:`Session.expire` only makes sense for the specific
case that a non-ORM SQL statement was emitted in the current
transaction.
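E.g. (a sketch; ``obj`` and its attribute names are placeholders)::
    session.expire(obj, ["name", "fullname"])
    obj.name    # next access emits a SELECT to reload the expired attributes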
:param instance: The instance to be refreshed.
:param attribute_names: optional list of string attribute names
indicating a subset of attributes to be expired.
.. seealso::
:ref:`session_expire` - introductory material
:meth:`.Session.expire`
:meth:`.Session.refresh`
"""
try:
state = attributes.instance_state(instance)
except exc.NO_STATE as err:
util.raise_(
exc.UnmappedInstanceError(instance), replace_context=err,
)
self._expire_state(state, attribute_names)
def _expire_state(self, state, attribute_names):
self._validate_persistent(state)
if attribute_names:
state._expire_attributes(state.dict, attribute_names)
else:
# pre-fetch the full cascade since the expire is going to
# remove associations
cascaded = list(
state.manager.mapper.cascade_iterator("refresh-expire", state)
)
self._conditional_expire(state)
for o, m, st_, dct_ in cascaded:
self._conditional_expire(st_)
def _conditional_expire(self, state):
"""Expire a state if persistent, else expunge if pending"""
if state.key:
state._expire(state.dict, self.identity_map._modified)
elif state in self._new:
self._new.pop(state)
state._detach(self)
@util.deprecated(
"0.7",
"The :meth:`.Session.prune` method is deprecated along with "
":paramref:`.Session.weak_identity_map`. This method will be "
"removed in a future release.",
)
def prune(self):
"""Remove unreferenced instances cached in the identity map.
Note that this method is only meaningful if "weak_identity_map" is set
to False. The default weak identity map is self-pruning.
Removes any object in this Session's identity map that is not
referenced in user code, modified, new or scheduled for deletion.
Returns the number of objects pruned.
"""
return self.identity_map.prune()
def expunge(self, instance):
"""Remove the `instance` from this ``Session``.
This will free all internal references to the instance. Cascading
will be applied according to the *expunge* cascade rule.
"""
try:
state = attributes.instance_state(instance)
except exc.NO_STATE as err:
util.raise_(
exc.UnmappedInstanceError(instance), replace_context=err,
)
if state.session_id is not self.hash_key:
raise sa_exc.InvalidRequestError(
"Instance %s is not present in this Session" % state_str(state)
)
cascaded = list(
state.manager.mapper.cascade_iterator("expunge", state)
)
self._expunge_states([state] + [st_ for o, m, st_, dct_ in cascaded])
def _expunge_states(self, states, to_transient=False):
for state in states:
if state in self._new:
self._new.pop(state)
elif self.identity_map.contains_state(state):
self.identity_map.safe_discard(state)
self._deleted.pop(state, None)
elif self.transaction:
# state is "detached" from being deleted, but still present
# in the transaction snapshot
self.transaction._deleted.pop(state, None)
statelib.InstanceState._detach_states(
states, self, to_transient=to_transient
)
def _register_persistent(self, states):
"""Register all persistent objects from a flush.
This is used both for pending objects moving to the persistent
state as well as already persistent objects.
"""
pending_to_persistent = self.dispatch.pending_to_persistent or None
for state in states:
mapper = _state_mapper(state)
# prevent against last minute dereferences of the object
obj = state.obj()
if obj is not None:
instance_key = mapper._identity_key_from_state(state)
if (
_none_set.intersection(instance_key[1])
and not mapper.allow_partial_pks
or _none_set.issuperset(instance_key[1])
):
raise exc.FlushError(
"Instance %s has a NULL identity key. If this is an "
"auto-generated value, check that the database table "
"allows generation of new primary key values, and "
"that the mapped Column object is configured to "
"expect these generated values. Ensure also that "
"this flush() is not occurring at an inappropriate "
"time, such as within a load() event."
% state_str(state)
)
if state.key is None:
state.key = instance_key
elif state.key != instance_key:
# primary key switch. use safe_discard() in case another
# state has already replaced this one in the identity
# map (see test/orm/test_naturalpks.py ReversePKsTest)
self.identity_map.safe_discard(state)
if state in self.transaction._key_switches:
orig_key = self.transaction._key_switches[state][0]
else:
orig_key = state.key
self.transaction._key_switches[state] = (
orig_key,
instance_key,
)
state.key = instance_key
# there can be an existing state in the identity map
# that is replaced when the primary keys of two instances
# are swapped; see test/orm/test_naturalpks.py -> test_reverse
old = self.identity_map.replace(state)
if (
old is not None
and mapper._identity_key_from_state(old) == instance_key
and old.obj() is not None
):
util.warn(
"Identity map already had an identity for %s, "
"replacing it with newly flushed object. Are there "
"load operations occurring inside of an event handler "
"within the flush?" % (instance_key,)
)
state._orphaned_outside_of_session = False
statelib.InstanceState._commit_all_states(
((state, state.dict) for state in states), self.identity_map
)
self._register_altered(states)
if pending_to_persistent is not None:
for state in states.intersection(self._new):
pending_to_persistent(self, state)
# remove from new last, might be the last strong ref
for state in set(states).intersection(self._new):
self._new.pop(state)
def _register_altered(self, states):
if self._enable_transaction_accounting and self.transaction:
for state in states:
if state in self._new:
self.transaction._new[state] = True
else:
self.transaction._dirty[state] = True
def _remove_newly_deleted(self, states):
persistent_to_deleted = self.dispatch.persistent_to_deleted or None
for state in states:
if self._enable_transaction_accounting and self.transaction:
self.transaction._deleted[state] = True
if persistent_to_deleted is not None:
# get a strong reference before we pop out of
# self._deleted
obj = state.obj() # noqa
self.identity_map.safe_discard(state)
self._deleted.pop(state, None)
state._deleted = True
# can't call state._detach() here, because this state
# is still in the transaction snapshot and needs to be
# tracked as part of that
if persistent_to_deleted is not None:
persistent_to_deleted(self, state)
def add(self, instance, _warn=True):
"""Place an object in the ``Session``.
Its state will be persisted to the database on the next flush
operation.
Repeated calls to ``add()`` will be ignored. The opposite of ``add()``
is ``expunge()``.
"""
if _warn and self._warn_on_events:
self._flush_warning("Session.add()")
try:
state = attributes.instance_state(instance)
except exc.NO_STATE as err:
util.raise_(
exc.UnmappedInstanceError(instance), replace_context=err,
)
self._save_or_update_state(state)
def add_all(self, instances):
"""Add the given collection of instances to this ``Session``."""
if self._warn_on_events:
self._flush_warning("Session.add_all()")
for instance in instances:
self.add(instance, _warn=False)
def _save_or_update_state(self, state):
state._orphaned_outside_of_session = False
self._save_or_update_impl(state)
mapper = _state_mapper(state)
for o, m, st_, dct_ in mapper.cascade_iterator(
"save-update", state, halt_on=self._contains_state
):
self._save_or_update_impl(st_)
def delete(self, instance):
"""Mark an instance as deleted.
The database delete operation occurs upon ``flush()``.
"""
if self._warn_on_events:
self._flush_warning("Session.delete()")
try:
state = attributes.instance_state(instance)
except exc.NO_STATE as err:
util.raise_(
exc.UnmappedInstanceError(instance), replace_context=err,
)
self._delete_impl(state, instance, head=True)
def _delete_impl(self, state, obj, head):
if state.key is None:
if head:
raise sa_exc.InvalidRequestError(
"Instance '%s' is not persisted" % state_str(state)
)
else:
return
to_attach = self._before_attach(state, obj)
if state in self._deleted:
return
self.identity_map.add(state)
if to_attach:
self._after_attach(state, obj)
if head:
# grab the cascades before adding the item to the deleted list
# so that autoflush does not delete the item
# the strong reference to the instance itself is significant here
cascade_states = list(
state.manager.mapper.cascade_iterator("delete", state)
)
self._deleted[state] = obj
if head:
for o, m, st_, dct_ in cascade_states:
self._delete_impl(st_, o, False)
def merge(self, instance, load=True):
"""Copy the state of a given instance into a corresponding instance
within this :class:`.Session`.
:meth:`.Session.merge` examines the primary key attributes of the
source instance, and attempts to reconcile it with an instance of the
same primary key in the session. If not found locally, it attempts
to load the object from the database based on primary key, and if
none can be located, creates a new instance. The state of each
attribute on the source instance is then copied to the target
instance. The resulting target instance is then returned by the
method; the original source instance is left unmodified, and
un-associated with the :class:`.Session` if not already.
This operation cascades to associated instances if the association is
mapped with ``cascade="merge"``.
See :ref:`unitofwork_merging` for a detailed discussion of merging.
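A minimal illustration (``detached_user`` is assumed to be an instance
whose primary key is set, e.g. loaded by a previous :class:`.Session`)::
    local_user = session.merge(detached_user)
    assert local_user in session            # the returned copy is managed here
    assert detached_user not in session     # the original stays un-associated
    session.commit()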
.. versionchanged:: 1.1 - :meth:`.Session.merge` will now reconcile
pending objects with overlapping primary keys in the same way
as persistent. See :ref:`change_3601` for discussion.
:param instance: Instance to be merged.
:param load: Boolean, when False, :meth:`.merge` switches into
a "high performance" mode which causes it to forego emitting history
events as well as all database access. This flag is used for
cases such as transferring graphs of objects into a :class:`.Session`
from a second level cache, or to transfer just-loaded objects
into the :class:`.Session` owned by a worker thread or process
without re-querying the database.
The ``load=False`` use case adds the caveat that the given
object has to be in a "clean" state, that is, has no pending changes
to be flushed - even if the incoming object is detached from any
:class:`.Session`. This is so that when
the merge operation populates local attributes and
cascades to related objects and
collections, the values can be "stamped" onto the
target object as is, without generating any history or attribute
events, and without the need to reconcile the incoming data with
any existing related objects or collections that might not
be loaded. The resulting objects from ``load=False`` are always
produced as "clean", so it is only appropriate that the given objects
should be "clean" as well, else this suggests a mis-use of the
method.
.. seealso::
:func:`.make_transient_to_detached` - provides for an alternative
means of "merging" a single object into the :class:`.Session`
"""
if self._warn_on_events:
self._flush_warning("Session.merge()")
_recursive = {}
_resolve_conflict_map = {}
if load:
# flush current contents if we expect to load data
self._autoflush()
object_mapper(instance) # verify mapped
autoflush = self.autoflush
try:
self.autoflush = False
return self._merge(
attributes.instance_state(instance),
attributes.instance_dict(instance),
load=load,
_recursive=_recursive,
_resolve_conflict_map=_resolve_conflict_map,
)
finally:
self.autoflush = autoflush
def _merge(
self,
state,
state_dict,
load=True,
_recursive=None,
_resolve_conflict_map=None,
):
mapper = _state_mapper(state)
if state in _recursive:
return _recursive[state]
new_instance = False
key = state.key
if key is None:
if state in self._new:
util.warn(
"Instance %s is already pending in this Session yet is "
"being merged again; this is probably not what you want "
"to do" % state_str(state)
)
if not load:
raise sa_exc.InvalidRequestError(
"merge() with load=False option does not support "
"objects transient (i.e. unpersisted) objects. flush() "
"all changes on mapped instances before merging with "
"load=False."
)
key = mapper._identity_key_from_state(state)
key_is_persistent = attributes.NEVER_SET not in key[1] and (
not _none_set.intersection(key[1])
or (
mapper.allow_partial_pks
and not _none_set.issuperset(key[1])
)
)
else:
key_is_persistent = True
if key in self.identity_map:
try:
merged = self.identity_map[key]
except KeyError:
# object was GC'ed right as we checked for it
merged = None
else:
merged = None
if merged is None:
if key_is_persistent and key in _resolve_conflict_map:
merged = _resolve_conflict_map[key]
elif not load:
if state.modified:
raise sa_exc.InvalidRequestError(
"merge() with load=False option does not support "
"objects marked as 'dirty'. flush() all changes on "
"mapped instances before merging with load=False."
)
merged = mapper.class_manager.new_instance()
merged_state = attributes.instance_state(merged)
merged_state.key = key
self._update_impl(merged_state)
new_instance = True
elif key_is_persistent:
merged = self.query(mapper.class_).get(key[1])
if merged is None:
merged = mapper.class_manager.new_instance()
merged_state = attributes.instance_state(merged)
merged_dict = attributes.instance_dict(merged)
new_instance = True
self._save_or_update_state(merged_state)
else:
merged_state = attributes.instance_state(merged)
merged_dict = attributes.instance_dict(merged)
_recursive[state] = merged
_resolve_conflict_map[key] = merged
# check that we didn't just pull the exact same
# state out.
if state is not merged_state:
# version check if applicable
if mapper.version_id_col is not None:
existing_version = mapper._get_state_attr_by_column(
state,
state_dict,
mapper.version_id_col,
passive=attributes.PASSIVE_NO_INITIALIZE,
)
merged_version = mapper._get_state_attr_by_column(
merged_state,
merged_dict,
mapper.version_id_col,
passive=attributes.PASSIVE_NO_INITIALIZE,
)
if (
existing_version is not attributes.PASSIVE_NO_RESULT
and merged_version is not attributes.PASSIVE_NO_RESULT
and existing_version != merged_version
):
raise exc.StaleDataError(
"Version id '%s' on merged state %s "
"does not match existing version '%s'. "
"Leave the version attribute unset when "
"merging to update the most recent version."
% (
existing_version,
state_str(merged_state),
merged_version,
)
)
merged_state.load_path = state.load_path
merged_state.load_options = state.load_options
# since we are copying load_options, we need to copy
# the callables_ that would have been generated by those
# load_options.
# assumes that the callables we put in state.callables_
# are not instance-specific (which they should not be)
merged_state._copy_callables(state)
for prop in mapper.iterate_properties:
prop.merge(
self,
state,
state_dict,
merged_state,
merged_dict,
load,
_recursive,
_resolve_conflict_map,
)
if not load:
# remove any history
merged_state._commit_all(merged_dict, self.identity_map)
if new_instance:
merged_state.manager.dispatch.load(merged_state, None)
return merged
def _validate_persistent(self, state):
if not self.identity_map.contains_state(state):
raise sa_exc.InvalidRequestError(
"Instance '%s' is not persistent within this Session"
% state_str(state)
)
def _save_impl(self, state):
if state.key is not None:
raise sa_exc.InvalidRequestError(
"Object '%s' already has an identity - "
"it can't be registered as pending" % state_str(state)
)
obj = state.obj()
to_attach = self._before_attach(state, obj)
if state not in self._new:
self._new[state] = obj
state.insert_order = len(self._new)
if to_attach:
self._after_attach(state, obj)
def _update_impl(self, state, revert_deletion=False):
if state.key is None:
raise sa_exc.InvalidRequestError(
"Instance '%s' is not persisted" % state_str(state)
)
if state._deleted:
if revert_deletion:
if not state._attached:
return
del state._deleted
else:
raise sa_exc.InvalidRequestError(
"Instance '%s' has been deleted. "
"Use the make_transient() "
"function to send this object back "
"to the transient state." % state_str(state)
)
obj = state.obj()
# check for late gc
if obj is None:
return
to_attach = self._before_attach(state, obj)
self._deleted.pop(state, None)
if revert_deletion:
self.identity_map.replace(state)
else:
self.identity_map.add(state)
if to_attach:
self._after_attach(state, obj)
elif revert_deletion:
self.dispatch.deleted_to_persistent(self, state)
def _save_or_update_impl(self, state):
if state.key is None:
self._save_impl(state)
else:
self._update_impl(state)
def enable_relationship_loading(self, obj):
"""Associate an object with this :class:`.Session` for related
object loading.
.. warning::
:meth:`.enable_relationship_loading` exists to serve special
use cases and is not recommended for general use.
Accesses of attributes mapped with :func:`_orm.relationship`
will attempt to load a value from the database using this
:class:`.Session` as the source of connectivity. The values
will be loaded based on foreign key and primary key values
present on this object - if not present, then those relationships
will be unavailable.
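        A hypothetical sketch (``Address`` with a ``user`` relationship and
        ``sess`` are placeholder names, not part of this module)::
            a = Address(user_id=5)                # transient; FK value set by hand
            sess.enable_relationship_loading(a)
            a.user                                # lazy loads via this Session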
The object will be attached to this session, but will
**not** participate in any persistence operations; its state
for almost all purposes will remain either "transient" or
"detached", except for the case of relationship loading.
Also note that backrefs will often not work as expected.
Altering a relationship-bound attribute on the target object
may not fire off a backref event, if the effective value
is what was already loaded from a foreign-key-holding value.
The :meth:`.Session.enable_relationship_loading` method is
similar to the ``load_on_pending`` flag on :func:`_orm.relationship`.
Unlike that flag, :meth:`.Session.enable_relationship_loading` allows
an object to remain transient while still being able to load
related items.
To make a transient object associated with a :class:`.Session`
via :meth:`.Session.enable_relationship_loading` pending, add
it to the :class:`.Session` using :meth:`.Session.add` normally.
If the object instead represents an existing identity in the database,
it should be merged using :meth:`.Session.merge`.
:meth:`.Session.enable_relationship_loading` does not improve
behavior when the ORM is used normally - object references should be
constructed at the object level, not at the foreign key level, so
that they are present in an ordinary way before flush()
proceeds. This method is not intended for general use.
.. seealso::
``load_on_pending`` at :func:`_orm.relationship` - this flag
allows per-relationship loading of many-to-ones on items that
are pending.
:func:`.make_transient_to_detached` - allows for an object to
be added to a :class:`.Session` without SQL emitted, which then
will unexpire attributes on access.
"""
state = attributes.instance_state(obj)
to_attach = self._before_attach(state, obj)
state._load_pending = True
if to_attach:
self._after_attach(state, obj)
def _before_attach(self, state, obj):
if state.session_id == self.hash_key:
return False
if state.session_id and state.session_id in _sessions:
raise sa_exc.InvalidRequestError(
"Object '%s' is already attached to session '%s' "
"(this is '%s')"
% (state_str(state), state.session_id, self.hash_key)
)
self.dispatch.before_attach(self, state)
return True
def _after_attach(self, state, obj):
state.session_id = self.hash_key
if state.modified and state._strong_obj is None:
state._strong_obj = obj
self.dispatch.after_attach(self, state)
if state.key:
self.dispatch.detached_to_persistent(self, state)
else:
self.dispatch.transient_to_pending(self, state)
def __contains__(self, instance):
"""Return True if the instance is associated with this session.
The instance may be pending or persistent within the Session for a
result of True.
"""
try:
state = attributes.instance_state(instance)
except exc.NO_STATE as err:
util.raise_(
exc.UnmappedInstanceError(instance), replace_context=err,
)
return self._contains_state(state)
def __iter__(self):
"""Iterate over all pending or persistent instances within this
Session.
"""
return iter(
list(self._new.values()) + list(self.identity_map.values())
)
def _contains_state(self, state):
return state in self._new or self.identity_map.contains_state(state)
def flush(self, objects=None):
"""Flush all the object changes to the database.
Writes out all pending object creations, deletions and modifications
to the database as INSERTs, DELETEs, UPDATEs, etc. Operations are
automatically ordered by the Session's unit of work dependency
solver.
Database operations will be issued in the current transactional
context and do not affect the state of the transaction, unless an
error occurs, in which case the entire transaction is rolled back.
You may flush() as often as you like within a transaction to move
changes from Python to the database's transaction buffer.
For ``autocommit`` Sessions with no active manual transaction, flush()
will create a transaction on the fly that surrounds the entire set of
        operations in the flush.
:param objects: Optional; restricts the flush operation to operate
only on elements that are in the given collection.
This feature is for an extremely narrow set of use cases where
particular objects may need to be operated upon before the
full flush() occurs. It is not intended for general use.
"""
if self._flushing:
raise sa_exc.InvalidRequestError("Session is already flushing")
if self._is_clean():
return
try:
self._flushing = True
self._flush(objects)
finally:
self._flushing = False
def _flush_warning(self, method):
util.warn(
"Usage of the '%s' operation is not currently supported "
"within the execution stage of the flush process. "
"Results may not be consistent. Consider using alternative "
"event listeners or connection-level operations instead." % method
)
def _is_clean(self):
return (
not self.identity_map.check_modified()
and not self._deleted
and not self._new
)
def _flush(self, objects=None):
dirty = self._dirty_states
if not dirty and not self._deleted and not self._new:
self.identity_map._modified.clear()
return
flush_context = UOWTransaction(self)
if self.dispatch.before_flush:
self.dispatch.before_flush(self, flush_context, objects)
# re-establish "dirty states" in case the listeners
# added
dirty = self._dirty_states
deleted = set(self._deleted)
new = set(self._new)
dirty = set(dirty).difference(deleted)
# create the set of all objects we want to operate upon
if objects:
# specific list passed in
objset = set()
for o in objects:
try:
state = attributes.instance_state(o)
except exc.NO_STATE as err:
util.raise_(
exc.UnmappedInstanceError(o), replace_context=err,
)
objset.add(state)
else:
objset = None
# store objects whose fate has been decided
processed = set()
# put all saves/updates into the flush context. detect top-level
# orphans and throw them into deleted.
if objset:
proc = new.union(dirty).intersection(objset).difference(deleted)
else:
proc = new.union(dirty).difference(deleted)
for state in proc:
is_orphan = _state_mapper(state)._is_orphan(state)
is_persistent_orphan = is_orphan and state.has_identity
if (
is_orphan
and not is_persistent_orphan
and state._orphaned_outside_of_session
):
self._expunge_states([state])
else:
_reg = flush_context.register_object(
state, isdelete=is_persistent_orphan
)
assert _reg, "Failed to add object to the flush context!"
processed.add(state)
# put all remaining deletes into the flush context.
if objset:
proc = deleted.intersection(objset).difference(processed)
else:
proc = deleted.difference(processed)
for state in proc:
_reg = flush_context.register_object(state, isdelete=True)
assert _reg, "Failed to add object to the flush context!"
if not flush_context.has_work:
return
flush_context.transaction = transaction = self.begin(
subtransactions=True
)
try:
self._warn_on_events = True
try:
flush_context.execute()
finally:
self._warn_on_events = False
self.dispatch.after_flush(self, flush_context)
flush_context.finalize_flush_changes()
if not objects and self.identity_map._modified:
len_ = len(self.identity_map._modified)
statelib.InstanceState._commit_all_states(
[
(state, state.dict)
for state in self.identity_map._modified
],
instance_dict=self.identity_map,
)
util.warn(
"Attribute history events accumulated on %d "
"previously clean instances "
"within inner-flush event handlers have been "
"reset, and will not result in database updates. "
"Consider using set_committed_value() within "
"inner-flush event handlers to avoid this warning." % len_
)
# useful assertions:
# if not objects:
# assert not self.identity_map._modified
# else:
# assert self.identity_map._modified == \
# self.identity_map._modified.difference(objects)
self.dispatch.after_flush_postexec(self, flush_context)
transaction.commit()
except:
with util.safe_reraise():
transaction.rollback(_capture_exception=True)
def bulk_save_objects(
self,
objects,
return_defaults=False,
update_changed_only=True,
preserve_order=True,
):
"""Perform a bulk save of the given list of objects.
The bulk save feature allows mapped objects to be used as the
source of simple INSERT and UPDATE operations which can be more easily
grouped together into higher performing "executemany"
operations; the extraction of data from the objects is also performed
using a lower-latency process that ignores whether or not attributes
have actually been modified in the case of UPDATEs, and also ignores
SQL expressions.
The objects as given are not added to the session and no additional
state is established on them, unless the ``return_defaults`` flag
is also set, in which case primary key attributes and server-side
default values will be populated.
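        An illustrative sketch (``User`` and ``sess`` are placeholder names,
        not part of this module)::
            sess.bulk_save_objects(
                [User(name="user %d" % i) for i in range(10000)]
            )
            sess.commit()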
.. versionadded:: 1.0.0
.. warning::
The bulk save feature allows for a lower-latency INSERT/UPDATE
of rows at the expense of most other unit-of-work features.
Features such as object management, relationship handling,
and SQL clause support are **silently omitted** in favor of raw
INSERT/UPDATES of records.
**Please read the list of caveats at** :ref:`bulk_operations`
**before using this method, and fully test and confirm the
functionality of all code developed using these systems.**
:param objects: a sequence of mapped object instances. The mapped
objects are persisted as is, and are **not** associated with the
:class:`.Session` afterwards.
For each object, whether the object is sent as an INSERT or an
UPDATE is dependent on the same rules used by the :class:`.Session`
in traditional operation; if the object has the
:attr:`.InstanceState.key`
attribute set, then the object is assumed to be "detached" and
will result in an UPDATE. Otherwise, an INSERT is used.
In the case of an UPDATE, statements are grouped based on which
attributes have changed, and are thus to be the subject of each
SET clause. If ``update_changed_only`` is False, then all
attributes present within each object are applied to the UPDATE
statement, which may help in allowing the statements to be grouped
together into a larger executemany(), and will also reduce the
overhead of checking history on attributes.
:param return_defaults: when True, rows that are missing values which
generate defaults, namely integer primary key defaults and sequences,
will be inserted **one at a time**, so that the primary key value
is available. In particular this will allow joined-inheritance
and other multi-table mappings to insert correctly without the need
to provide primary key values ahead of time; however,
:paramref:`.Session.bulk_save_objects.return_defaults` **greatly
reduces the performance gains** of the method overall.
:param update_changed_only: when True, UPDATE statements are rendered
based on those attributes in each state that have logged changes.
When False, all attributes present are rendered into the SET clause
with the exception of primary key attributes.
:param preserve_order: when True, the order of inserts and updates
matches exactly the order in which the objects are given. When
False, common types of objects are grouped into inserts
and updates, to allow for more batching opportunities.
.. versionadded:: 1.3
.. seealso::
:ref:`bulk_operations`
:meth:`.Session.bulk_insert_mappings`
:meth:`.Session.bulk_update_mappings`
"""
def key(state):
return (state.mapper, state.key is not None)
obj_states = (attributes.instance_state(obj) for obj in objects)
if not preserve_order:
obj_states = sorted(obj_states, key=key)
for (mapper, isupdate), states in itertools.groupby(obj_states, key):
self._bulk_save_mappings(
mapper,
states,
isupdate,
True,
return_defaults,
update_changed_only,
False,
)
def bulk_insert_mappings(
self, mapper, mappings, return_defaults=False, render_nulls=False
):
"""Perform a bulk insert of the given list of mapping dictionaries.
The bulk insert feature allows plain Python dictionaries to be used as
the source of simple INSERT operations which can be more easily
grouped together into higher performing "executemany"
        operations. Using dictionaries, no "history" or session state
        management features are in use, reducing latency when inserting
large numbers of simple rows.
The values within the dictionaries as given are typically passed
without modification into Core :meth:`_expression.Insert` constructs,
after
organizing the values within them across the tables to which
the given mapper is mapped.
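        An illustrative sketch (``User`` and ``sess`` are placeholder names,
        not part of this module)::
            sess.bulk_insert_mappings(
                User,
                [{"name": "user %d" % i} for i in range(10000)],
            )
            sess.commit()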
.. versionadded:: 1.0.0
.. warning::
The bulk insert feature allows for a lower-latency INSERT
of rows at the expense of most other unit-of-work features.
Features such as object management, relationship handling,
and SQL clause support are **silently omitted** in favor of raw
INSERT of records.
**Please read the list of caveats at** :ref:`bulk_operations`
**before using this method, and fully test and confirm the
functionality of all code developed using these systems.**
:param mapper: a mapped class, or the actual :class:`_orm.Mapper`
object,
representing the single kind of object represented within the mapping
list.
:param mappings: a sequence of dictionaries, each one containing the
state of the mapped row to be inserted, in terms of the attribute
names on the mapped class. If the mapping refers to multiple tables,
such as a joined-inheritance mapping, each dictionary must contain all
keys to be populated into all tables.
:param return_defaults: when True, rows that are missing values which
generate defaults, namely integer primary key defaults and sequences,
will be inserted **one at a time**, so that the primary key value
is available. In particular this will allow joined-inheritance
and other multi-table mappings to insert correctly without the need
to provide primary
key values ahead of time; however,
:paramref:`.Session.bulk_insert_mappings.return_defaults`
**greatly reduces the performance gains** of the method overall.
If the rows
to be inserted only refer to a single table, then there is no
reason this flag should be set as the returned default information
is not used.
:param render_nulls: When True, a value of ``None`` will result
in a NULL value being included in the INSERT statement, rather
than the column being omitted from the INSERT. This allows all
the rows being INSERTed to have the identical set of columns which
allows the full set of rows to be batched to the DBAPI. Normally,
each column-set that contains a different combination of NULL values
than the previous row must omit a different series of columns from
the rendered INSERT statement, which means it must be emitted as a
separate statement. By passing this flag, the full set of rows
are guaranteed to be batchable into one batch; the cost however is
that server-side defaults which are invoked by an omitted column will
be skipped, so care must be taken to ensure that these are not
necessary.
.. warning::
When this flag is set, **server side default SQL values will
not be invoked** for those columns that are inserted as NULL;
the NULL value will be sent explicitly. Care must be taken
to ensure that no server-side default functions need to be
invoked for the operation as a whole.
.. versionadded:: 1.1
.. seealso::
:ref:`bulk_operations`
:meth:`.Session.bulk_save_objects`
:meth:`.Session.bulk_update_mappings`
"""
self._bulk_save_mappings(
mapper,
mappings,
False,
False,
return_defaults,
False,
render_nulls,
)
def bulk_update_mappings(self, mapper, mappings):
"""Perform a bulk update of the given list of mapping dictionaries.
The bulk update feature allows plain Python dictionaries to be used as
the source of simple UPDATE operations which can be more easily
grouped together into higher performing "executemany"
        operations. Using dictionaries, no "history" or session state
        management features are in use, reducing latency when updating
large numbers of simple rows.
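        An illustrative sketch (``User`` and ``sess`` are placeholder names;
        ``id`` is assumed to be the primary key column)::
            sess.bulk_update_mappings(
                User,
                [
                    {"id": 1, "name": "new name 1"},
                    {"id": 2, "name": "new name 2"},
                ],
            )
            sess.commit()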
.. versionadded:: 1.0.0
.. warning::
The bulk update feature allows for a lower-latency UPDATE
of rows at the expense of most other unit-of-work features.
Features such as object management, relationship handling,
and SQL clause support are **silently omitted** in favor of raw
UPDATES of records.
**Please read the list of caveats at** :ref:`bulk_operations`
**before using this method, and fully test and confirm the
functionality of all code developed using these systems.**
:param mapper: a mapped class, or the actual :class:`_orm.Mapper`
object,
representing the single kind of object represented within the mapping
list.
:param mappings: a sequence of dictionaries, each one containing the
state of the mapped row to be updated, in terms of the attribute names
on the mapped class. If the mapping refers to multiple tables, such
as a joined-inheritance mapping, each dictionary may contain keys
corresponding to all tables. All those keys which are present and
are not part of the primary key are applied to the SET clause of the
UPDATE statement; the primary key values, which are required, are
applied to the WHERE clause.
.. seealso::
:ref:`bulk_operations`
:meth:`.Session.bulk_insert_mappings`
:meth:`.Session.bulk_save_objects`
"""
self._bulk_save_mappings(
mapper, mappings, True, False, False, False, False
)
def _bulk_save_mappings(
self,
mapper,
mappings,
isupdate,
isstates,
return_defaults,
update_changed_only,
render_nulls,
):
mapper = _class_to_mapper(mapper)
self._flushing = True
transaction = self.begin(subtransactions=True)
try:
if isupdate:
persistence._bulk_update(
mapper,
mappings,
transaction,
isstates,
update_changed_only,
)
else:
persistence._bulk_insert(
mapper,
mappings,
transaction,
isstates,
return_defaults,
render_nulls,
)
transaction.commit()
except:
with util.safe_reraise():
transaction.rollback(_capture_exception=True)
finally:
self._flushing = False
@util.deprecated_params(
passive=(
"0.8",
"The :paramref:`.Session.is_modified.passive` flag is deprecated "
"and will be removed in a future release. The flag is no longer "
"used and is ignored.",
)
)
def is_modified(self, instance, include_collections=True, passive=None):
r"""Return ``True`` if the given instance has locally
modified attributes.
This method retrieves the history for each instrumented
attribute on the instance and performs a comparison of the current
value to its previously committed value, if any.
It is in effect a more expensive and accurate
version of checking for the given instance in the
:attr:`.Session.dirty` collection; a full test for
each attribute's net "dirty" status is performed.
E.g.::
return session.is_modified(someobject)
A few caveats to this method apply:
* Instances present in the :attr:`.Session.dirty` collection may
report ``False`` when tested with this method. This is because
the object may have received change events via attribute mutation,
thus placing it in :attr:`.Session.dirty`, but ultimately the state
is the same as that loaded from the database, resulting in no net
change here.
* Scalar attributes may not have recorded the previously set
value when a new value was applied, if the attribute was not loaded,
or was expired, at the time the new value was received - in these
cases, the attribute is assumed to have a change, even if there is
ultimately no net change against its database value. SQLAlchemy in
most cases does not need the "old" value when a set event occurs, so
it skips the expense of a SQL call if the old value isn't present,
based on the assumption that an UPDATE of the scalar value is
usually needed, and in those few cases where it isn't, is less
expensive on average than issuing a defensive SELECT.
The "old" value is fetched unconditionally upon set only if the
attribute container has the ``active_history`` flag set to ``True``.
This flag is set typically for primary key attributes and scalar
object references that are not a simple many-to-one. To set this
flag for any arbitrary mapped column, use the ``active_history``
argument with :func:`.column_property`.
:param instance: mapped instance to be tested for pending changes.
:param include_collections: Indicates if multivalued collections
should be included in the operation. Setting this to ``False`` is a
way to detect only local-column based properties (i.e. scalar columns
or many-to-one foreign keys) that would result in an UPDATE for this
instance upon flush.
:param passive: not used
"""
state = object_state(instance)
if not state.modified:
return False
dict_ = state.dict
for attr in state.manager.attributes:
if (
not include_collections
and hasattr(attr.impl, "get_collection")
) or not hasattr(attr.impl, "get_history"):
continue
(added, unchanged, deleted) = attr.impl.get_history(
state, dict_, passive=attributes.NO_CHANGE
)
if added or deleted:
return True
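        # this "else" belongs to the "for" loop above (for/else): it runs only
        # when no attribute reported an added or deleted value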
else:
return False
@property
def is_active(self):
"""True if this :class:`.Session` is in "transaction mode" and
is not in "partial rollback" state.
The :class:`.Session` in its default mode of ``autocommit=False``
is essentially always in "transaction mode", in that a
:class:`.SessionTransaction` is associated with it as soon as
it is instantiated. This :class:`.SessionTransaction` is immediately
replaced with a new one as soon as it is ended, due to a rollback,
commit, or close operation.
"Transaction mode" does *not* indicate whether
or not actual database connection resources are in use; the
:class:`.SessionTransaction` object coordinates among zero or more
actual database transactions, and starts out with none, accumulating
individual DBAPI connections as different data sources are used
within its scope. The best way to track when a particular
:class:`.Session` has actually begun to use DBAPI resources is to
implement a listener using the :meth:`.SessionEvents.after_begin`
method, which will deliver both the :class:`.Session` as well as the
target :class:`_engine.Connection` to a user-defined event listener.
The "partial rollback" state refers to when an "inner" transaction,
typically used during a flush, encounters an error and emits a
rollback of the DBAPI connection. At this point, the
        :class:`.Session` is in "partial rollback" and waits for the user to
call :meth:`.Session.rollback`, in order to close out the
transaction stack. It is in this "partial rollback" period that the
:attr:`.is_active` flag returns False. After the call to
:meth:`.Session.rollback`, the :class:`.SessionTransaction` is
replaced with a new one and :attr:`.is_active` returns ``True`` again.
When a :class:`.Session` is used in ``autocommit=True`` mode, the
:class:`.SessionTransaction` is only instantiated within the scope
of a flush call, or when :meth:`.Session.begin` is called. So
:attr:`.is_active` will always be ``False`` outside of a flush or
:meth:`.Session.begin` block in this mode, and will be ``True``
within the :meth:`.Session.begin` block as long as it doesn't enter
"partial rollback" state.
        From all the above, it follows that the only purpose of this flag is
for application frameworks that wish to detect if a "rollback" is
necessary within a generic error handling routine, for
:class:`.Session` objects that would otherwise be in
"partial rollback" mode. In a typical integration case, this is also
not necessary as it is standard practice to emit
:meth:`.Session.rollback` unconditionally within the outermost
exception catch.
To track the transactional state of a :class:`.Session` fully,
use event listeners, primarily the :meth:`.SessionEvents.after_begin`,
:meth:`.SessionEvents.after_commit`,
:meth:`.SessionEvents.after_rollback` and related events.
"""
return self.transaction and self.transaction.is_active
identity_map = None
"""A mapping of object identities to objects themselves.
Iterating through ``Session.identity_map.values()`` provides
access to the full set of persistent objects (i.e., those
that have row identity) currently in the session.
.. seealso::
:func:`.identity_key` - helper function to produce the keys used
in this dictionary.
"""
@property
def _dirty_states(self):
"""The set of all persistent states considered dirty.
This method returns all states that were modified including
those that were possibly deleted.
"""
return self.identity_map._dirty_states()
@property
def dirty(self):
"""The set of all persistent instances considered dirty.
E.g.::
some_mapped_object in session.dirty
Instances are considered dirty when they were modified but not
deleted.
Note that this 'dirty' calculation is 'optimistic'; most
attribute-setting or collection modification operations will
mark an instance as 'dirty' and place it in this set, even if
there is no net change to the attribute's value. At flush
time, the value of each attribute is compared to its
previously saved value, and if there's no net change, no SQL
operation will occur (this is a more expensive operation so
it's only done at flush time).
To check if an instance has actionable net changes to its
attributes, use the :meth:`.Session.is_modified` method.
"""
return util.IdentitySet(
[
state.obj()
for state in self._dirty_states
if state not in self._deleted
]
)
@property
def deleted(self):
"The set of all instances marked as 'deleted' within this ``Session``"
return util.IdentitySet(list(self._deleted.values()))
@property
def new(self):
"The set of all instances marked as 'new' within this ``Session``."
return util.IdentitySet(list(self._new.values()))
class sessionmaker(_SessionClassMethods):
"""A configurable :class:`.Session` factory.
The :class:`.sessionmaker` factory generates new
:class:`.Session` objects when called, creating them given
the configurational arguments established here.
e.g.::
# global scope
Session = sessionmaker(autoflush=False)
# later, in a local scope, create and use a session:
sess = Session()
Any keyword arguments sent to the constructor itself will override the
"configured" keywords::
Session = sessionmaker()
# bind an individual session to a connection
sess = Session(bind=connection)
The class also includes a method :meth:`.configure`, which can
be used to specify additional keyword arguments to the factory, which
will take effect for subsequent :class:`.Session` objects generated.
This is usually used to associate one or more :class:`_engine.Engine`
objects
with an existing :class:`.sessionmaker` factory before it is first
used::
# application starts
Session = sessionmaker()
# ... later
engine = create_engine('sqlite:///foo.db')
Session.configure(bind=engine)
sess = Session()
    .. seealso::
:ref:`session_getting` - introductory text on creating
sessions using :class:`.sessionmaker`.
"""
def __init__(
self,
bind=None,
class_=Session,
autoflush=True,
autocommit=False,
expire_on_commit=True,
info=None,
**kw
):
r"""Construct a new :class:`.sessionmaker`.
All arguments here except for ``class_`` correspond to arguments
accepted by :class:`.Session` directly. See the
:meth:`.Session.__init__` docstring for more details on parameters.
:param bind: a :class:`_engine.Engine` or other :class:`.Connectable`
with
which newly created :class:`.Session` objects will be associated.
:param class\_: class to use in order to create new :class:`.Session`
objects. Defaults to :class:`.Session`.
:param autoflush: The autoflush setting to use with newly created
:class:`.Session` objects.
:param autocommit: The autocommit setting to use with newly created
:class:`.Session` objects.
:param expire_on_commit=True: the expire_on_commit setting to use
with newly created :class:`.Session` objects.
:param info: optional dictionary of information that will be available
via :attr:`.Session.info`. Note this dictionary is *updated*, not
replaced, when the ``info`` parameter is specified to the specific
:class:`.Session` construction operation.
.. versionadded:: 0.9.0
:param \**kw: all other keyword arguments are passed to the
constructor of newly created :class:`.Session` objects.
"""
kw["bind"] = bind
kw["autoflush"] = autoflush
kw["autocommit"] = autocommit
kw["expire_on_commit"] = expire_on_commit
if info is not None:
kw["info"] = info
self.kw = kw
# make our own subclass of the given class, so that
# events can be associated with it specifically.
self.class_ = type(class_.__name__, (class_,), {})
def __call__(self, **local_kw):
"""Produce a new :class:`.Session` object using the configuration
established in this :class:`.sessionmaker`.
In Python, the ``__call__`` method is invoked on an object when
it is "called" in the same way as a function::
Session = sessionmaker()
session = Session() # invokes sessionmaker.__call__()
"""
for k, v in self.kw.items():
if k == "info" and "info" in local_kw:
d = v.copy()
d.update(local_kw["info"])
local_kw["info"] = d
else:
local_kw.setdefault(k, v)
return self.class_(**local_kw)
def configure(self, **new_kw):
"""(Re)configure the arguments for this sessionmaker.
e.g.::
Session = sessionmaker()
Session.configure(bind=create_engine('sqlite://'))
"""
self.kw.update(new_kw)
def __repr__(self):
return "%s(class_=%r, %s)" % (
self.__class__.__name__,
self.class_.__name__,
", ".join("%s=%r" % (k, v) for k, v in self.kw.items()),
)
def close_all_sessions():
"""Close all sessions in memory.
This function consults a global registry of all :class:`.Session` objects
and calls :meth:`.Session.close` on them, which resets them to a clean
state.
This function is not for general use but may be useful for test suites
within the teardown scheme.
.. versionadded:: 1.3
"""
for sess in _sessions.values():
sess.close()
def make_transient(instance):
"""Alter the state of the given instance so that it is :term:`transient`.
.. note::
:func:`.make_transient` is a special-case function for
advanced use cases only.
The given mapped instance is assumed to be in the :term:`persistent` or
:term:`detached` state. The function will remove its association with any
:class:`.Session` as well as its :attr:`.InstanceState.identity`. The
effect is that the object will behave as though it were newly constructed,
except retaining any attribute / collection values that were loaded at the
time of the call. The :attr:`.InstanceState.deleted` flag is also reset
if this object had been deleted as a result of using
:meth:`.Session.delete`.
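    A minimal sketch (``some_object`` and ``session`` are placeholder names;
    ``some_object`` is assumed to be an already loaded, persistent instance)::
        make_transient(some_object)
        some_object.id = None       # clear the PK if a brand new row is intended
        session.add(some_object)    # will be INSERTed as a new object on flush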
.. warning::
:func:`.make_transient` does **not** "unexpire" or otherwise eagerly
load ORM-mapped attributes that are not currently loaded at the time
the function is called. This includes attributes which:
* were expired via :meth:`.Session.expire`
* were expired as the natural effect of committing a session
transaction, e.g. :meth:`.Session.commit`
* are normally :term:`lazy loaded` but are not currently loaded
* are "deferred" via :ref:`deferred` and are not yet loaded
* were not present in the query which loaded this object, such as that
which is common in joined table inheritance and other scenarios.
After :func:`.make_transient` is called, unloaded attributes such
as those above will normally resolve to the value ``None`` when
accessed, or an empty collection for a collection-oriented attribute.
As the object is transient and un-associated with any database
identity, it will no longer retrieve these values.
.. seealso::
:func:`.make_transient_to_detached`
"""
state = attributes.instance_state(instance)
s = _state_session(state)
if s:
s._expunge_states([state])
# remove expired state
state.expired_attributes.clear()
# remove deferred callables
if state.callables:
del state.callables
if state.key:
del state.key
if state._deleted:
del state._deleted
def make_transient_to_detached(instance):
"""Make the given transient instance :term:`detached`.
.. note::
:func:`.make_transient_to_detached` is a special-case function for
advanced use cases only.
All attribute history on the given instance
will be reset as though the instance were freshly loaded
from a query. Missing attributes will be marked as expired.
The primary key attributes of the object, which are required, will be made
into the "key" of the instance.
The object can then be added to a session, or merged
possibly with the load=False flag, at which point it will look
as if it were loaded that way, without emitting SQL.
This is a special use case function that differs from a normal
call to :meth:`.Session.merge` in that a given persistent state
can be manufactured without any SQL calls.
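    A hypothetical sketch (``User`` and ``sess`` are placeholder names, not
    part of this module)::
        u = User(id=5, name="cached value")   # built by hand, no SQL emitted
        make_transient_to_detached(u)         # u now carries an identity key
        sess.add(u)                           # becomes persistent as if loaded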
.. versionadded:: 0.9.5
.. seealso::
:func:`.make_transient`
:meth:`.Session.enable_relationship_loading`
"""
state = attributes.instance_state(instance)
if state.session_id or state.key:
raise sa_exc.InvalidRequestError("Given object must be transient")
state.key = state.mapper._identity_key_from_state(state)
if state._deleted:
del state._deleted
state._commit_all(state.dict)
state._expire_attributes(state.dict, state.unloaded_expirable)
def object_session(instance):
"""Return the :class:`.Session` to which the given instance belongs.
This is essentially the same as the :attr:`.InstanceState.session`
accessor. See that attribute for details.
"""
try:
state = attributes.instance_state(instance)
except exc.NO_STATE as err:
util.raise_(
exc.UnmappedInstanceError(instance), replace_context=err,
)
else:
return _state_session(state)
_new_sessionid = util.counter()
| apache-2.0 |
earnsf/BankingTestAPI | earn-aggcat/aggcat/parser.py | 1 | 4516 | from __future__ import absolute_import
import re
from lxml import etree
try:
from collections import Counter
except ImportError:
from .counter import Counter
from .utils import remove_namespaces
def _get_item(self, index):
return self._list[index]
def _len(self):
return len(self._list)
def _iter(self):
return iter(self._list)
def _repr(self):
if hasattr(self, '_list'):
ls = [repr(l) for l in self._list[:2]]
return '<%s object [%s ...] @ %s>' % (self._name, ','.join(ls), hex(id(self)))
else:
return '<%s object @ %s>' % (self._name, hex(id(self)))
class Objectify(object):
"""Take XML output and turn it into a Pythonic Object
The goals are to:
* Provide an object that resembles the XML structure
* Have the ability to go back to XML from the object
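    A hypothetical usage sketch (the XML literal here is illustrative only;
    real tag names depend on the aggregation service responses)::
        o = Objectify('<account><accountId>1</accountId></account>').get_object()
        o.account_id    # '1' -- CamelCase leaf tags become snake_case attributes
        o.to_xml()      # returns the original XML string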
"""
def __init__(self, xml):
# raw xml
self.xml = xml
# parse the tree with lxml
self.tree = etree.fromstring(remove_namespaces(etree.XML(xml)))
# regex pattern for tag name cleanup
self.tag_pattern = re.compile("(?!^)([A-Z]+)")
self.root_tag = self.tree.tag
# create a base object wrapper
self.obj = self._create_object('Objectified XML')
# check to see this is only one node with no children
# Ex. get_customer_accounts is empty
if not self.tree.getchildren():
self.obj = self._create_object(self.tree.tag)
else:
self._walk_and_objectify(self.tree, self.obj)
    def _create_object(self, name, attributes=None):
        """Dynamically create an object"""
        # use None instead of a mutable default argument so calls without
        # explicit attributes do not share (and mutate) a single dict
        attributes = dict(attributes) if attributes else {}
        attributes.update({
            '_name': name.capitalize(),
            '__repr__': _repr
        })
        return type(name.capitalize(), (object,), attributes)()
def _create_list_object(self, name):
"""Dynamically create an object that has list type functionality"""
return self._create_object(name, {
'_list': [],
'__len__': _len,
'__iter__': _iter,
'__getitem__': _get_item
})
def _clean_tag_name(self, tag_name):
"""Convert the CamelCase format of tag name to
a camel_case format"""
return re.sub(self.tag_pattern, r'_\1', tag_name).lower()
def _is_list_xml(self, element):
"""Detect if the next set of XML elements contain duplicates
which means it is a listable set of elements"""
tags = []
for e in element.xpath('./*'):
tags.append(e.tag)
for count in Counter(tags).values():
if count > 1:
return True
return False
def _walk_and_objectify(self, element, obj):
"""Walk the XML tree recursively and make objects out of the structure"""
if element.getchildren():
# look ahead and create a list object instead
needs_list_obj = self._is_list_xml(element)
if needs_list_obj:
new_obj = self._create_list_object(element.tag)
else:
new_obj = self._create_object(element.tag)
obj_attr_value = getattr(obj, element.tag, None)
has_list = hasattr(obj, '_list')
if obj_attr_value is None and not has_list:
setattr(obj, element.tag, new_obj)
else:
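                # the duplicate-tag look-ahead created a list object for the
                # parent, so repeated sibling tags are accumulated in its _list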
l = getattr(obj, '_list')
l.append(new_obj)
setattr(obj, '_list', l)
for child in element.getchildren():
self._walk_and_objectify(child, new_obj)
else:
setattr(obj, self._clean_tag_name(element.tag), element.text)
def get_object(self):
root_obj = self.obj
if hasattr(self.obj, self.root_tag):
root_obj = getattr(self.obj, self.root_tag)
# sometimes there is only one attribute that is a converted object
# return this one instead of the surrounding object. I am not
# sure this is the desired result, but i'll leave it for now
# this is causing an ERROR!!
"""
appended_attrs = [
k for k
in root_obj.__dict__.iterkeys()
if not k.startswith('_') and not '_' in k
]
if len(appended_attrs) == 1:
root_obj = getattr(root_obj, appended_attrs.pop())
"""
# append the to_xml() attribute to you can easily get the xml from the root object
root_obj.to_xml = lambda: self.xml
return root_obj
| gpl-3.0 |
vathpela/anaconda | pyanaconda/ui/gui/spokes/software_selection.py | 1 | 25487 | # Software selection spoke classes
#
# Copyright (C) 2011-2013 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
import gi
gi.require_version("Gtk", "3.0")
gi.require_version("Pango", "1.0")
from gi.repository import Gtk, Pango
from pyanaconda.flags import flags
from pyanaconda.core.i18n import _, C_, CN_
from pyanaconda.payload import PackagePayload, payloadMgr, NoSuchGroup, PayloadError
from pyanaconda.threading import threadMgr, AnacondaThread
from pyanaconda.core import util, constants
from pyanaconda.ui.communication import hubQ
from pyanaconda.ui.gui.spokes import NormalSpoke
from pyanaconda.ui.gui.spokes.lib.detailederror import DetailedErrorDialog
from pyanaconda.ui.gui.utils import blockedHandler, escape_markup
from pyanaconda.core.async_utils import async_action_wait
from pyanaconda.ui.categories.software import SoftwareCategory
from pyanaconda.anaconda_loggers import get_module_logger
log = get_module_logger(__name__)
import sys, copy
__all__ = ["SoftwareSelectionSpoke"]
class SoftwareSelectionSpoke(NormalSpoke):
"""
.. inheritance-diagram:: SoftwareSelectionSpoke
:parts: 3
"""
builderObjects = ["addonStore", "environmentStore", "softwareWindow"]
mainWidgetName = "softwareWindow"
uiFile = "spokes/software_selection.glade"
helpFile = "SoftwareSpoke.xml"
category = SoftwareCategory
icon = "package-x-generic-symbolic"
title = CN_("GUI|Spoke", "_SOFTWARE SELECTION")
# Add-on selection states
# no user interaction with this add-on
_ADDON_DEFAULT = 0
# user selected
_ADDON_SELECTED = 1
# user de-selected
_ADDON_DESELECTED = 2
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._errorMsgs = None
self._tx_id = None
self._selectFlag = False
self._environmentListBox = self.builder.get_object("environmentListBox")
self._addonListBox = self.builder.get_object("addonListBox")
# Connect viewport scrolling with listbox focus events
environmentViewport = self.builder.get_object("environmentViewport")
addonViewport = self.builder.get_object("addonViewport")
self._environmentListBox.set_focus_vadjustment(Gtk.Scrollable.get_vadjustment(environmentViewport))
self._addonListBox.set_focus_vadjustment(Gtk.Scrollable.get_vadjustment(addonViewport))
# Used to store how the user has interacted with add-ons for the default add-on
# selection logic. The dictionary keys are group IDs, and the values are selection
# state constants. See refreshAddons for how the values are used.
self._addonStates = {}
# Create a RadioButton that will never be displayed to use as the group for the
# environment radio buttons. This way the environment radio buttons can all appear
# unselected in the case of modifying data from kickstart.
self._firstRadio = Gtk.RadioButton(group=None)
# Used for detecting whether anything's changed in the spoke.
self._origAddons = []
self._origEnvironment = None
# Whether we are using package selections from a kickstart
self._kickstarted = flags.automatedInstall and self.data.packages.seen
# Whether the payload is in an error state
self._error = False
# Register event listeners to update our status on payload events
payloadMgr.addListener(payloadMgr.STATE_PACKAGE_MD, self._downloading_package_md)
payloadMgr.addListener(payloadMgr.STATE_GROUP_MD, self._downloading_group_md)
payloadMgr.addListener(payloadMgr.STATE_FINISHED, self._payload_finished)
payloadMgr.addListener(payloadMgr.STATE_ERROR, self._payload_error)
# Add an invisible radio button so that we can show the environment
# list with no radio buttons ticked
self._fakeRadio = Gtk.RadioButton(group=None)
self._fakeRadio.set_active(True)
# Payload event handlers
def _downloading_package_md(self):
# Reset the error state from previous payloads
self._error = False
hubQ.send_message(self.__class__.__name__, _(constants.PAYLOAD_STATUS_PACKAGE_MD))
def _downloading_group_md(self):
hubQ.send_message(self.__class__.__name__, _(constants.PAYLOAD_STATUS_GROUP_MD))
@property
def environment(self):
"""A wrapper for the environment specification in kickstart"""
return self.data.packages.environment
@environment.setter
def environment(self, value):
self.data.packages.environment = value
@property
def environmentid(self):
"""Return the "machine readable" environment id
Alternatively we could have just "canonicalized" the
environment description to the "machine readable" format
when reading it from kickstart for the first time.
        But this could result in the input and output kickstart
        differing, which would be rather confusing for the user.
So we don't touch the specification from kickstart
if it is valid and use this property when we need
the "machine readable" form.
"""
try:
return self.payload.environmentId(self.environment)
except NoSuchGroup:
return None
@property
def environment_valid(self):
"""Return if the currently set environment is valid
(represents an environment known by the payload)
"""
# None means the environment has not been set by the user,
# which means:
# * set the default environment during interactive installation
# * ask user to specify an environment during kickstart installation
if self.environment is None:
return True
else:
return self.environmentid in self.payload.environments
def _payload_finished(self):
if self.environment_valid:
log.info("using environment from kickstart: %s", self.environment)
else:
log.error("unknown environment has been specified in kickstart and will be ignored: %s",
self.data.packages.environment)
# False means that the environment has been set to an invalid value and needs to
# be manually set to a valid one.
self.environment = False
def _payload_error(self):
self._error = True
hubQ.send_message(self.__class__.__name__, payloadMgr.error)
def _apply(self):
# Environment needs to be set during a GUI installation, but is not required
# for a kickstart install (even partial)
if not self.environment:
log.debug("Environment is not set, skip user packages settings")
return
# NOTE: This block is skipped for kickstart where addons and _origAddons will
# both be [], preventing it from wiping out the kickstart's package selection
addons = self._get_selected_addons()
if not self._kickstarted and set(addons) != set(self._origAddons):
self._selectFlag = False
self.payload.data.packages.packageList = []
self.payload.data.packages.groupList = []
self.payload.selectEnvironment(self.environment)
log.debug("Environment selected for installation: %s", self.environment)
log.debug("Groups selected for installation: %s", addons)
for group in addons:
self.payload.selectGroup(group)
# And then save these values so we can check next time.
self._origAddons = addons
self._origEnvironment = self.environment
hubQ.send_not_ready(self.__class__.__name__)
hubQ.send_not_ready("SourceSpoke")
threadMgr.add(AnacondaThread(name=constants.THREAD_CHECK_SOFTWARE,
target=self.checkSoftwareSelection))
def apply(self):
self._apply()
def checkSoftwareSelection(self):
from pyanaconda.payload import DependencyError
hubQ.send_message(self.__class__.__name__, _("Checking software dependencies..."))
try:
self.payload.checkSoftwareSelection()
except DependencyError as e:
self._errorMsgs = str(e)
hubQ.send_message(self.__class__.__name__, _("Error checking software dependencies"))
self._tx_id = None
else:
self._errorMsgs = None
self._tx_id = self.payload.txID
finally:
hubQ.send_ready(self.__class__.__name__, False)
hubQ.send_ready("SourceSpoke", False)
@property
def completed(self):
processingDone = bool(not threadMgr.get(constants.THREAD_CHECK_SOFTWARE) and
not threadMgr.get(constants.THREAD_PAYLOAD) and
not self._errorMsgs and self.txid_valid)
# * we should always check processingDone before checking the other variables,
# as they might be inconsistent until processing is finished
# * we can't let the installation proceed until a valid environment has been set
if processingDone:
if self.environment is not None:
# if we have environment it needs to be valid
return self.environment_valid
# if we don't have environment we need to at least have the %packages
# section in kickstart
elif self._kickstarted:
return True
# no environment and no %packages section -> manual intervention is needed
else:
return False
else:
return False
@property
def changed(self):
if not self.environment:
return True
addons = self._get_selected_addons()
# Don't redo dep solving if nothing's changed.
if self.environment == self._origEnvironment and set(addons) == set(self._origAddons) and \
self.txid_valid:
return False
return True
@property
def mandatory(self):
return True
@property
def ready(self):
# By default, the software selection spoke is not ready. We have to
# wait until the installation source spoke is completed. This could be
# because the user filled something out, or because we're done fetching
# repo metadata from the mirror list, or we detected a DVD/CD.
return bool(not threadMgr.get(constants.THREAD_SOFTWARE_WATCHER) and
not threadMgr.get(constants.THREAD_PAYLOAD) and
not threadMgr.get(constants.THREAD_CHECK_SOFTWARE) and
self.payload.baseRepo is not None)
@property
def showable(self):
return isinstance(self.payload, PackagePayload)
@property
def status(self):
if self._errorMsgs:
return _("Error checking software selection")
if not self.ready:
return _("Installation source not set up")
if not self.txid_valid:
return _("Source changed - please verify")
# kickstart installation
if flags.automatedInstall:
if self._kickstarted:
# %packages section is present in kickstart but environment is not set
if self.environment is None:
return _("Custom software selected")
# environment is set to an invalid value
elif not self.environment_valid:
return _("Invalid environment specified in kickstart")
# we have no packages section in the kickstart and no environment has been set
elif not self.environment:
return _("Nothing selected")
if not flags.automatedInstall:
if not self.environment:
# No environment yet set
return _("Nothing selected")
elif not self.environment_valid:
# selected environment is not valid, this can happen when a valid environment
# is selected (by default, manually or from kickstart) and then the installation
# source is switched to one where the selected environment is no longer valid
return _("Selected environment is not valid")
return self.payload.environmentDescription(self.environment)[0]
def initialize(self):
super().initialize()
self.initialize_start()
threadMgr.add(AnacondaThread(name=constants.THREAD_SOFTWARE_WATCHER,
target=self._initialize))
def _initialize(self):
threadMgr.wait(constants.THREAD_PAYLOAD)
# Select groups which should be selected by kickstart
try:
for group in self.payload.selectedGroupsIDs():
if self.environment and self.payload.environmentOptionIsDefault(self.environment, group):
self._addonStates[group] = self._ADDON_DEFAULT
else:
self._addonStates[group] = self._ADDON_SELECTED
except PayloadError as e:
# Group translation is not supported
log.warning(e)
# It's better to have all or nothing selected from kickstart
self._addonStates = {}
if not self._kickstarted:
# having done all the slow downloading, we need to do the first refresh
# of the UI here so there's an environment selected by default. This
# happens inside the main thread by necessity. We can't do anything
# that takes any real amount of time, or it'll block the UI from
# updating.
if not self._first_refresh():
return
hubQ.send_ready(self.__class__.__name__, False)
# If packages were provided by an input kickstart file (or some other means),
# we should do dependency solving here.
if not self._error:
self._apply()
# report that software spoke initialization has been completed
self.initialize_done()
@async_action_wait
def _first_refresh(self):
self.refresh()
return True
def _add_row(self, listbox, name, desc, button, clicked):
row = Gtk.ListBoxRow()
box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=6)
button.set_valign(Gtk.Align.START)
button.connect("toggled", clicked, row)
box.add(button)
label = Gtk.Label(label="<b>%s</b>\n%s" % (escape_markup(name), escape_markup(desc)),
use_markup=True, wrap=True, wrap_mode=Pango.WrapMode.WORD_CHAR,
hexpand=True, xalign=0, yalign=0.5)
box.add(label)
row.add(box)
listbox.insert(row, -1)
def refresh(self):
super().refresh()
threadMgr.wait(constants.THREAD_PAYLOAD)
firstEnvironment = True
self._clear_listbox(self._environmentListBox)
# If no environment is selected, use the default from the instclass.
# If nothing is set in the instclass, the first environment will be
# selected below.
if not self.environment and self.payload.instclass and \
self.payload.instclass.defaultPackageEnvironment in self.payload.environments:
self.environment = self.payload.instclass.defaultPackageEnvironment
# create rows for all valid environments
for environmentid in self.payload.environments:
(name, desc) = self.payload.environmentDescription(environmentid)
# use the invisible radio button as a group for all environment
# radio buttons
radio = Gtk.RadioButton(group=self._fakeRadio)
# automatically select the first environment if we are on
# manual install and the install class does not specify one
if firstEnvironment and not flags.automatedInstall: # manual installation
#
# Note about self.environment being None:
# =======================================
# None indicates that an environment has not been set, which is a valid
# value of the environment variable.
# Only non existing environments are evaluated as invalid
if not self.environment_valid or self.environment is None:
self.environment = environmentid
firstEnvironment = False
# check if the selected environment (if any) does match the current row
# and tick the radio button if it does
radio.set_active(self.environment_valid and self.environmentid == environmentid)
self._add_row(self._environmentListBox, name, desc, radio, self.on_radio_button_toggled)
self.refreshAddons()
self._environmentListBox.show_all()
self._addonListBox.show_all()
def _addAddon(self, grp):
(name, desc) = self.payload.groupDescription(grp)
if grp in self._addonStates:
# If the add-on was previously selected by the user, select it
if self._addonStates[grp] == self._ADDON_SELECTED:
selected = True
# If the add-on was previously de-selected by the user, de-select it
elif self._addonStates[grp] == self._ADDON_DESELECTED:
selected = False
# Otherwise, use the default state
else:
selected = self.payload.environmentOptionIsDefault(self.environmentid, grp)
else:
selected = self.payload.environmentOptionIsDefault(self.environmentid, grp)
check = Gtk.CheckButton()
check.set_active(selected)
self._add_row(self._addonListBox, name, desc, check, self.on_checkbox_toggled)
@property
def _addSep(self):
""" Whether the addon list contains a separator. """
return len(self.payload.environmentAddons[self.environmentid][0]) > 0 and \
len(self.payload.environmentAddons[self.environmentid][1]) > 0
def refreshAddons(self):
if self.environment and (self.environmentid in self.payload.environmentAddons):
self._clear_listbox(self._addonListBox)
# We have two lists: One of addons specific to this environment,
# and one of all the others. The environment-specific ones will be displayed
# first and then a separator, and then the generic ones. This is to make it
# a little more obvious that the thing on the left side of the screen and the
# thing on the right side of the screen are related.
#
# If a particular add-on was previously selected or de-selected by the user, that
# state will be used. Otherwise, the add-on will be selected if it is a default
# for this environment.
for grp in self.payload.environmentAddons[self.environmentid][0]:
self._addAddon(grp)
# This marks a separator in the view - only add it if there's both environment
# specific and generic addons.
if self._addSep:
self._addonListBox.insert(Gtk.Separator(), -1)
for grp in self.payload.environmentAddons[self.environmentid][1]:
self._addAddon(grp)
self._selectFlag = True
if self._errorMsgs:
self.set_warning(_("Error checking software dependencies. <a href=\"\">Click for details.</a>"))
else:
self.clear_info()
def _allAddons(self):
if self.environmentid in self.payload.environmentAddons:
addons = copy.copy(self.payload.environmentAddons[self.environmentid][0])
if self._addSep:
addons.append('')
addons += self.payload.environmentAddons[self.environmentid][1]
else:
addons = []
return addons
def _get_selected_addons(self):
retval = []
addons = self._allAddons()
for (ndx, row) in enumerate(self._addonListBox.get_children()):
box = row.get_children()[0]
if isinstance(box, Gtk.Separator):
continue
button = box.get_children()[0]
if button.get_active():
retval.append(addons[ndx])
return retval
def _mark_addon_selection(self, grpid, selected):
# Mark selection or return its state to the default state
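        # Resulting states, as an illustrative summary of the branches below:
        #   selected   and default for this environment -> _ADDON_DEFAULT
        #   selected   and not default                  -> _ADDON_SELECTED
        #   deselected and not default                  -> _ADDON_DEFAULT
        #   deselected and default for this environment -> _ADDON_DESELECTED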
if selected:
if self.payload.environmentOptionIsDefault(self.environment, grpid):
self._addonStates[grpid] = self._ADDON_DEFAULT
else:
self._addonStates[grpid] = self._ADDON_SELECTED
else:
if not self.payload.environmentOptionIsDefault(self.environment, grpid):
self._addonStates[grpid] = self._ADDON_DEFAULT
else:
self._addonStates[grpid] = self._ADDON_DESELECTED
def _clear_listbox(self, listbox):
for child in listbox.get_children():
listbox.remove(child)
            del child
@property
def txid_valid(self):
return self._tx_id == self.payload.txID
# Signal handlers
def on_radio_button_toggled(self, radio, row):
# If the radio button toggled to inactive, don't reactivate the row
if not radio.get_active():
return
row.activate()
def on_environment_activated(self, listbox, row):
if not self._selectFlag:
return
        # GUI selections mean that packages are no longer coming from kickstart
self._kickstarted = False
box = row.get_children()[0]
button = box.get_children()[0]
with blockedHandler(button, self.on_radio_button_toggled):
button.set_active(True)
# Mark the clicked environment as selected and update the screen.
self.environment = self.payload.environments[row.get_index()]
self.refreshAddons()
self._addonListBox.show_all()
def on_checkbox_toggled(self, button, row):
# Select the addon. The button is already toggled.
self._select_addon_at_row(row, button.get_active())
def on_addon_activated(self, listbox, row):
# Skip the separator.
box = row.get_children()[0]
if isinstance(box, Gtk.Separator):
return
# Select the addon. The button is not toggled yet.
button = box.get_children()[0]
self._select_addon_at_row(row, not button.get_active())
def _select_addon_at_row(self, row, is_selected):
        # GUI selections mean that packages are no longer coming from kickstart.
self._kickstarted = False
# Activate the row.
with blockedHandler(row.get_parent(), self.on_addon_activated):
row.activate()
# Activate the button.
box = row.get_children()[0]
button = box.get_children()[0]
with blockedHandler(button, self.on_checkbox_toggled):
button.set_active(is_selected)
# Mark the selection.
addons = self._allAddons()
group = addons[row.get_index()]
self._mark_addon_selection(group, is_selected)
def on_info_bar_clicked(self, *args):
if not self._errorMsgs:
return
label = _("The software marked for installation has the following errors. "
"This is likely caused by an error with your installation source. "
"You can quit the installer, change your software source, or change "
"your software selections.")
dialog = DetailedErrorDialog(self.data,
buttons=[C_("GUI|Software Selection|Error Dialog", "_Quit"),
C_("GUI|Software Selection|Error Dialog", "_Modify Software Source"),
C_("GUI|Software Selection|Error Dialog", "Modify _Selections")],
label=label)
with self.main_window.enlightbox(dialog.window):
dialog.refresh(self._errorMsgs)
rc = dialog.run()
dialog.window.destroy()
if rc == 0:
# Quit.
util.ipmi_abort(scripts=self.data.scripts)
sys.exit(0)
elif rc == 1:
# Send the user to the installation source spoke.
self.skipTo = "SourceSpoke"
self.window.emit("button-clicked")
elif rc == 2:
# Close the dialog so the user can change selections.
pass
else:
pass
| gpl-2.0 |
rainbowbreeze/sanity-check-pavia | shell/mainfile.py | 1 | 1227 | import json
__author__ = 'rainbowbreeze'
import os
from logic.contentdownloader import ContentDownloader
from logic.scraper import Scraper
from logic.itemsmanager import ItemsManager
def dostuff():
with open('mainfile.py', 'r') as f:
read_line = f.read()
print(read_line)
# http://www.artima.com/weblogs/viewpost.jsp?thread=4829
def main():
# Downloads the page
downloader = ContentDownloader()
page = downloader.downloadPage("http://www.sanmatteo.org/site/home/il-san-matteo/albo-on-line.html")
if not page:
print("Cannot download the page :(")
return
# Parses it
scraper = Scraper()
items_scraped = scraper.parsePage(page)
items_manager = ItemsManager()
new_items = items_manager.find_new_item_to_download(items_scraped)
#for bando in new_items:
#print(bando.__dict__)
#print(json.dumps(bando))
#base, ext = os.path.splitext(bando.url)
#print base, ext
#items_manager.downloadBando(bando)
print "Total new item to download:", len(new_items)
# Finally, saves the new items to the control file
items_manager.save_to_control_file(new_items)
return
if __name__ == "__main__":
main() | gpl-3.0 |
angelapper/edx-platform | lms/djangoapps/shoppingcart/tests/test_context_processor.py | 10 | 3318 | """
Unit tests for shoppingcart context_processor
"""
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from mock import Mock, patch
from course_modes.tests.factories import CourseModeFactory
from shoppingcart.context_processor import user_has_cart_context_processor
from shoppingcart.models import Order, PaidCourseRegistration
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
class UserCartContextProcessorUnitTest(ModuleStoreTestCase):
"""
Unit test for shoppingcart context_processor
"""
def setUp(self):
super(UserCartContextProcessorUnitTest, self).setUp()
self.user = UserFactory.create()
self.request = Mock()
def add_to_cart(self):
"""
Adds content to self.user's cart
"""
course = CourseFactory.create(org='MITx', number='999', display_name='Robot Super Course')
CourseModeFactory.create(course_id=course.id)
cart = Order.get_cart_for_user(self.user)
PaidCourseRegistration.add_to_order(cart, course.id)
@patch.dict(settings.FEATURES, {'ENABLE_SHOPPING_CART': False, 'ENABLE_PAID_COURSE_REGISTRATION': True})
def test_no_enable_shoppingcart(self):
"""
Tests when FEATURES['ENABLE_SHOPPING_CART'] is not set
"""
self.add_to_cart()
self.request.user = self.user
context = user_has_cart_context_processor(self.request)
self.assertFalse(context['should_display_shopping_cart_func']())
@patch.dict(settings.FEATURES, {'ENABLE_SHOPPING_CART': True, 'ENABLE_PAID_COURSE_REGISTRATION': False})
def test_no_enable_paid_course_registration(self):
"""
Tests when FEATURES['ENABLE_PAID_COURSE_REGISTRATION'] is not set
"""
self.add_to_cart()
self.request.user = self.user
context = user_has_cart_context_processor(self.request)
self.assertFalse(context['should_display_shopping_cart_func']())
@patch.dict(settings.FEATURES, {'ENABLE_SHOPPING_CART': True, 'ENABLE_PAID_COURSE_REGISTRATION': True})
def test_anonymous_user(self):
"""
Tests when request.user is anonymous
"""
self.request.user = AnonymousUser()
context = user_has_cart_context_processor(self.request)
self.assertFalse(context['should_display_shopping_cart_func']())
@patch.dict(settings.FEATURES, {'ENABLE_SHOPPING_CART': True, 'ENABLE_PAID_COURSE_REGISTRATION': True})
def test_no_items_in_cart(self):
"""
Tests when request.user doesn't have a cart with items
"""
self.request.user = self.user
context = user_has_cart_context_processor(self.request)
self.assertFalse(context['should_display_shopping_cart_func']())
@patch.dict(settings.FEATURES, {'ENABLE_SHOPPING_CART': True, 'ENABLE_PAID_COURSE_REGISTRATION': True})
def test_items_in_cart(self):
"""
Tests when request.user has a cart with items
"""
self.add_to_cart()
self.request.user = self.user
context = user_has_cart_context_processor(self.request)
self.assertTrue(context['should_display_shopping_cart_func']())
| agpl-3.0 |
vakkov/android-n900-nitdroid_kernel | tools/perf/scripts/python/sctop.py | 895 | 1936 | # system call top
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
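# Example invocations (illustrative; "firefox" is a hypothetical process name):
#
#   perf trace -s sctop.py               # system-wide totals, 3 second refresh
#   perf trace -s sctop.py 5             # refresh every 5 seconds
#   perf trace -s sctop.py firefox 5     # only syscalls issued by "firefox"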
import thread
import time
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf trace -s syscall-counts.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
syscalls = autodict()
def trace_begin():
thread.start_new_thread(print_syscall_totals, (interval,))
pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40d %10d\n" % (id, val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
| gpl-2.0 |
DefyVentures/edx-platform | common/test/acceptance/pages/studio/settings.py | 16 | 2936 | """
Course Schedule and Details Settings page.
"""
from bok_choy.promise import EmptyPromise
from .course_page import CoursePage
from .utils import press_the_notification_button
class SettingsPage(CoursePage):
"""
Course Schedule and Details Settings page.
"""
url_path = "settings/details"
def is_browser_on_page(self):
return self.q(css='body.view-settings').present
def get_elements(self, css_selector):
self.wait_for_element_presence(
css_selector,
'Elements matching "{}" selector are present'.format(css_selector)
)
results = self.q(css=css_selector)
return results
def get_element(self, css_selector):
results = self.get_elements(css_selector=css_selector)
return results[0] if results else None
@property
def pre_requisite_course_options(self):
"""
Returns the pre-requisite course drop down field options.
"""
return self.get_elements('#pre-requisite-course')
@property
def entrance_exam_field(self):
"""
Returns the enable entrance exam checkbox.
"""
return self.get_element('#entrance-exam-enabled')
@property
def alert_confirmation_title(self):
"""
Returns the alert confirmation element, which contains text
such as 'Your changes have been saved.'
"""
return self.get_element('#alert-confirmation-title')
def require_entrance_exam(self, required=True):
"""
Set the entrance exam requirement via the checkbox.
"""
checkbox = self.entrance_exam_field
selected = checkbox.is_selected()
if required and not selected:
checkbox.click()
self.wait_for_element_visibility(
'#entrance-exam-minimum-score-pct',
'Entrance exam minimum score percent is visible'
)
if not required and selected:
checkbox.click()
self.wait_for_element_invisibility(
'#entrance-exam-minimum-score-pct',
'Entrance exam minimum score percent is invisible'
)
def save_changes(self, wait_for_confirmation=True):
"""
Clicks save button, waits for confirmation unless otherwise specified
"""
press_the_notification_button(self, "save")
if wait_for_confirmation:
self.wait_for_element_visibility(
'#alert-confirmation-title',
'Save confirmation message is visible'
)
def refresh_page(self, wait_for_confirmation=True):
"""
Reload the page.
"""
self.browser.refresh()
if wait_for_confirmation:
EmptyPromise(
lambda: self.q(css='body.view-settings').present,
'Page is refreshed'
).fulfill()
self.wait_for_ajax()
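# Illustrative usage sketch (hypothetical test code; the constructor arguments
# follow CoursePage and are shown indicatively, not taken from this module):
#   settings_page = SettingsPage(browser, "org", "number", "run")
#   settings_page.visit()
#   settings_page.require_entrance_exam(required=True)
#   settings_page.save_changes()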
| agpl-3.0 |
cwoac/TTANR | ttsanr.py | 1 | 8684 | #!/usr/bin/env python27
import ttsutil
import os.path
import urllib
import json
import untangle
import argparse
import shutil
import PIL.Image
cards={}
debug=True
# TTS util overrides
def make_filename(image):
return ttsutil.make_cache_filename(image,"ANR")
def make_cache_dir():
ttsutil.make_cache_dir("ANR")
def build_chest_file(deck,base_url):
chest=ttsutil.build_chest_file(deck,base_url)
if deck['side']=='Corp':
chest['ObjectStates'][0]['Transform']['rotY']=90
# check for special exceptions
if deck['jinteki-biotech']:
chest['ObjectStates'][0]['CustomDeck']['2']= {
"FaceURL":base_url+'08012-id.jpg',
"BackURL":base_url+'08012-id-back.jpg'
}
chest['ObjectStates'][0]['DeckIDs']=[200,201,202]+chest['ObjectStates'][0]['DeckIDs']
return chest
def get_runner_back():
filename = os.path.join("cards","ANR","runner-back.png")
if not os.path.isfile(filename):
print("No runner back found. Downloading")
data=urllib.urlopen("http://vignette1.wikia.nocookie.net/ancur/images/a/a0/Runner_back.png/revision/latest").read()
fh=open(filename,'wb')
fh.write(data)
fh.close()
return ttsutil.load_image_at_size(filename)
def get_corp_back():
filename = os.path.join("cards","ANR","corp-back.png")
if not os.path.isfile(filename):
print("No corp back found. Downloading")
data=urllib.urlopen("http://vignette3.wikia.nocookie.net/ancur/images/c/c3/Corp_back.png/revision/latest").read()
fh=open(filename,'wb')
fh.write(data)
fh.close()
return ttsutil.load_image_at_size(filename)
def get_card(id):
if not cards.has_key(id):
filename = make_filename(id+".json")
if not os.path.isfile(filename):
make_cache_dir()
print "Downloading card id: %s" % id
data=urllib.urlopen("http://netrunnerdb.com/api/card/%s" % id).read()
fh=open(filename,'w')
fh.write(data)
fh.close()
data=open(filename).read()
j_data=json.loads(data)
cards[id]=j_data[0]
image_filename = make_filename(id+".png")
if not os.path.isfile(image_filename):
make_cache_dir()
print "Downloading card image to: %s" % image_filename
data=urllib.urlopen("http://netrunnerdb.com/%s" % j_data[0]['imagesrc']).read()
fh=open(image_filename,'wb')
fh.write(data)
fh.close()
return cards[id]
def get_flip_image(id,idx):
if id!='08012':
print("Unknown flip id %s" % id )
return
if idx!='A' and idx!='B' and idx!='C':
print("Unknown flip index %s for %s" % (idx,id) )
return
filename = make_filename(id+idx+".png")
if not os.path.isfile(filename):
make_cache_dir()
print "Downloading card image to: %s" % filename
# TODO: figure out a better way of doing this.
data=None
if idx=='A':
data=urllib.urlopen("http://vignette2.wikia.nocookie.net/ancur/images/9/96/08012A.png").read()
if idx=='B':
data=urllib.urlopen("http://vignette3.wikia.nocookie.net/ancur/images/6/6a/08012B.png").read()
if idx=='C':
data=urllib.urlopen("http://vignette3.wikia.nocookie.net/ancur/images/a/a1/08012C.png").read()
fh=open(filename,'wb')
fh.write(data)
fh.close()
return ttsutil.load_image_at_size(filename)
def print_deck(deck):
print('''
Deck: %s
Filename: %s
Side: %s
Size: %s
''' % ( deck['name'],deck['filename'],deck['side'],ttsutil.count_deck(deck)))
def load_netrunnerdb_deck(id):
print("Attempting to load deck %s from netrunnerdb" % id )
data=urllib.urlopen("http://netrunnerdb.com/api/decklist/%s" % id).read()
j_data=json.loads(data)
deck={
'name':j_data['name'],
'cards':[],
'filename':ttsutil.sanitise_filename(j_data['name']),
'jinteki-biotech':False
}
for id in j_data['cards'].keys():
card=get_card(id)
if card['type_code']=='identity':
if id=='08012':
deck['jinteki-biotech']=True
else:
deck['cards'].insert(0,(id,1))
deck['side']=card['side']
else:
deck['cards'].append((id,j_data['cards'][id]))
if debug:
print_deck(deck)
return deck
def load_octgn_deck(filename):
print("Attempting to load octgn deck from %s" % filename )
deckXML = untangle.parse(filename)
deck={
'cards':[],
'name':os.path.splitext(os.path.basename(filename))[0],
'jinteki-biotech':False
}
# add id
id=deckXML.deck.section[0].card['id'][-5:]
if id=='08012':
deck['jinteki-biotech']=True
else:
deck['cards'].append((id,1))
idcard=get_card(id)
deck['side']=idcard['side']
deck['filename']=ttsutil.sanitise_filename(deck['name'])
# add the rest
for card in deckXML.deck.section[1].card:
deck['cards'].append((card['id'][-5:],int(card['qty'])))
if debug:
print_deck(deck)
return deck
def build_08012_image():
# build mini-deck for flip ID Jinteki Biotech
im = PIL.Image.new('RGBA',(10*ttsutil.imgW,7*ttsutil.imgH),(0,0,0,0))
offX=0
image=get_flip_image('08012','A')
im.paste(image,(offX,0))
offX+=ttsutil.imgW
image=get_flip_image('08012','B')
im.paste(image,(offX,0))
offX+=ttsutil.imgW
image=get_flip_image('08012','C')
im.paste(image,(offX,0))
back=ttsutil.get_cache_image('08012','ANR')
im.paste(back,(2700,2514))
return im
def load_anr_back(deck):
back=None
if deck['back_filename']:
print "loading custom back %s" % deck['back_filename']
back=ttsutil.load_image_at_size(deck['back_filename'])
else:
if deck['side']=='Corp':
back=get_corp_back()
else:
back=get_runner_back()
return back
def write_files(deck,base_url,write_local,local_target,install):
chest=build_chest_file(deck,base_url)
back_image=load_anr_back(deck)
ttsutil.write_files(deck,chest,base_url,write_local,local_target,install,back_image,"ANR")
if not deck['jinteki-biotech']:
return
jbDeckImage=build_08012_image()
jbBackImage=ttsutil.get_cache_image('08012','ANR')
basefilename=deck['filename']
jbDeckFilename=os.path.join(local_target,"08012-id.jpg")
jbBackFilename=os.path.join(local_target,"08012-id-back.jpg")
if write_local:
print("Writing %s" % jbDeckFilename)
print("Writing %s" % jbBackFilename)
jbDeckImage.save(jbDeckFilename,'JPEG')
jbBackImage.save(jbBackFilename,'JPEG')
if install:
ttsJbDeckFilename=ttsutil.make_tts_image_filename(base_url+jbDeckFilename)
ttsJbBackFilename=ttsutil.make_tts_image_filename(base_url+jbBackFilename)
print("Writing %s" % ttsJbDeckFilename)
jbDeckImage.save(ttsJbDeckFilename,'JPEG')
print("Writing %s" % ttsJbBackFilename)
jbBackImage.save(ttsJbBackFilename,'JPEG')
def main():
parser = argparse.ArgumentParser(description="Create a set of files for loading ANR decks into TableTop Simulator")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("-n","--netrunnerdb",metavar="ID",help="Load deck from netrunnerdb using given ID.")
group.add_argument("-o","--octgn",metavar="file",help="Load the given o8n file (in octgn format).")
parser.add_argument("-b","--back",metavar="backgroundFile",help="Override default back with given file.")
parser.add_argument("-i","--install",action="store_true",help="Install files into local TTS install.")
parser.add_argument("-w","--writelocal",action="store_true",help="Write files into the current directory.")
parser.add_argument("-u","--url",help="Base url for where the images will be made availiable")
args = parser.parse_args()
if not (args.install or args.url):
print("Warning: Neither install (-i) or url (-u) has been specified. You probably don't want to do this.")
if not (args.install or args.writelocal):
parser.error("At least one of -i or -w is required.")
baseurl=args.url or "null://"
if baseurl.startswith('file') and not baseurl.endswith(os.sep):
baseurl+=os.sep
if baseurl.startswith('http') and not baseurl.endswith('/'):
baseurl+='/'
deck=None
if args.netrunnerdb != None:
deck=load_netrunnerdb_deck(args.netrunnerdb)
if args.octgn != None:
deck=load_octgn_deck(args.octgn)
deck['back_filename']=args.back
write_files(deck,baseurl,args.writelocal,"",args.install)
if __name__ == "__main__":
main()
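# Example invocations (illustrative; the deck id, file name and URL are hypothetical):
#   python ttsanr.py -n 12345 -w -u "http://example.com/decks/"
#   python ttsanr.py -o mydeck.o8n -i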
| mit |
ddico/odoomrp-wip | account_treasury_forecast/models/account_treasury_forecast.py | 31 | 9136 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Avanzosc - Avanced Open Source Consulting
#    Avanzosc - Advanced Open Source Consulting
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
import openerp.addons.decimal_precision as dp
from openerp import models, fields, api, exceptions, _
class AccountTreasuryForecastInvoice(models.Model):
_name = 'account.treasury.forecast.invoice'
_description = 'Treasury Forecast Invoice'
invoice_id = fields.Many2one("account.invoice", string="Invoice")
date_due = fields.Date(string="Due Date")
partner_id = fields.Many2one("res.partner", string="Partner")
journal_id = fields.Many2one("account.journal", string="Journal")
state = fields.Selection([('draft', 'Draft'), ('proforma', 'Pro-forma'),
('proforma2', 'Pro-forma'), ('open', 'Opened'),
('paid', 'Paid'), ('cancel', 'Canceled')],
string="State")
base_amount = fields.Float(string="Base Amount",
digits_compute=dp.get_precision('Account'))
tax_amount = fields.Float(string="Tax Amount",
digits_compute=dp.get_precision('Account'))
total_amount = fields.Float(string="Total Amount",
digits_compute=dp.get_precision('Account'))
residual_amount = fields.Float(string="Residual Amount",
digits_compute=dp.get_precision('Account'))
class AccountTreasuryForecast(models.Model):
_name = 'account.treasury.forecast'
_description = 'Treasury Forecast'
@api.one
def calc_final_amount(self):
balance = 0
for out_invoice in self.out_invoice_ids:
balance += out_invoice.total_amount
for in_invoice in self.in_invoice_ids:
balance -= in_invoice.total_amount
for recurring_line in self.recurring_line_ids:
balance -= recurring_line.amount
for variable_line in self.variable_line_ids:
balance -= variable_line.amount
balance += self.start_amount
self.final_amount = balance
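    # Worked example with illustrative figures: start_amount 1000, customer
    # invoices 500, supplier invoices 300, recurring lines 100, variable lines 50
    # -> final_amount = 1000 + 500 - 300 - 100 - 50 = 1050.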
name = fields.Char(string="Description", required=True)
template_id = fields.Many2one("account.treasury.forecast.template",
string="Template", required=True)
start_date = fields.Date(string="Start Date", required=True)
end_date = fields.Date(string="End Date", required=True)
start_amount = fields.Float(string="Start Amount",
digits_compute=dp.get_precision('Account'))
final_amount = fields.Float(string="Final Amount",
compute="calc_final_amount",
digits_compute=dp.get_precision('Account'))
check_draft = fields.Boolean(string="Draft", default=1)
check_proforma = fields.Boolean(string="Proforma", default=1)
check_open = fields.Boolean(string="Opened", default=1)
out_invoice_ids = fields.Many2many(
comodel_name="account.treasury.forecast.invoice",
relation="account_treasury_forecast_out_invoice_rel",
column1="treasury_id", column2="out_invoice_id",
string="Out Invoices")
in_invoice_ids = fields.Many2many(
comodel_name="account.treasury.forecast.invoice",
relation="account_treasury_forecast_in_invoice_rel",
column1="treasury_id", column2="in_invoice_id",
string="In Invoices")
recurring_line_ids = fields.One2many(
"account.treasury.forecast.line", "treasury_id",
string="Recurring Lines", domain=[('line_type', '=', 'recurring')])
variable_line_ids = fields.One2many(
"account.treasury.forecast.line", "treasury_id",
string="Variable Lines", domain=[('line_type', '=', 'variable')])
@api.one
@api.constrains('end_date', 'start_date')
def check_date(self):
if self.start_date > self.end_date:
raise exceptions.Warning(
                _('Error! End date is earlier than start date.'))
@api.one
@api.constrains('check_draft', 'check_proforma', 'check_open')
def check_filter(self):
if not self.check_draft and not self.check_proforma and \
not self.check_open:
raise exceptions.Warning(
                _('Error! No filter is checked.'))
@api.one
def restart(self):
self.out_invoice_ids.unlink()
self.in_invoice_ids.unlink()
self.recurring_line_ids.unlink()
self.variable_line_ids.unlink()
return True
@api.multi
def button_calculate(self):
self.restart()
self.calculate_invoices()
self.calculate_line()
return True
@api.one
def calculate_invoices(self):
invoice_obj = self.env['account.invoice']
treasury_invoice_obj = self.env['account.treasury.forecast.invoice']
new_invoice_ids = []
in_invoice_lst = []
out_invoice_lst = []
state = []
if self.check_draft:
state.append("draft")
if self.check_proforma:
state.append("proforma")
if self.check_open:
state.append("open")
invoice_ids = invoice_obj.search([('date_due', '>', self.start_date),
('date_due', '<', self.end_date),
('state', 'in', tuple(state))])
for invoice_o in invoice_ids:
values = {
'invoice_id': invoice_o.id,
'date_due': invoice_o.date_due,
'partner_id': invoice_o.partner_id.id,
'journal_id': invoice_o.journal_id.id,
'state': invoice_o.state,
'base_amount': invoice_o.amount_untaxed,
'tax_amount': invoice_o.amount_tax,
'total_amount': invoice_o.amount_total,
'residual_amount': invoice_o.residual,
}
new_id = treasury_invoice_obj.create(values)
new_invoice_ids.append(new_id)
if invoice_o.type in ("out_invoice", "out_refund"):
out_invoice_lst.append(new_id.id)
elif invoice_o.type in ("in_invoice", "in_refund"):
in_invoice_lst.append(new_id.id)
self.write({'out_invoice_ids': [(6, 0, out_invoice_lst)],
'in_invoice_ids': [(6, 0, in_invoice_lst)]})
return new_invoice_ids
@api.one
def calculate_line(self):
line_obj = self.env['account.treasury.forecast.line']
temp_line_obj = self.env['account.treasury.forecast.line.template']
new_line_ids = []
temp_line_lst = temp_line_obj.search([('treasury_template_id', '=',
self.template_id.id)])
for line_o in temp_line_lst:
if ((line_o.date > self.start_date and
line_o.date < self.end_date) or
not line_o.date) and not line_o.paid:
values = {
'name': line_o.name,
'date': line_o.date,
'line_type': line_o.line_type,
'partner_id': line_o.partner_id.id,
'template_line_id': line_o.id,
'amount': line_o.amount,
'treasury_id': self.id,
}
new_line_id = line_obj.create(values)
new_line_ids.append(new_line_id)
return new_line_ids
class AccountTreasuryForecastLine(models.Model):
_name = 'account.treasury.forecast.line'
_description = 'Treasury Forecast Line'
name = fields.Char(string="Description")
line_type = fields.Selection([('recurring', 'Recurring'),
('variable', 'Variable')],
string="Treasury Line Type")
date = fields.Date(string="Date")
partner_id = fields.Many2one("res.partner", string="Partner")
amount = fields.Float(string="Amount",
digits_compute=dp.get_precision('Account'))
template_line_id = fields.Many2one(
"account.treasury.forecast.line.template", string="Template Line")
treasury_id = fields.Many2one("account.treasury.forecast",
string="Treasury")
| agpl-3.0 |
tchellomello/home-assistant | tests/components/hue/test_config_flow.py | 2 | 20257 | """Tests for Philips Hue config flow."""
import asyncio
from aiohttp import client_exceptions
import aiohue
from aiohue.discovery import URL_NUPNP
import pytest
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components import ssdp
from homeassistant.components.hue import config_flow, const
from tests.async_mock import AsyncMock, Mock, patch
from tests.common import MockConfigEntry
@pytest.fixture(name="hue_setup", autouse=True)
def hue_setup_fixture():
"""Mock hue entry setup."""
with patch("homeassistant.components.hue.async_setup_entry", return_value=True):
yield
def get_mock_bridge(
bridge_id="aabbccddeeff", host="1.2.3.4", mock_create_user=None, username=None
):
"""Return a mock bridge."""
mock_bridge = Mock()
mock_bridge.host = host
mock_bridge.username = username
mock_bridge.config.name = "Mock Bridge"
mock_bridge.id = bridge_id
if not mock_create_user:
async def create_user(username):
mock_bridge.username = username
mock_create_user = create_user
mock_bridge.create_user = mock_create_user
mock_bridge.initialize = AsyncMock()
return mock_bridge
async def test_flow_works(hass):
"""Test config flow ."""
mock_bridge = get_mock_bridge()
with patch(
"homeassistant.components.hue.config_flow.discover_nupnp",
return_value=[mock_bridge],
):
result = await hass.config_entries.flow.async_init(
const.DOMAIN, context={"source": "user"}
)
assert result["type"] == "form"
assert result["step_id"] == "init"
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={"id": mock_bridge.id}
)
assert result["type"] == "form"
assert result["step_id"] == "link"
flow = next(
flow
for flow in hass.config_entries.flow.async_progress()
if flow["flow_id"] == result["flow_id"]
)
assert flow["context"]["unique_id"] == "aabbccddeeff"
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result["type"] == "create_entry"
assert result["title"] == "Mock Bridge"
assert result["data"] == {
"host": "1.2.3.4",
"username": "home-assistant#test-home",
}
assert len(mock_bridge.initialize.mock_calls) == 1
async def test_manual_flow_works(hass, aioclient_mock):
"""Test config flow discovers only already configured bridges."""
mock_bridge = get_mock_bridge()
with patch(
"homeassistant.components.hue.config_flow.discover_nupnp",
return_value=[mock_bridge],
):
result = await hass.config_entries.flow.async_init(
const.DOMAIN, context={"source": "user"}
)
assert result["type"] == "form"
assert result["step_id"] == "init"
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={"id": "manual"}
)
assert result["type"] == "form"
assert result["step_id"] == "manual"
bridge = get_mock_bridge(
bridge_id="id-1234", host="2.2.2.2", username="username-abc"
)
with patch(
"aiohue.Bridge",
return_value=bridge,
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"host": "2.2.2.2"}
)
assert result["type"] == "form"
assert result["step_id"] == "link"
with patch("homeassistant.components.hue.config_flow.authenticate_bridge"), patch(
"homeassistant.components.hue.async_unload_entry", return_value=True
):
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == "create_entry"
assert result["title"] == "Mock Bridge"
assert result["data"] == {
"host": "2.2.2.2",
"username": "username-abc",
}
entries = hass.config_entries.async_entries("hue")
assert len(entries) == 1
entry = entries[-1]
assert entry.unique_id == "id-1234"
async def test_manual_flow_bridge_exist(hass, aioclient_mock):
"""Test config flow discovers only already configured bridges."""
MockConfigEntry(
domain="hue", unique_id="id-1234", data={"host": "2.2.2.2"}
).add_to_hass(hass)
with patch(
"homeassistant.components.hue.config_flow.discover_nupnp",
return_value=[],
):
result = await hass.config_entries.flow.async_init(
const.DOMAIN, context={"source": "user"}
)
assert result["type"] == "form"
assert result["step_id"] == "manual"
bridge = get_mock_bridge(
bridge_id="id-1234", host="2.2.2.2", username="username-abc"
)
with patch(
"aiohue.Bridge",
return_value=bridge,
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"host": "2.2.2.2"}
)
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
async def test_manual_flow_no_discovered_bridges(hass, aioclient_mock):
"""Test config flow discovers no bridges."""
aioclient_mock.get(URL_NUPNP, json=[])
result = await hass.config_entries.flow.async_init(
const.DOMAIN, context={"source": "user"}
)
assert result["type"] == "form"
assert result["step_id"] == "manual"
async def test_flow_all_discovered_bridges_exist(hass, aioclient_mock):
"""Test config flow discovers only already configured bridges."""
aioclient_mock.get(URL_NUPNP, json=[{"internalipaddress": "1.2.3.4", "id": "bla"}])
MockConfigEntry(
domain="hue", unique_id="bla", data={"host": "1.2.3.4"}
).add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
const.DOMAIN, context={"source": "user"}
)
assert result["type"] == "form"
assert result["step_id"] == "manual"
async def test_flow_bridges_discovered(hass, aioclient_mock):
"""Test config flow discovers two bridges."""
# Add ignored config entry. Should still show up as option.
MockConfigEntry(
domain="hue", source=config_entries.SOURCE_IGNORE, unique_id="bla"
).add_to_hass(hass)
aioclient_mock.get(
URL_NUPNP,
json=[
{"internalipaddress": "1.2.3.4", "id": "bla"},
{"internalipaddress": "5.6.7.8", "id": "beer"},
],
)
result = await hass.config_entries.flow.async_init(
const.DOMAIN, context={"source": "user"}
)
assert result["type"] == "form"
assert result["step_id"] == "init"
with pytest.raises(vol.Invalid):
assert result["data_schema"]({"id": "not-discovered"})
result["data_schema"]({"id": "bla"})
result["data_schema"]({"id": "beer"})
result["data_schema"]({"id": "manual"})
async def test_flow_two_bridges_discovered_one_new(hass, aioclient_mock):
"""Test config flow discovers two bridges."""
aioclient_mock.get(
URL_NUPNP,
json=[
{"internalipaddress": "1.2.3.4", "id": "bla"},
{"internalipaddress": "5.6.7.8", "id": "beer"},
],
)
MockConfigEntry(
domain="hue", unique_id="bla", data={"host": "1.2.3.4"}
).add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
const.DOMAIN, context={"source": "user"}
)
assert result["type"] == "form"
assert result["step_id"] == "init"
assert result["data_schema"]({"id": "beer"})
assert result["data_schema"]({"id": "manual"})
with pytest.raises(vol.error.MultipleInvalid):
assert not result["data_schema"]({"id": "bla"})
async def test_flow_timeout_discovery(hass):
"""Test config flow ."""
with patch(
"homeassistant.components.hue.config_flow.discover_nupnp",
side_effect=asyncio.TimeoutError,
):
result = await hass.config_entries.flow.async_init(
const.DOMAIN, context={"source": "user"}
)
assert result["type"] == "abort"
assert result["reason"] == "discover_timeout"
async def test_flow_link_timeout(hass):
"""Test config flow."""
mock_bridge = get_mock_bridge(
mock_create_user=AsyncMock(side_effect=asyncio.TimeoutError),
)
with patch(
"homeassistant.components.hue.config_flow.discover_nupnp",
return_value=[mock_bridge],
):
result = await hass.config_entries.flow.async_init(
const.DOMAIN, context={"source": "user"}
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={"id": mock_bridge.id}
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result["type"] == "abort"
assert result["reason"] == "cannot_connect"
async def test_flow_link_unknown_error(hass):
"""Test if a unknown error happened during the linking processes."""
mock_bridge = get_mock_bridge(
mock_create_user=AsyncMock(side_effect=OSError),
)
with patch(
"homeassistant.components.hue.config_flow.discover_nupnp",
return_value=[mock_bridge],
):
result = await hass.config_entries.flow.async_init(
const.DOMAIN, context={"source": "user"}
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={"id": mock_bridge.id}
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result["type"] == "form"
assert result["step_id"] == "link"
assert result["errors"] == {"base": "linking"}
async def test_flow_link_button_not_pressed(hass):
"""Test config flow ."""
mock_bridge = get_mock_bridge(
mock_create_user=AsyncMock(side_effect=aiohue.LinkButtonNotPressed),
)
with patch(
"homeassistant.components.hue.config_flow.discover_nupnp",
return_value=[mock_bridge],
):
result = await hass.config_entries.flow.async_init(
const.DOMAIN, context={"source": "user"}
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={"id": mock_bridge.id}
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result["type"] == "form"
assert result["step_id"] == "link"
assert result["errors"] == {"base": "register_failed"}
async def test_flow_link_unknown_host(hass):
"""Test config flow ."""
mock_bridge = get_mock_bridge(
mock_create_user=AsyncMock(side_effect=client_exceptions.ClientOSError),
)
with patch(
"homeassistant.components.hue.config_flow.discover_nupnp",
return_value=[mock_bridge],
):
result = await hass.config_entries.flow.async_init(
const.DOMAIN, context={"source": "user"}
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={"id": mock_bridge.id}
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result["type"] == "abort"
assert result["reason"] == "cannot_connect"
async def test_bridge_ssdp(hass):
"""Test a bridge being discovered."""
result = await hass.config_entries.flow.async_init(
const.DOMAIN,
context={"source": "ssdp"},
data={
ssdp.ATTR_SSDP_LOCATION: "http://0.0.0.0/",
ssdp.ATTR_UPNP_MANUFACTURER_URL: config_flow.HUE_MANUFACTURERURL,
ssdp.ATTR_UPNP_SERIAL: "1234",
},
)
assert result["type"] == "form"
assert result["step_id"] == "link"
async def test_bridge_ssdp_discover_other_bridge(hass):
"""Test that discovery ignores other bridges."""
result = await hass.config_entries.flow.async_init(
const.DOMAIN,
context={"source": "ssdp"},
data={ssdp.ATTR_UPNP_MANUFACTURER_URL: "http://www.notphilips.com"},
)
assert result["type"] == "abort"
assert result["reason"] == "not_hue_bridge"
async def test_bridge_ssdp_emulated_hue(hass):
"""Test if discovery info is from an emulated hue instance."""
result = await hass.config_entries.flow.async_init(
const.DOMAIN,
context={"source": "ssdp"},
data={
ssdp.ATTR_SSDP_LOCATION: "http://0.0.0.0/",
ssdp.ATTR_UPNP_FRIENDLY_NAME: "Home Assistant Bridge",
ssdp.ATTR_UPNP_MANUFACTURER_URL: config_flow.HUE_MANUFACTURERURL,
ssdp.ATTR_UPNP_SERIAL: "1234",
},
)
assert result["type"] == "abort"
assert result["reason"] == "not_hue_bridge"
async def test_bridge_ssdp_missing_location(hass):
"""Test if discovery info is missing a location attribute."""
result = await hass.config_entries.flow.async_init(
const.DOMAIN,
context={"source": "ssdp"},
data={
ssdp.ATTR_UPNP_MANUFACTURER_URL: config_flow.HUE_MANUFACTURERURL,
ssdp.ATTR_UPNP_SERIAL: "1234",
},
)
assert result["type"] == "abort"
assert result["reason"] == "not_hue_bridge"
async def test_bridge_ssdp_missing_serial(hass):
"""Test if discovery info is a serial attribute."""
result = await hass.config_entries.flow.async_init(
const.DOMAIN,
context={"source": "ssdp"},
data={
ssdp.ATTR_SSDP_LOCATION: "http://0.0.0.0/",
ssdp.ATTR_UPNP_MANUFACTURER_URL: config_flow.HUE_MANUFACTURERURL,
},
)
assert result["type"] == "abort"
assert result["reason"] == "not_hue_bridge"
async def test_bridge_ssdp_espalexa(hass):
"""Test if discovery info is from an Espalexa based device."""
result = await hass.config_entries.flow.async_init(
const.DOMAIN,
context={"source": "ssdp"},
data={
ssdp.ATTR_SSDP_LOCATION: "http://0.0.0.0/",
ssdp.ATTR_UPNP_FRIENDLY_NAME: "Espalexa (0.0.0.0)",
ssdp.ATTR_UPNP_MANUFACTURER_URL: config_flow.HUE_MANUFACTURERURL,
ssdp.ATTR_UPNP_SERIAL: "1234",
},
)
assert result["type"] == "abort"
assert result["reason"] == "not_hue_bridge"
async def test_bridge_ssdp_already_configured(hass):
"""Test if a discovered bridge has already been configured."""
MockConfigEntry(
domain="hue", unique_id="1234", data={"host": "0.0.0.0"}
).add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
const.DOMAIN,
context={"source": "ssdp"},
data={
ssdp.ATTR_SSDP_LOCATION: "http://0.0.0.0/",
ssdp.ATTR_UPNP_MANUFACTURER_URL: config_flow.HUE_MANUFACTURERURL,
ssdp.ATTR_UPNP_SERIAL: "1234",
},
)
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
async def test_import_with_no_config(hass):
"""Test importing a host without an existing config file."""
result = await hass.config_entries.flow.async_init(
const.DOMAIN,
context={"source": "import"},
data={"host": "0.0.0.0"},
)
assert result["type"] == "form"
assert result["step_id"] == "link"
async def test_creating_entry_removes_entries_for_same_host_or_bridge(hass):
"""Test that we clean up entries for same host and bridge.
An IP can only hold a single bridge and a single bridge can only be
accessible via a single IP. So when we create a new entry, we'll remove
all existing entries that either have same IP or same bridge_id.
"""
orig_entry = MockConfigEntry(
domain="hue",
data={"host": "0.0.0.0", "username": "aaaa"},
unique_id="id-1234",
)
orig_entry.add_to_hass(hass)
MockConfigEntry(
domain="hue",
data={"host": "1.2.3.4", "username": "bbbb"},
unique_id="id-5678",
).add_to_hass(hass)
assert len(hass.config_entries.async_entries("hue")) == 2
bridge = get_mock_bridge(
bridge_id="id-1234", host="2.2.2.2", username="username-abc"
)
with patch(
"aiohue.Bridge",
return_value=bridge,
):
result = await hass.config_entries.flow.async_init(
"hue", data={"host": "2.2.2.2"}, context={"source": "import"}
)
assert result["type"] == "form"
assert result["step_id"] == "link"
with patch("homeassistant.components.hue.config_flow.authenticate_bridge"), patch(
"homeassistant.components.hue.async_unload_entry", return_value=True
):
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == "create_entry"
assert result["title"] == "Mock Bridge"
assert result["data"] == {
"host": "2.2.2.2",
"username": "username-abc",
}
entries = hass.config_entries.async_entries("hue")
assert len(entries) == 2
new_entry = entries[-1]
assert orig_entry.entry_id != new_entry.entry_id
assert new_entry.unique_id == "id-1234"
async def test_bridge_homekit(hass, aioclient_mock):
"""Test a bridge being discovered via HomeKit."""
aioclient_mock.get(URL_NUPNP, json=[{"internalipaddress": "1.2.3.4", "id": "bla"}])
result = await hass.config_entries.flow.async_init(
const.DOMAIN,
context={"source": "homekit"},
data={
"host": "0.0.0.0",
"serial": "1234",
"manufacturerURL": config_flow.HUE_MANUFACTURERURL,
"properties": {"id": "aa:bb:cc:dd:ee:ff"},
},
)
assert result["type"] == "form"
assert result["step_id"] == "init"
async def test_bridge_import_already_configured(hass):
"""Test if a import flow aborts if host is already configured."""
MockConfigEntry(
domain="hue", unique_id="aabbccddeeff", data={"host": "0.0.0.0"}
).add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
const.DOMAIN,
context={"source": "import"},
data={"host": "0.0.0.0", "properties": {"id": "aa:bb:cc:dd:ee:ff"}},
)
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
async def test_bridge_homekit_already_configured(hass):
"""Test if a HomeKit discovered bridge has already been configured."""
MockConfigEntry(
domain="hue", unique_id="aabbccddeeff", data={"host": "0.0.0.0"}
).add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
const.DOMAIN,
context={"source": "homekit"},
data={"host": "0.0.0.0", "properties": {"id": "aa:bb:cc:dd:ee:ff"}},
)
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
async def test_ssdp_discovery_update_configuration(hass):
"""Test if a discovered bridge is configured and updated with new host."""
entry = MockConfigEntry(
domain="hue", unique_id="aabbccddeeff", data={"host": "0.0.0.0"}
)
entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
const.DOMAIN,
context={"source": "ssdp"},
data={
ssdp.ATTR_SSDP_LOCATION: "http://1.1.1.1/",
ssdp.ATTR_UPNP_MANUFACTURER_URL: config_flow.HUE_MANUFACTURERURL,
ssdp.ATTR_UPNP_SERIAL: "aabbccddeeff",
},
)
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
assert entry.data["host"] == "1.1.1.1"
async def test_options_flow(hass):
"""Test options config flow."""
entry = MockConfigEntry(
domain="hue",
unique_id="aabbccddeeff",
data={"host": "0.0.0.0"},
)
entry.add_to_hass(hass)
result = await hass.config_entries.options.async_init(entry.entry_id)
assert result["type"] == "form"
assert result["step_id"] == "init"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={
const.CONF_ALLOW_HUE_GROUPS: True,
const.CONF_ALLOW_UNREACHABLE: True,
},
)
assert result["type"] == "create_entry"
assert result["data"] == {
const.CONF_ALLOW_HUE_GROUPS: True,
const.CONF_ALLOW_UNREACHABLE: True,
}
| apache-2.0 |
eamigo86/graphene-django-extras | graphene_django_extras/fields.py | 1 | 11299 | # -*- coding: utf-8 -*-
import operator
from functools import partial
from graphene import Field, List, ID, Argument
from graphene.types.structures import Structure, NonNull
from graphene_django.fields import DjangoListField as DLF
from graphene_django.filter.utils import get_filtering_args_from_filterset
from graphene_django.utils import (
maybe_queryset,
is_valid_django_model,
DJANGO_FILTER_INSTALLED,
)
from graphene_django_extras.filters.filter import get_filterset_class
from graphene_django_extras.settings import graphql_api_settings
from .base_types import DjangoListObjectBase
from .paginations.pagination import BaseDjangoGraphqlPagination
from .utils import get_extra_filters, queryset_factory, get_related_fields, find_field
# *********************************************** #
# *********** FIELD FOR SINGLE OBJECT *********** #
# *********************************************** #
class DjangoObjectField(Field):
def __init__(self, _type, *args, **kwargs):
kwargs["id"] = ID(
required=True, description="Django object unique identification field"
)
super(DjangoObjectField, self).__init__(_type, *args, **kwargs)
@property
def model(self):
return self.type._meta.node._meta.model
@staticmethod
def object_resolver(manager, root, info, **kwargs):
id = kwargs.pop("id", None)
try:
return manager.get_queryset().get(pk=id)
except manager.model.DoesNotExist:
return None
def wrap_resolve(self, parent_resolver):
return partial(self.object_resolver, self.type._meta.model._default_manager)
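# Illustrative usage sketch (hypothetical schema types, not part of this module):
#   class Query(graphene.ObjectType):
#       user = DjangoObjectField(UserType, description="Single object lookup")
#   # The field exposes a required `id` argument and resolves it through the
#   # model's default manager, returning None when no row matches the pk.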
# *********************************************** #
# *************** FIELDS FOR LIST *************** #
# *********************************************** #
class DjangoListField(DLF):
def __init__(self, _type, *args, **kwargs):
if isinstance(_type, NonNull):
_type = _type.of_type
super(DLF, self).__init__(List(NonNull(_type)), *args, **kwargs)
class DjangoFilterListField(Field):
def __init__(
self,
_type,
fields=None,
extra_filter_meta=None,
filterset_class=None,
*args,
**kwargs,
):
if DJANGO_FILTER_INSTALLED:
_fields = _type._meta.filter_fields
_model = _type._meta.model
self.fields = fields or _fields
meta = dict(model=_model, fields=self.fields)
if extra_filter_meta:
meta.update(extra_filter_meta)
filterset_class = filterset_class or _type._meta.filterset_class
self.filterset_class = get_filterset_class(filterset_class, **meta)
self.filtering_args = get_filtering_args_from_filterset(
self.filterset_class, _type
)
kwargs.setdefault("args", {})
kwargs["args"].update(self.filtering_args)
if "id" not in kwargs["args"].keys():
self.filtering_args.update(
{
"id": Argument(
ID, description="Django object unique identification field"
)
}
)
kwargs["args"].update(
{
"id": Argument(
ID, description="Django object unique identification field"
)
}
)
if not kwargs.get("description", None):
kwargs["description"] = "{} list".format(_type._meta.model.__name__)
super(DjangoFilterListField, self).__init__(List(_type), *args, **kwargs)
@property
def model(self):
return self.type.of_type._meta.node._meta.model
@staticmethod
def list_resolver(manager, filterset_class, filtering_args, root, info, **kwargs):
qs = None
field = None
if root and is_valid_django_model(root._meta.model):
available_related_fields = get_related_fields(root._meta.model)
field = find_field(info.field_nodes[0], available_related_fields)
filter_kwargs = {k: v for k, v in kwargs.items() if k in filtering_args}
if field is not None:
try:
if filter_kwargs:
qs = operator.attrgetter(
"{}.filter".format(
getattr(field, "related_name", None) or field.name
)
)(root)(**filter_kwargs)
else:
qs = operator.attrgetter(
"{}.all".format(
getattr(field, "related_name", None) or field.name
)
)(root)()
except AttributeError:
qs = None
if qs is None:
qs = queryset_factory(manager, root, info, **kwargs)
qs = filterset_class(
data=filter_kwargs, queryset=qs, request=info.context
).qs
if root and is_valid_django_model(root._meta.model):
extra_filters = get_extra_filters(root, manager.model)
qs = qs.filter(**extra_filters)
return maybe_queryset(qs)
def wrap_resolve(self, parent_resolver):
current_type = self.type
while isinstance(current_type, Structure):
current_type = current_type.of_type
return partial(
self.list_resolver,
current_type._meta.model._default_manager,
self.filterset_class,
self.filtering_args,
)
class DjangoFilterPaginateListField(Field):
def __init__(
self,
_type,
pagination=None,
fields=None,
extra_filter_meta=None,
filterset_class=None,
*args,
**kwargs,
):
_fields = _type._meta.filter_fields
_model = _type._meta.model
self.fields = fields or _fields
meta = dict(model=_model, fields=self.fields)
if extra_filter_meta:
meta.update(extra_filter_meta)
filterset_class = filterset_class or _type._meta.filterset_class
self.filterset_class = get_filterset_class(filterset_class, **meta)
self.filtering_args = get_filtering_args_from_filterset(
self.filterset_class, _type
)
kwargs.setdefault("args", {})
kwargs["args"].update(self.filtering_args)
if "id" not in kwargs["args"].keys():
self.filtering_args.update(
{
"id": Argument(
ID, description="Django object unique identification field"
)
}
)
kwargs["args"].update(
{
"id": Argument(
ID, description="Django object unique identification field"
)
}
)
pagination = pagination or graphql_api_settings.DEFAULT_PAGINATION_CLASS()
if pagination is not None:
assert isinstance(pagination, BaseDjangoGraphqlPagination), (
'You need to pass a valid DjangoGraphqlPagination in DjangoFilterPaginateListField, received "{}".'
).format(pagination)
pagination_kwargs = pagination.to_graphql_fields()
self.pagination = pagination
kwargs.update(**pagination_kwargs)
if not kwargs.get("description", None):
kwargs["description"] = "{} list".format(_type._meta.model.__name__)
super(DjangoFilterPaginateListField, self).__init__(
List(NonNull(_type)), *args, **kwargs
)
@property
def model(self):
return self.type.of_type._meta.node._meta.model
def get_queryset(self, manager, root, info, **kwargs):
return queryset_factory(manager, root, info, **kwargs)
def list_resolver(
self, manager, filterset_class, filtering_args, root, info, **kwargs
):
filter_kwargs = {k: v for k, v in kwargs.items() if k in filtering_args}
qs = self.get_queryset(manager, root, info, **kwargs)
qs = filterset_class(data=filter_kwargs, queryset=qs, request=info.context).qs
if root and is_valid_django_model(root._meta.model):
extra_filters = get_extra_filters(root, manager.model)
qs = qs.filter(**extra_filters)
if getattr(self, "pagination", None):
qs = self.pagination.paginate_queryset(qs, **kwargs)
return maybe_queryset(qs)
def wrap_resolve(self, parent_resolver):
current_type = self.type
while isinstance(current_type, Structure):
current_type = current_type.of_type
return partial(
self.list_resolver,
current_type._meta.model._default_manager,
self.filterset_class,
self.filtering_args,
)
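# Illustrative usage sketch (hypothetical UserType; the pagination class is
# assumed to come from this package's paginations module):
#   class Query(graphene.ObjectType):
#       all_users = DjangoFilterPaginateListField(
#           UserType, pagination=LimitOffsetGraphqlPagination(default_limit=25)
#       )
#   # The field exposes the filterset arguments plus the pagination arguments.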
class DjangoListObjectField(Field):
def __init__(
self,
_type,
fields=None,
extra_filter_meta=None,
filterset_class=None,
*args,
**kwargs,
):
if DJANGO_FILTER_INSTALLED:
_fields = _type._meta.filter_fields
_model = _type._meta.model
self.fields = fields or _fields
meta = dict(model=_model, fields=self.fields)
if extra_filter_meta:
meta.update(extra_filter_meta)
filterset_class = filterset_class or _type._meta.filterset_class
self.filterset_class = get_filterset_class(filterset_class, **meta)
self.filtering_args = get_filtering_args_from_filterset(
self.filterset_class, _type
)
kwargs.setdefault("args", {})
kwargs["args"].update(self.filtering_args)
if "id" not in kwargs["args"].keys():
id_description = "Django object unique identification field"
self.filtering_args.update(
{"id": Argument(ID, description=id_description)}
)
kwargs["args"].update({"id": Argument(ID, description=id_description)})
if not kwargs.get("description", None):
kwargs["description"] = "{} list".format(_type._meta.model.__name__)
super(DjangoListObjectField, self).__init__(_type, *args, **kwargs)
@property
def model(self):
return self.type._meta.model
def list_resolver(
self, manager, filterset_class, filtering_args, root, info, **kwargs
):
qs = queryset_factory(manager, root, info, **kwargs)
filter_kwargs = {k: v for k, v in kwargs.items() if k in filtering_args}
qs = filterset_class(data=filter_kwargs, queryset=qs, request=info.context).qs
count = qs.count()
return DjangoListObjectBase(
count=count,
results=maybe_queryset(qs),
results_field_name=self.type._meta.results_field_name,
)
def wrap_resolve(self, parent_resolver):
return partial(
self.list_resolver,
self.type._meta.model._default_manager,
self.filterset_class,
self.filtering_args,
)
| mit |
OpenDMM/bitbake | lib/bb/data.py | 1 | 13319 | # ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
"""
BitBake 'Data' implementations
Functions for interacting with the data structure used by the
BitBake build tools.
The expandData and update_data are the most expensive
operations. At night the cookie monster came by and
suggested 'give me cookies on setting the variables and
things will work out'. Taking this suggestion into account
applying the skills from the not yet passed 'Entwurf und
Analyse von Algorithmen' lecture and the cookie
monster seems to be right. We will track setVar more carefully
to have faster update_data and expandKeys operations.
This is a trade-off between speed and memory again but
the speed is more critical here.
"""
# Copyright (C) 2003, 2004 Chris Larson
# Copyright (C) 2005 Holger Hans Peter Freyther
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#Based on functions from the base bb module, Copyright 2003 Holger Schurig
import sys, os, re
if sys.argv[0][-5:] == "pydoc":
path = os.path.dirname(os.path.dirname(sys.argv[1]))
else:
path = os.path.dirname(os.path.dirname(sys.argv[0]))
sys.path.insert(0, path)
from itertools import groupby
from bb import data_smart
from bb import codeparser
import bb
logger = data_smart.logger
_dict_type = data_smart.DataSmart
def init():
"""Return a new object representing the Bitbake data"""
return _dict_type()
def init_db(parent = None):
"""Return a new object representing the Bitbake data,
optionally based on an existing object"""
if parent is not None:
return parent.createCopy()
else:
return _dict_type()
def createCopy(source):
"""Link the source set to the destination
    If a value is not found in the destination set, the lookup
    falls through to the source set to get the value.
    Values from the source are copy-on-write, i.e. any attempt to
    modify one of them will end up putting the modified value
    in the destination set.
"""
return source.createCopy()
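# Illustrative copy-on-write sketch using the helpers defined in this module
# (variable names are hypothetical):
#   d = init()
#   setVar("FOO", "bar", d)
#   d2 = createCopy(d)
#   setVar("FOO", "baz", d2)      # the write lands in the copy only
#   getVar("FOO", d)              # -> "bar"
#   getVar("FOO", d2)             # -> "baz"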
def initVar(var, d):
"""Non-destructive var init for data structure"""
d.initVar(var)
def setVar(var, value, d):
"""Set a variable to a given value"""
d.setVar(var, value)
def getVar(var, d, exp = 0):
"""Gets the value of a variable"""
return d.getVar(var, exp)
def renameVar(key, newkey, d):
"""Renames a variable from key to newkey"""
d.renameVar(key, newkey)
def delVar(var, d):
"""Removes a variable from the data set"""
d.delVar(var)
def appendVar(var, value, d):
"""Append additional value to a variable"""
d.appendVar(var, value)
def setVarFlag(var, flag, flagvalue, d):
"""Set a flag for a given variable to a given value"""
d.setVarFlag(var, flag, flagvalue)
def getVarFlag(var, flag, d):
"""Gets given flag from given var"""
return d.getVarFlag(var, flag)
def delVarFlag(var, flag, d):
"""Removes a given flag from the variable's flags"""
d.delVarFlag(var, flag)
def setVarFlags(var, flags, d):
"""Set the flags for a given variable
Note:
setVarFlags will not clear previous
flags. Think of this method as
addVarFlags
"""
d.setVarFlags(var, flags)
def getVarFlags(var, d):
"""Gets a variable's flags"""
return d.getVarFlags(var)
def delVarFlags(var, d):
"""Removes a variable's flags"""
d.delVarFlags(var)
def keys(d):
"""Return a list of keys in d"""
return d.keys()
__expand_var_regexp__ = re.compile(r"\${[^{}]+}")
__expand_python_regexp__ = re.compile(r"\${@.+?}")
def expand(s, d, varname = None):
"""Variable expansion using the data store"""
return d.expand(s, varname)
def expandKeys(alterdata, readdata = None):
if readdata == None:
readdata = alterdata
todolist = {}
for key in alterdata:
if not '${' in key:
continue
ekey = expand(key, readdata)
if key == ekey:
continue
todolist[key] = ekey
# These two for loops are split for performance to maximise the
# usefulness of the expand cache
for key in todolist:
ekey = todolist[key]
newval = alterdata.getVar(ekey, 0)
if newval:
val = alterdata.getVar(key, 0)
if val is not None and newval is not None:
bb.warn("Variable key %s (%s) replaces original key %s (%s)." % (key, val, ekey, newval))
alterdata.renameVar(key, ekey)
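# A minimal sketch (not part of the original module) of what expandKeys() does:
# it rewrites variable *names* that contain ${...} references, leaving the
# values untouched. The variable names below are made up for illustration.
#
#   d = init()
#   setVar('PN', 'mypkg', d)
#   setVar('RDEPENDS_${PN}', 'libfoo', d)
#   expandKeys(d)
#   assert getVar('RDEPENDS_mypkg', d) == 'libfoo'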
def inheritFromOS(d, savedenv, permitted):
"""Inherit variables from the initial environment."""
exportlist = bb.utils.preserved_envvars_exported()
for s in savedenv.keys():
if s in permitted:
try:
d.setVar(s, getVar(s, savedenv, True), op = 'from env')
if s in exportlist:
d.setVarFlag(s, "export", True, op = 'auto env export')
except TypeError:
pass
def emit_var(var, o=sys.__stdout__, d = init(), all=False):
"""Emit a variable to be sourced by a shell."""
if getVarFlag(var, "python", d):
return 0
export = getVarFlag(var, "export", d)
unexport = getVarFlag(var, "unexport", d)
func = getVarFlag(var, "func", d)
if not all and not export and not unexport and not func:
return 0
try:
if all:
oval = getVar(var, d, 0)
val = getVar(var, d, 1)
except (KeyboardInterrupt, bb.build.FuncFailed):
raise
except Exception as exc:
o.write('# expansion of %s threw %s: %s\n' % (var, exc.__class__.__name__, str(exc)))
return 0
if all:
d.varhistory.emit(var, oval, val, o)
if (var.find("-") != -1 or var.find(".") != -1 or var.find('{') != -1 or var.find('}') != -1 or var.find('+') != -1) and not all:
return 0
varExpanded = expand(var, d)
if unexport:
o.write('unset %s\n' % varExpanded)
return 0
if val is None:
return 0
val = str(val)
if func:
# NOTE: should probably check for unbalanced {} within the var
o.write("%s() {\n%s\n}\n" % (varExpanded, val))
return 1
if export:
o.write('export ')
# if we're going to output this within doublequotes,
# to a shell, we need to escape the quotes in the var
alter = re.sub('"', '\\"', val)
alter = re.sub('\n', ' \\\n', alter)
o.write('%s="%s"\n' % (varExpanded, alter))
return 0
def emit_env(o=sys.__stdout__, d = init(), all=False):
"""Emits all items in the data store in a format such that it can be sourced by a shell."""
isfunc = lambda key: bool(d.getVarFlag(key, "func"))
keys = sorted((key for key in d.keys() if not key.startswith("__")), key=isfunc)
grouped = groupby(keys, isfunc)
for isfunc, keys in grouped:
for key in keys:
emit_var(key, o, d, all and not isfunc) and o.write('\n')
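# A minimal sketch (not part of the original module) of emit_env(): variables
# flagged for export are written as shell-sourceable assignments. The variable
# name and value are made up for illustration.
#
#   d = init()
#   setVar('CFLAGS', '-O2', d)
#   setVarFlag('CFLAGS', 'export', True, d)
#   emit_env(sys.stdout, d)
#   # output: export CFLAGS="-O2"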
def exported_keys(d):
return (key for key in d.keys() if not key.startswith('__') and
d.getVarFlag(key, 'export') and
not d.getVarFlag(key, 'unexport'))
def exported_vars(d):
for key in exported_keys(d):
try:
value = d.getVar(key, True)
        except Exception:
            # Skip keys whose expansion fails; otherwise 'value' below could be
            # unbound (or stale from the previous iteration).
            continue
if value is not None:
yield key, str(value)
def emit_func(func, o=sys.__stdout__, d = init()):
"""Emits all items in the data store in a format such that it can be sourced by a shell."""
keys = (key for key in d.keys() if not key.startswith("__") and not d.getVarFlag(key, "func"))
for key in keys:
emit_var(key, o, d, False) and o.write('\n')
emit_var(func, o, d, False) and o.write('\n')
newdeps = bb.codeparser.ShellParser(func, logger).parse_shell(d.getVar(func, True))
newdeps |= set((d.getVarFlag(func, "vardeps", True) or "").split())
seen = set()
while newdeps:
deps = newdeps
seen |= deps
newdeps = set()
for dep in deps:
if d.getVarFlag(dep, "func"):
emit_var(dep, o, d, False) and o.write('\n')
newdeps |= bb.codeparser.ShellParser(dep, logger).parse_shell(d.getVar(dep, True))
newdeps |= set((d.getVarFlag(dep, "vardeps", True) or "").split())
newdeps -= seen
def update_data(d):
"""Performs final steps upon the datastore, including application of overrides"""
d.finalize(parent = True)
def build_dependencies(key, keys, shelldeps, varflagsexcl, d):
deps = set()
try:
if key[-1] == ']':
vf = key[:-1].split('[')
value = d.getVarFlag(vf[0], vf[1], False)
parser = d.expandWithRefs(value, key)
deps |= parser.references
deps = deps | (keys & parser.execs)
return deps, value
varflags = d.getVarFlags(key, ["vardeps", "vardepvalue", "vardepsexclude"]) or {}
vardeps = varflags.get("vardeps")
value = d.getVar(key, False)
def handle_contains(value, contains, d):
newvalue = ""
for k in sorted(contains):
l = (d.getVar(k, True) or "").split()
for word in sorted(contains[k]):
if word in l:
newvalue += "\n%s{%s} = Set" % (k, word)
else:
newvalue += "\n%s{%s} = Unset" % (k, word)
if not newvalue:
return value
if not value:
return newvalue
return value + newvalue
if "vardepvalue" in varflags:
value = varflags.get("vardepvalue")
elif varflags.get("func"):
if varflags.get("python"):
parsedvar = d.expandWithRefs(value, key)
parser = bb.codeparser.PythonParser(key, logger)
if parsedvar.value and "\t" in parsedvar.value:
logger.warn("Variable %s contains tabs, please remove these (%s)" % (key, d.getVar("FILE", True)))
parser.parse_python(parsedvar.value)
deps = deps | parser.references
value = handle_contains(value, parser.contains, d)
else:
parsedvar = d.expandWithRefs(value, key)
parser = bb.codeparser.ShellParser(key, logger)
parser.parse_shell(parsedvar.value)
deps = deps | shelldeps
if vardeps is None:
parser.log.flush()
deps = deps | parsedvar.references
deps = deps | (keys & parser.execs) | (keys & parsedvar.execs)
value = handle_contains(value, parsedvar.contains, d)
else:
parser = d.expandWithRefs(value, key)
deps |= parser.references
deps = deps | (keys & parser.execs)
value = handle_contains(value, parser.contains, d)
# Add varflags, assuming an exclusion list is set
if varflagsexcl:
varfdeps = []
for f in varflags:
if f not in varflagsexcl:
varfdeps.append('%s[%s]' % (key, f))
if varfdeps:
deps |= set(varfdeps)
deps |= set((vardeps or "").split())
deps -= set(varflags.get("vardepsexclude", "").split())
except Exception as e:
raise bb.data_smart.ExpansionError(key, None, e)
return deps, value
#bb.note("Variable %s references %s and calls %s" % (key, str(deps), str(execs)))
#d.setVarFlag(key, "vardeps", deps)
def generate_dependencies(d):
keys = set(key for key in d if not key.startswith("__"))
shelldeps = set(key for key in d.getVar("__exportlist", False) if d.getVarFlag(key, "export") and not d.getVarFlag(key, "unexport"))
varflagsexcl = d.getVar('BB_SIGNATURE_EXCLUDE_FLAGS', True)
deps = {}
values = {}
tasklist = d.getVar('__BBTASKS') or []
for task in tasklist:
deps[task], values[task] = build_dependencies(task, keys, shelldeps, varflagsexcl, d)
newdeps = deps[task]
seen = set()
while newdeps:
nextdeps = newdeps
seen |= nextdeps
newdeps = set()
for dep in nextdeps:
if dep not in deps:
deps[dep], values[dep] = build_dependencies(dep, keys, shelldeps, varflagsexcl, d)
newdeps |= deps[dep]
newdeps -= seen
#print "For %s: %s" % (task, str(deps[task]))
return tasklist, deps, values
def inherits_class(klass, d):
val = getVar('__inherit_cache', d) or []
needle = os.path.join('classes', '%s.bbclass' % klass)
for v in val:
if v.endswith(needle):
return True
return False
| gpl-2.0 |
knz/dashboard | report.py | 1 | 8898 | from datetime import datetime,timedelta
import humanize
import sqlite3
import sys
import time
conn = sqlite3.connect("issues.db")
cu = conn.cursor()
c = conn.cursor()
def header(user):
print("""<!DOCTYPE html>
<html lang=en>
<head>
<title>CockroachDB Issue dashboard - %s</title>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<!-- Latest compiled and minified CSS -->
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.6/css/bootstrap.min.css">
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.6/css/bootstrap-theme.min.css">
<script src="https://ajax.googleapis.com/ajax/libs/jquery/1.12.0/jquery.min.js"></script>
<script src="http://maxcdn.bootstrapcdn.com/bootstrap/3.3.6/js/bootstrap.min.js"></script></head>
<body><div class="container-fluid">
<style type=text/css>
td { font-family: monospace !important; }
.hr { height: 3px; color: solid black; }
</style>
<script>
$(document).ready(function(){
$('[data-toggle="tooltip"]').tooltip();
});
</script>
<h1>Issue dashboard for %s</h1>
""" % (user,user))
print("<p>Generated on {0} UTC</p>".format(time.asctime(time.gmtime())))
print("<div id='accordion' class='panel-group'>")
def footer():
print('<script src="sorttable.js"></script>')
print("</div></div></body></html>")
def genSection(title, divid, query, colorize):
rows = list(enumerate(c.execute(query)))
nrows = len(rows)
print("""
<div class="panel panel-default">
<div class="panel-heading">
<h2 class="panel-title">
<a data-toggle="collapse" data-parent="#accordion" href="#{divid}">
<strong>{title} ({nrows} issues)</strong>
</a>
</h2>
</div>
<div id="{divid}" class="panel-collapse collapse">
<div class="panel-body">
""".format(**locals()))
print("""
<table class='sortable table table-condensed table-striped table-hover table-responsive'><thead>
<tr><th>#</th><th>Issue</th><th>Reporter</th><th>Assignee</th><th>Created</th><th>Updated</th><th>Milestone</th></tr>
</thead><tbody>""")
for i,row in rows:
created = datetime.strptime(row[3],'%Y-%m-%dT%H:%M:%SZ')
hcreated = humanize.naturaltime(created)
url = row[0].strip()
number = row[1]
title = row[2]
reporter = row[4]
assignee = row[5]
updated = datetime.strptime(row[6],'%Y-%m-%dT%H:%M:%SZ')
hupdated = humanize.naturaltime(updated)
milestone = row[7]
status = colorize(locals())
print("""<tr class='{status}'>
<td>{i}</td>
<td><a href="{url}">{number} - {title}</a></td>
<td><a href="https://github.com/{reporter}">{reporter}</a></td>
<td><a href="https://github.com/{assignee}">{assignee}</a></td>
<td><span class='hidden'>{created}</span>{hcreated}</td>
<td><span class='hidden'>{updated}</span>{hupdated}</td>
<td>{milestone}</td>
</tr>""".format(**locals()))
print("""</tbody></table>""")
print("""
</div>
</div>
</div>
""")
return nrows
def newIsBad(meta):
old = meta['created'] <= datetime.now()-timedelta(weeks=1)
if not old:
return 'danger'
return ''
def oldIsBad(meta):
if meta['updated'] < datetime.now()-timedelta(days=90):
return 'danger'
if meta['updated'] < datetime.now()-timedelta(weeks=1):
return 'warning'
return ''
ops = {
"a_extIssuesIncoming" : {
'title': "Issues from external users without milestone nor assignment",
'query': """
select issues.url, issues.number, issues.title, issues.created, reported.user, issues.assignee, issues.updated, issues.milestone
from issues
join reported on reported.issue=issues.number
join users on reported.user=users.name and users.crl=0
where not exists(select * from assigned where issue=issues.number)
and issues.milestone=""
order by issues.category,issues.updated
"""},
"b_extIssuesUnassigned" : {
'title': "Issues from external users without assignment",
'query': """
select issues.url, issues.number, issues.title, issues.created, reported.user, issues.assignee, issues.updated, issues.milestone
from issues
join reported on reported.issue=issues.number
join users on reported.user=users.name and users.crl=0
where not exists(select * from assigned where issue=issues.number)
order by issues.category,issues.updated
"""},
"h_extIssues" : {
'title': "Issues from external users (all)",
'query': """
select issues.url, issues.number, issues.title, issues.created, reported.user, issues.assignee, issues.updated, issues.milestone
from issues
join reported on reported.issue=issues.number
join users on reported.user=users.name and users.crl=0
order by issues.category,issues.updated
"""},
"c_yourExtIssues" : {
'title': "Issues from external users assigned to you",
'query': """
select issues.url, issues.number, issues.title, issues.created, reported.user, issues.assignee, issues.updated, issues.milestone
from issues
join reported on reported.issue=issues.number
join users on reported.user=users.name and users.crl=0
join assigned on assigned.issue=issues.number and assigned.user='{0}'
order by issues.category,issues.updated
"""},
"d_yourIssues":{
'title': "Issues assigned to you",
'query': """
select issues.url, issues.number, issues.title, issues.created, reported.user, issues.assignee, issues.updated, issues.milestone
from issues
join reported on reported.issue=issues.number
join assigned on assigned.issue=issues.number and assigned.user='{0}'
order by issues.category,issues.updated
"""},
"e_unIssues":{
'title':"Issues created by you that nobody is working on",
'query': """
select issues.url, issues.number, issues.title, issues.created, reported.user, issues.assignee, issues.updated, issues.milestone
from issues
join reported on reported.issue=issues.number and reported.user='{0}'
where not exists (select * from assigned where issue=issues.number)
order by issues.category,issues.updated
"""},
"f_yourIssuesWithoutMilestone":{
'title':"Issues created by you without a milestone",
'query': """
select issues.url, issues.number, issues.title, issues.created, reported.user, issues.assignee, issues.updated, issues.milestone
from issues
join reported on reported.issue=issues.number and reported.user='{0}'
where not exists (select * from assigned where issue=issues.number)
and issues.milestone=""
order by issues.category,issues.updated
"""},
"g_childIssues":{
'title': "Issues you created that someone else is working on",
'query': """
select issues.url, issues.number, issues.title, issues.created, reported.user, issues.assignee, issues.updated, issues.milestone
from issues
join reported on reported.issue=issues.number and reported.user='{0}'
where (select count(*) from assigned where issue=issues.number and user<>'{0}') > 0
order by issues.category,issues.updated
"""},
}
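# Illustrative note (not part of the original script): every query above uses
# '{0}' as a placeholder for a GitHub user name and is filled in with
# str.format() in the per-user loop below. 'octocat' is a hypothetical user.
#
#   sql = ops['d_yourIssues']['query'].format('octocat')
#   for row in c.execute(sql):
#       print(row)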
ksorted = sorted(ops.keys())
index = open('dashboard/index.html', 'w')
sys.stdout = index
header('all')
print("<p>Key:</p><table class='table table-condensed' style='width:auto'>")
for i, k in enumerate(ksorted):
print("<tr><td>%c</td><td>%s</td>" % (chr(ord('A')+i), ops[k]['title']))
print("</table>")
print("<table class='table sortable table-condensed table-striped table-hover table-responsive' style='width:auto'><thead>")
print("<tr><th>User</th>")
for i, k in enumerate(ksorted):
print("<th><a href='#' data-toggle='tooltip' title='%s'>%c</a></th>" % (ops[k]['title'], chr(ord('A')+i)))
print("</tr></thead><tbody>")
for row in cu.execute("select name, lower(name) as lname from users where crl=1 order by lname"):
user = row[0]
luser = row[1]
sys.stdout = open('dashboard/' + user + '.html', 'w')
header(user)
extra = []
info = {}
for k,v in ops.items():
info[k] = genSection(v['title'], k, v['query'].format(user), oldIsBad)
footer()
sys.stdout.close()
sys.stdout = index
print("<tr><td><a href='%s.html'>%s</a> <a href='https://github.com/%s'><span class='glyphicon glyphicon-leaf'></span></a>" % (
user, luser, user))
for i, k in enumerate(ksorted):
print("<td><a href='%s.html' data-toggle='tooltip' title='%s'>%d</a></th>" % (user, ops[k]['title'], info[k]))
print("</tr>")
sys.stdout = index
print("</tbody></table>")
footer()
index.close()
| apache-2.0 |
npuichigo/ttsflow | third_party/tensorflow/tensorflow/python/framework/subscribe_test.py | 46 | 9259 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.subscribe."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import subscribe
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
class SubscribeTest(test_util.TensorFlowTestCase):
def _ExpectSubscribedIdentities(self, container):
"""Convenience function to test a container of subscribed identities."""
self.assertTrue(
all(subscribe._is_subscribed_identity(x) for x in container))
def testSideEffect(self):
a = constant_op.constant(1)
b = constant_op.constant(1)
c = math_ops.add(a, b)
with ops.control_dependencies([c]):
d = constant_op.constant(42)
n = math_ops.negative(c)
shared = []
def sub(t):
shared.append(t)
return t
c = subscribe.subscribe(c,
lambda t: script_ops.py_func(sub, [t], [t.dtype]))
with self.test_session() as sess:
c_out = sess.run([c])
n_out = sess.run([n])
d_out = sess.run([d])
self.assertEquals(n_out, [-2])
self.assertEquals(c_out, [2])
self.assertEquals(d_out, [42])
self.assertEquals(shared, [2, 2, 2])
def testSupportedTypes(self):
"""Confirm that supported types are correctly detected and handled."""
a = constant_op.constant(1)
b = constant_op.constant(1)
c = math_ops.add(a, b)
def sub(t):
return t
# Tuples.
subscribed = subscribe.subscribe(
(a, b), lambda t: script_ops.py_func(sub, [t], [t.dtype]))
self.assertIsInstance(subscribed, tuple)
self._ExpectSubscribedIdentities(subscribed)
# Lists.
subscribed = subscribe.subscribe(
[a, b], lambda t: script_ops.py_func(sub, [t], [t.dtype]))
self.assertIsInstance(subscribed, list)
self._ExpectSubscribedIdentities(subscribed)
# Dictionaries.
subscribed = subscribe.subscribe({
'first': a,
'second': b
}, lambda t: script_ops.py_func(sub, [t], [t.dtype]))
self.assertIsInstance(subscribed, dict)
self._ExpectSubscribedIdentities(subscribed.values())
# Namedtuples.
# pylint: disable=invalid-name
TensorPair = collections.namedtuple('TensorPair', ['first', 'second'])
# pylint: enable=invalid-name
pair = TensorPair(a, b)
subscribed = subscribe.subscribe(
pair, lambda t: script_ops.py_func(sub, [t], [t.dtype]))
self.assertIsInstance(subscribed, TensorPair)
self._ExpectSubscribedIdentities(subscribed)
# Expect an exception to be raised for unsupported types.
with self.assertRaisesRegexp(TypeError, 'has invalid type'):
subscribe.subscribe(c.name,
lambda t: script_ops.py_func(sub, [t], [t.dtype]))
def testCaching(self):
"""Confirm caching of control output is recalculated between calls."""
a = constant_op.constant(1)
b = constant_op.constant(2)
with ops.control_dependencies([a]):
c = constant_op.constant(42)
shared = {}
def sub(t):
shared[t] = shared.get(t, 0) + 1
return t
a = subscribe.subscribe(a,
lambda t: script_ops.py_func(sub, [t], [t.dtype]))
with ops.control_dependencies([b]):
d = constant_op.constant(11)
# If it was using outdated cached control_outputs then
# evaling would not trigger the new subscription.
b = subscribe.subscribe(b,
lambda t: script_ops.py_func(sub, [t], [t.dtype]))
with self.test_session() as sess:
c_out = sess.run([c])
d_out = sess.run([d])
self.assertEquals(c_out, [42])
self.assertEquals(d_out, [11])
self.assertEquals(shared, {2: 1, 1: 1})
def testIsSubscribedIdentity(self):
"""Confirm subscribed identity ops are correctly detected."""
a = constant_op.constant(1)
b = constant_op.constant(2)
c = math_ops.add(a, b)
idop = array_ops.identity(c)
c_sub = subscribe.subscribe(c, [])
self.assertFalse(subscribe._is_subscribed_identity(a))
self.assertFalse(subscribe._is_subscribed_identity(c))
self.assertFalse(subscribe._is_subscribed_identity(idop))
self.assertTrue(subscribe._is_subscribed_identity(c_sub))
def testSubscribeExtend(self):
"""Confirm side effect are correctly added for different input types."""
a = constant_op.constant(1)
b = constant_op.constant(2)
c = math_ops.add(a, b)
shared = {}
def sub(t, name):
shared[name] = shared.get(name, 0) + 1
return t
# Subscribe with a first side effect graph, passing an unsubscribed tensor.
sub_graph1 = lambda t: sub(t, 'graph1')
c_sub = subscribe.subscribe(
c, lambda t: script_ops.py_func(sub_graph1, [t], [t.dtype]))
# Add a second side effect graph, passing the tensor returned by the
# previous call to subscribe().
sub_graph2 = lambda t: sub(t, 'graph2')
c_sub2 = subscribe.subscribe(
c_sub, lambda t: script_ops.py_func(sub_graph2, [t], [t.dtype]))
# Add a third side effect graph, passing the original tensor.
sub_graph3 = lambda t: sub(t, 'graph3')
c_sub3 = subscribe.subscribe(
c, lambda t: script_ops.py_func(sub_graph3, [t], [t.dtype]))
# Make sure there's only one identity op matching the source tensor's name.
graph_ops = ops.get_default_graph().get_operations()
name_prefix = c.op.name + '/subscription/Identity'
identity_ops = [op for op in graph_ops if op.name.startswith(name_prefix)]
self.assertEquals(1, len(identity_ops))
# Expect the objects returned by subscribe() to reference the same tensor.
self.assertIs(c_sub, c_sub2)
self.assertIs(c_sub, c_sub3)
# Expect the three side effect graphs to have been evaluated.
with self.test_session() as sess:
sess.run([c_sub])
self.assertIn('graph1', shared)
self.assertIn('graph2', shared)
self.assertIn('graph3', shared)
def testSubscribeVariable(self):
"""Confirm that variables can be subscribed."""
v1 = variables.Variable(0.0)
v2 = variables.Variable(4.0)
add = math_ops.add(v1, v2)
assign_v1 = v1.assign(3.0)
shared = []
def sub(t):
shared.append(t)
return t
v1_sub = subscribe.subscribe(
v1, lambda t: script_ops.py_func(sub, [t], [t.dtype]))
self.assertTrue(subscribe._is_subscribed_identity(v1_sub))
with self.test_session() as sess:
# Initialize the variables first.
sess.run([v1.initializer])
sess.run([v2.initializer])
# Expect the side effects to be triggered when evaluating the add op as
# it will read the value of the variable.
sess.run([add])
self.assertEquals(1, len(shared))
# Expect the side effect not to be triggered when evaluating the assign
# op as it will not access the 'read' output of the variable.
sess.run([assign_v1])
self.assertEquals(1, len(shared))
sess.run([add])
self.assertEquals(2, len(shared))
# Make sure the values read from the variable match the expected ones.
self.assertEquals([0.0, 3.0], shared)
def testResourceType(self):
"""Confirm that subscribe correctly handles tensors with 'resource' type."""
tensor_array = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name='test',
size=3,
infer_shape=False)
writer = tensor_array.write(0, [[4.0, 5.0]])
reader = writer.read(0)
shared = []
def sub(t):
shared.append(t)
return t
# TensorArray's handle output tensor has a 'resource' type and cannot be
# subscribed as it's not 'numpy compatible' (see dtypes.py).
# Expect that the original tensor is returned when subscribing to it.
tensor_array_sub = subscribe.subscribe(
tensor_array.handle, lambda t: script_ops.py_func(sub, [t], [t.dtype]))
self.assertIs(tensor_array_sub, tensor_array.handle)
self.assertFalse(subscribe._is_subscribed_identity(tensor_array.handle))
with self.test_session() as sess:
sess.run([reader])
self.assertEquals(0, len(shared))
if __name__ == '__main__':
googletest.main()
| apache-2.0 |
mihailim/Sigil | src/Resource_Files/plugin_launchers/python/navprocessor.py | 5 | 17631 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
# Copyright (c) 2019-2020 Kevin B. Hendricks
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of
# conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list
# of conditions and the following disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
# SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
# WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import re
from quickparser import QuickXHTMLParser
SIGIL_REPLACE_LANDMARKS_HERE = "<!-- SIGIL_REPLACE_LANDMARKS_HERE -->"
SIGIL_REPLACE_PAGELIST_HERE = "<!-- SIGIL_REPLACE_PAGELIST_HERE -->"
SIGIL_REPLACE_TOC_HERE = "<!-- SIGIL_REPLACE_TOC_HERE -->"
NAV_TOC_PATTERN = re.compile(r'''^\s*<!--\s*SIGIL_REPLACE_TOC_HERE\s*-->\s*$''', re.M)
NAV_PAGELIST_PATTERN = re.compile(r'''^\s*<!--\s*SIGIL_REPLACE_PAGELIST_HERE\s*-->\s*$''', re.M)
NAV_LANDMARKS_PATTERN = re.compile(r'''^\s*<!--\s*SIGIL_REPLACE_LANDMARKS_HERE\s*-->\s*$''', re.M)
# encode/escape text to make it xml safe
def xmlencode(data):
if data is None:
return ''
newdata = xmldecode(data)
newdata = newdata.replace('&', '&')
newdata = newdata.replace('<', '<')
newdata = newdata.replace('>', '>')
newdata = newdata.replace('"', '"')
return newdata
# decode xml encoded/escaped strings
def xmldecode(data):
if data is None:
return ''
newdata = data
newdata = newdata.replace('"', '"')
newdata = newdata.replace('>', '>')
newdata = newdata.replace('<', '<')
newdata = newdata.replace('&', '&')
return newdata
class NavProcessor(object):
def __init__(self, navsrc, codec='utf-8'):
if navsrc is None:
navsrc = ""
if isinstance(navsrc, bytes):
self.content = navsrc.decode(codec)
else:
self.content = navsrc
# returns ordered list of tuples (play_order, nesting_level, href, title)
# href is in url encoded form (percent encodings used if needed)
# title has been xml decoded/unescaped
def getTOC(self):
# parse the nav to get the table of contents
navsrc = self.content
toclist = []
qp = QuickXHTMLParser()
qp.setContent(navsrc)
lvl = 0
po = 0
title = ""
nav_type = None
href = None
for txt, tp, tname, ttype, tattr in qp.parse_iter():
if txt is not None:
if ".a." in tp or tp.endswith(".a"):
title = title + txt
else:
title = ""
else:
if tname == "nav" and ttype == "begin":
nav_type = tattr.get("epub:type", None)
continue
if tname == "nav" and ttype == "end":
nav_type = None
continue
if nav_type is not None and nav_type == "toc":
if tname == "ol":
if ttype == "begin": lvl += 1
if ttype == "end": lvl -= 1
continue
if tname == "a" and ttype == "begin":
href = tattr.get("href", "")
# must leave all url hrefs in raw url encoded form
# if they can ever contain fragments
continue
if tname == "a" and ttype == "end":
po += 1
title = xmldecode(title)
toclist.append((po, lvl, href, title))
title = ""
href = None
continue
return toclist
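    # A minimal sketch (not part of the original class) of the shape returned
    # by getTOC() for a toc <nav> holding one chapter with one nested section;
    # the hrefs and titles are made up for illustration.
    #
    #   np = NavProcessor(navsrc)
    #   np.getTOC()
    #   # -> [(1, 1, 'Text/chapter01.xhtml', 'Chapter 1'),
    #   #     (2, 2, 'Text/chapter01.xhtml#sec1', 'Section 1.1')]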
# replace the TOC with ordered list of tuples (play_order, nesting_level, href, title)
# href should be url encoded (percent encodings present if needed)
# title should be xml decoded/unescaped
def setTOC(self, toclist):
toc_xhtml = self.buildTOC(toclist)
# replace the TOC in the current navsrc with a placeholder
navsrc = self.content
qp = QuickXHTMLParser()
qp.setContent(navsrc)
nav_type = None
res = []
skip_output = False
for txt, tp, tname, ttype, tattr in qp.parse_iter():
if txt is not None:
if not skip_output:
res.append(txt)
else:
if tname == "nav" and ttype == "begin":
nav_type = tattr.get("epub:type", None)
if nav_type is not None and nav_type == "toc":
res.append(SIGIL_REPLACE_TOC_HERE)
skip_output = True
continue
if tname == "nav" and ttype == "end" and nav_type == "toc":
nav_type = None
skip_output = False
continue
if not skip_output:
res.append(qp.tag_info_to_xml(tname, ttype, tattr))
navsrc = "".join(res)
m = re.search(NAV_TOC_PATTERN, navsrc)
if m is None:
return False
navsrc = navsrc[0:m.start()] + toc_xhtml + navsrc[m.end():]
self.content = navsrc
return True
# returns ordered list of tuples (epubtype, href, title)
# href is url encoded (percent encodings present if needed)
# title has been xml decoded/unescaped
def getLandmarks(self):
# parse the nav to get the landmarks
navsrc = self.content
landmarks = []
qp = QuickXHTMLParser()
qp.setContent(navsrc)
title = ""
nav_type = None
href = None
epubtype = None
for txt, tp, tname, ttype, tattr in qp.parse_iter():
if txt is not None:
if ".a." in tp or tp.endswith(".a"):
title = title + txt
else:
title = ""
else:
if tname == "nav" and ttype == "begin":
nav_type = tattr.get("epub:type", None)
continue
if tname == "nav" and ttype == "end":
nav_type = None
continue
if nav_type is not None and nav_type == "landmarks":
if tname == "a" and ttype == "begin":
href = tattr.get("href", "")
# must leave all hrefs in raw url encoded form
# if they can contain fragments
epubtype = tattr.get("epub:type", None)
continue
if tname == "a" and ttype == "end":
if epubtype is not None:
title = xmldecode(title)
landmarks.append((epubtype, href, title))
title = ""
epubtype = None
href = None
continue
return landmarks
# replace the landmarks with ordered list of tuples (epubtype, href, title)
# href should be url encoded (percent encodings present if needed)
# title should be xml decoded/unescaped
def setLandmarks(self, landmarks):
landmarks_xhtml = self.buildLandmarks(landmarks)
        # replace the landmarks from the navsrc with a placeholder
navsrc = self.content
qp = QuickXHTMLParser()
qp.setContent(navsrc)
nav_type = None
res = []
skip_output = False
for txt, tp, tname, ttype, tattr in qp.parse_iter():
if txt is not None:
if not skip_output:
res.append(txt)
else:
if tname == "nav" and ttype == "begin":
nav_type = tattr.get("epub:type", None)
if nav_type is not None and nav_type == "landmarks":
res.append(SIGIL_REPLACE_LANDMARKS_HERE)
skip_output = True
continue
if tname == "nav" and ttype == "end" and nav_type == "landmarks":
nav_type = None
skip_output = False
continue
if not skip_output:
res.append(qp.tag_info_to_xml(tname, ttype, tattr))
navsrc = "".join(res)
m = re.search(NAV_LANDMARKS_PATTERN, navsrc)
if m is None:
return False
navsrc = navsrc[0:m.start()] + landmarks_xhtml + navsrc[m.end():]
self.content = navsrc
return True
# returns ordered list of tuples (page_number, href, title)
# href is url encoded (percent encodings if needed should be present))
# title has been xml decoded/unescaped
def getPageList(self):
# parse the nav source to get the page-list
navsrc = self.content
pagelist = []
qp = QuickXHTMLParser()
qp.setContent(navsrc)
pgcnt = 0
nav_type = None
href = None
title = ""
for txt, tp, tname, ttype, tattr in qp.parse_iter():
if txt is not None:
if ".a." in tp or tp.endswith(".a"):
title = title + txt
else:
title = ""
else:
if tname == "nav" and ttype == "begin":
nav_type = tattr.get("epub:type", None)
continue
if tname == "nav" and ttype == "end":
nav_type = None
continue
if nav_type is not None and nav_type == "page-list":
if tname == "a" and ttype == "begin" and nav_type == "page-list":
href = tattr.get("href", "")
# hrefs must be kept in raw urlencoded form that may contain fragments
continue
if tname == "a" and ttype == "end":
pgcnt += 1
title = xmldecode(title)
pagelist.append((pgcnt, href, title))
title = ""
continue
return pagelist
# replace the page with ordered list of tuples (page_number, href, title)
# href should be url encoded (percent encodings present if needed))
# title should be xml decoded/unescaped
def setPageList(self, pagelist):
pagelist_xhtml = self.buildPageList(pagelist)
        # replace the pagelist from the navsrc with a placeholder
navsrc = self.content
qp = QuickXHTMLParser()
qp.setContent(navsrc)
nav_type = None
res = []
skip_output = False
found_page_list = False
for txt, tp, tname, ttype, tattr in qp.parse_iter():
if txt is not None:
if not skip_output:
res.append(txt)
else:
if tname == "nav" and ttype == "begin":
nav_type = tattr.get("epub:type", None)
if nav_type is not None and nav_type == "page-list":
res.append(SIGIL_REPLACE_PAGELIST_HERE)
found_page_list = True
skip_output = True
continue
if tname == "nav" and ttype == "end" and nav_type == "page-list":
nav_type = None
skip_output = False
continue
if tname == "body" and ttype == "end":
if not found_page_list and len(pagelist) > 0:
padding = res[-1]
res.append(SIGIL_REPLACE_PAGELIST_HERE)
res.append(padding)
found_page_list = True
if not skip_output:
res.append(qp.tag_info_to_xml(tname, ttype, tattr))
navsrc = "".join(res)
m = re.search(NAV_PAGELIST_PATTERN, navsrc)
if m is None:
return False
navsrc = navsrc[0:m.start()] + pagelist_xhtml + navsrc[m.end():]
self.content = navsrc
return True
# self.toclist is an ordered list of tuples (play_order, nesting_level, href, title)
# hrefs should be in url encoded form (percent encodings present if needed)
def buildTOC(self, toclist):
navres = []
ind = ' '
ibase = ind * 3
incr = ind * 2
# start with the toc
navres.append(ind * 2 + '<nav epub:type="toc" id="toc">\n')
navres.append(ind * 3 + '<h1>Table of Contents</h1>\n')
navres.append(ibase + '<ol>\n')
curlvl = 1
initial = True
for po, lvl, href, lbl in toclist:
lbl = xmlencode(lbl)
if lvl > curlvl:
while lvl > curlvl:
indent = ibase + incr * (curlvl)
navres.append(indent + '<ol>\n')
navres.append(indent + ind + '<li>\n')
navres.append(indent + ind * 2 + '<a href="%s">%s</a>\n' % (href, lbl))
curlvl += 1
elif lvl < curlvl:
while lvl < curlvl:
indent = ibase + incr * (curlvl - 1)
navres.append(indent + ind + '</li>\n')
navres.append(indent + '</ol>\n')
curlvl -= 1
indent = ibase + incr * (lvl - 1)
navres.append(indent + ind + '</li>\n')
navres.append(indent + ind + '<li>\n')
navres.append(indent + ind * 2 + '<a href="%s">%s</a>\n' % (href, lbl))
else:
indent = ibase + incr * (lvl - 1)
if not initial:
navres.append(indent + ind + '</li>\n')
navres.append(indent + ind + '<li>\n')
navres.append(indent + ind * 2 + '<a href="%s">%s</a>\n' % (href, lbl))
initial = False
curlvl = lvl
while(curlvl > 0):
indent = ibase + incr * (curlvl - 1)
navres.append(indent + ind + "</li>\n")
navres.append(indent + "</ol>\n")
curlvl -= 1
navres.append(ind * 2 + '</nav>\n')
return "".join(navres)
# self.pagelist is an ordered list of tuples (page_number, href, title)
# href should be url encoded (percent encodings present if needed)
def buildPageList(self, pagelist):
navres = []
ind = ' '
# add any existing page-list if need be
if len(pagelist) > 0:
navres.append(ind * 2 + '<nav epub:type="page-list" id="page-list" hidden="">\n')
navres.append(ind * 3 + '<ol>\n')
for pn, href, title in pagelist:
title = xmlencode(title)
navres.append(ind * 4 + '<li><a href="%s">%s</a></li>\n' % (href, title))
navres.append(ind * 3 + '</ol>\n')
navres.append(ind * 2 + '</nav>\n')
return "".join(navres)
# self.landmarks is an ordered list of tuples (epub_type, href, title)
# href should be url encoded (percent encodings present if needed)
def buildLandmarks(self, landmarks):
navres = []
ind = ' '
navres.append(ind * 2 + '<nav epub:type="landmarks" id="landmarks" hidden="">\n')
navres.append(ind * 3 + '<h2>Guide</h2>\n')
navres.append(ind * 3 + '<ol>\n')
for etyp, href, title in landmarks:
title = xmlencode(title)
navres.append(ind * 4 + '<li>\n')
navres.append(ind * 5 + '<a epub:type="%s" href="%s">%s</a>\n' % (etyp, href, title))
navres.append(ind * 4 + '</li>\n')
navres.append(ind * 3 + '</ol>\n')
navres.append(ind * 2 + '</nav>\n')
return "".join(navres)
# returns the nav source code as a unicode string in its current form
def getNavSrc(self):
return self.content
def main(argv=sys.argv):
if len(argv) != 2:
print("navprocessor.py nav_file_path")
return -1
navpath = argv[1]
navsrc = ""
with open(navpath, 'rb') as f:
navsrc = f.read()
navsrc = navsrc.decode('utf-8')
np = NavProcessor(navsrc)
landmarks = np.getLandmarks()
pagelist = np.getPageList()
toclist = np.getTOC()
print(toclist)
print(landmarks)
print(pagelist)
print(np.setLandmarks(landmarks))
print(np.setPageList(pagelist))
print(np.setTOC(toclist))
print(np.getNavSrc())
return 0
if __name__ == '__main__':
sys.exit(main())
| gpl-3.0 |
SkySchermer/uweclang | uweclang/batch/batch.py | 1 | 12226 | """UWEC Language Tools batch tools module
Provides functions for processing data in batches.
Module Globals:
BATCH_PARSER: Provides a template ArgumentParser for accepting arguments
concerning batch file processing. It provides the following setup:
Positional Arguments:
file: Any number of input files or directories.
Optional Arguments:
--file: Any number of extra input files or directories.
--recursive: Flag to select for recursive traversal of directories.
--output: Output directory.
--quiet: Silence process output.
--verbose: Select process output level.
        --batch-size: Size of batches, or number of divisions, depending on the batch-mode.
--batch-mode: Select how to do batching.
"""
# Python 3 forward compatibility imports.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from __future__ import unicode_literals
# Standard imports
import os
import errno
import argparse
from itertools import chain
from math import ceil
from pprint import pprint  # used by the verbose debug output in batch_process()
# Setup logger.
import logging
logging.getLogger(__name__).addHandler(logging.NullHandler())
# Build base for batch argument parsing.
BATCH_PARSER = argparse.ArgumentParser(add_help=False)
BATCH_PARSER.add_argument('file',
nargs='*',
default=['.'],
metavar='input-file',
help="""
the files and directories to process. Each directory will be scanned for
files to process.
""")
BATCH_PARSER.add_argument('-f', '--file',
nargs='*',
default=[],
metavar='input-file',
dest='extra_files',
help='additional input files')
BATCH_PARSER.add_argument('-e', '--extensions',
nargs='*',
metavar='ext',
dest='extensions',
help='accepted file extensions for input')
BATCH_PARSER.add_argument('-r', '--recursive',
action='store_true',
help='search directories for input files recursively')
BATCH_PARSER.add_argument('-o', '--output-dir',
default='.',
metavar='output-dir',
dest='output_dir',
help='the output directory')
BATCH_PARSER.add_argument('-w', '--no-overwrite',
nargs='*',
metavar='ext',
dest='no_overwrite',
help="""
existing file extensions that will prevent processing. If a file with the
same name containing one of the given extensions exists, the process will
skip to the next file.
""")
_GROUP = BATCH_PARSER.add_mutually_exclusive_group()
_GROUP.add_argument('-q', '--quiet',
action='store_true',
help='silence all output')
_GROUP.add_argument('-v', '--verbose',
action='count',
help='provide detailed information')
BATCH_PARSER.add_argument('-b', '--batch-size',
nargs='?',
type=int,
default=10,
metavar='batch-size',
dest='batch_size',
help="""
batch the output into subdirectories according to the batch-mode. The
batch-size parameter is used together with the --batch-mode option to
determine how many subdirectories or files per subdirectories to use.
""")
BATCH_PARSER.add_argument('-m', '--batch-mode',
nargs='?',
choices=['none', 'count', 'divide'],
default='none',
const='count',
metavar='mode',
dest='batch_mode',
help="""
sets the batch mode. By default, no batch mode is used, and all files are
placed in the output directory. In count mode, each batch will contain at
most the number of files specified by the --batch-size argument (default
10). In divide mode, there will be that number of batch directories, and
files will be divided evenly between them. If the flag is provided but no
mode is given, count mode will be used.
""")
def split_ext(filename):
"""Splits a filename into its base name and extention part. This is
different from os.path.splitext because it collects nested extensions
together. Any path information will be stripped away. Examples:
os.path.splitext('file.ext1.ext2') = ('file.ext1', '.ext2')
uweclang.split_ext('file.ext1.ext2') = ('file', '.ext1.ext2')
Arguments:
filename (str): The filename to split.
Returns:
(str, str): The filename's (base, extension) pair.
"""
if not filename:
        # Keep the documented (base, extension) tuple contract for empty input.
        return ('', '')
filename = os.path.basename(filename)
parts = filename.split('.')
if parts[0] == '':
fn = '.' + parts[1]
exts = parts[2:]
else:
fn = parts[0]
exts = parts[1:]
exts = ['.' + x for x in exts]
return (fn, ''.join(exts))
def get_files(search_locations, extensions=None, recursive=False):
"""Searches the given locations for files with the given extensions.
Arguments:
search_locations ([str]): A list of files and directories to search.
extensions ([str]): The file extensions to find in directories.
Defaults to None, which will find all files.
recursive: (bool): Whether to search directories recursively.
Returns:
(Generator(str), int): returns a pair containing a generator for
iterating over the found files, and the number of files found.
Note: Files given in search_locations that do not have the specified
extensions will be included in the output. The extensions argument only
effects files in the directories given.
"""
search_locations = set(search_locations)
files = [x for x in search_locations if os.path.isfile(x)]
dirs = [x for x in search_locations if os.path.isdir(x)]
# Identify all the files to process.
for item in dirs:
if os.path.isdir(item):
# The os.walk call will traverse the directory producing
# (path, dirs, files) tuples.
targets = []
for x in os.walk(item):
if not recursive:
# Prevent recursive walk. This deletes all items in the
# list x[1] before the next level of the hierarchy is
# generated.
del x[1][:]
# Get all files in the directory.
targets.append([os.path.join(x[0], l) for l in x[2]])
# Flatten the list of lists of files.
targets = chain.from_iterable(targets)
for x in targets:
# pprint(split_ext(x)[1]); pprint(extensions)
if (extensions is None or
split_ext(x)[1].endswith(tuple(extensions))):
files.append(x)
else:
# Handle a file.
if (extensions is None or
split_ext(item)[1].endswith(tuple(extensions))):
files.append(item)
full_files = set()
for item in files:
full_files.add(os.path.abspath(item))
return ((x for x in full_files), len(full_files))
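# A minimal sketch (not part of the original module) of calling get_files():
# it returns a (generator, count) pair, so the number of matches is known
# before iteration starts. The directory name is made up for illustration.
#
#   txt_files, n = get_files(['corpus_dir'], extensions=['.txt'], recursive=True)
#   print('found {} files'.format(n))
#   for path in txt_files:
#       print(path)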
def batch_process(process, **kwargs):
"""Runs a process on a set of files and batches them into subdirectories.
Arguments:
process ((IN, OUT, Verbosity) -> str): The function to execute on each
file.
Keyword Arguments:
file (Optional[str]): The input files and directories.
outpu_dir (Optional[str]): The output directory.
batch_size (Optional[int]): The size of each subdirectory or the number
of subdirectories, depending on the batch_mode.
batch_mode (Optional[str]): The batch mode. Can be one of 'count' or
'divide'. In count mode, each batch will contain at most the number
of files specified by the batch_size (default 10). In divide mode,
there will be that number of batch directories, and files will be
divided evenly between them.
batch_dir_format (Optional[str]): The format string for batch
subdirectory names. Defaults to 'batch{:03}'
verbosity (int): The verbosity of the output.
Returns:
(None)
"""
# Get values from kwargs:
search_locations = kwargs.get('file', ['.'])
search_locations.extend(kwargs.get('extra_files', []))
extensions = kwargs.get('extensions', [])
recursive = kwargs.get('recursive', False)
output_dir = os.path.abspath(kwargs.get('output_dir', '.'))
no_overwrite = kwargs.get('no_overwrite', [])
verbosity = kwargs.get('verbose', 1)
if kwargs.get('quiet', False):
verbosity = 0
batch_mode = kwargs.get('batch_mode', 'none')
batch_size = kwargs.get('batch_size', 10)
batch_dir_format = kwargs.get('batch_dir_prefix', 'batch{:03}')
# Get files to process
files, file_count = get_files(search_locations,
extensions,
recursive)
if verbosity >= 3:
pprint(kwargs)
# Prepare batching info
if batch_mode == 'none':
batch_count = 0
elif batch_mode == 'divide':
batch_count = batch_size
elif batch_mode == 'count':
batch_count = int(ceil(file_count / batch_size))
batches = []
for batch_num in range(0, batch_count):
batch_name = batch_dir_format.format(batch_num)
batch_path = os.path.join(output_dir, batch_name)
batches.append((batch_path, batch_name))
# Create batch directory.
try:
if verbosity >= 3:
                print('Creating directory: {}'.format(os.path.relpath(batch_path)))
os.makedirs(batch_path)
except OSError as e:
if e.errno == errno.EEXIST:
# We don't care if directory already exists.
pass
# Assign files to batches using (input_file, output_location)
out = output_dir
assigned_files = []
for i, item in enumerate(files):
if batch_count > 0:
out, short = batches[i % len(batches)]
assigned_files.append((item, out))
# Check for already existing outputs.
existing = get_files(output_dir,
no_overwrite,
recursive=True)[0]
existing = {split_ext(x)[0] : x for x in existing}
if verbosity >= 3:
print('Process preventing extensions:', no_overwrite)
if no_overwrite:
if verbosity >= 1:
print('\n--- Checking for existing files of types: {} ---'
''.format(no_overwrite))
# Function for checking if file exists in output_dir
def check(file_name):
base, ext = split_ext(file_name)
over_written = existing.get(base, False)
if over_written:
existing_ext = split_ext(existing[base])[1]
if existing_ext.endswith(tuple(no_overwrite)):
print('Skip {}{} -> "{}"'
''.format(base, ext, os.path.relpath(existing[base])))
return False
return True
# Filter for files that don't exist in output_dir
assigned_files = [x for x in assigned_files if check(x[0])]
if verbosity >= 1 and len(assigned_files) == 0:
print('--- No files to process ---\n')
return
if verbosity >= 1:
print('\n--- Begin Processing {} files ---'
''.format(len(assigned_files)))
# Process each file:
for item, out in assigned_files:
process(item, out, verbosity=verbosity)
if verbosity >= 1:
print('--- End Processing ---\n')
| mit |
clarkfitzg/dask | dask/array/tests/test_image.py | 5 | 1472 | from contextlib import contextmanager
import os
import shutil
from tempfile import mkdtemp
import pytest
pytest.importorskip('skimage')
from dask.array.image import imread as da_imread
import numpy as np
from skimage.io import imread, imsave
@contextmanager
def random_images(n, shape):
dirname = mkdtemp()
for i in range(n):
fn = os.path.join(dirname, 'image.%d.png' % i)
x = np.random.randint(0, 255, size=shape).astype('i1')
imsave(fn, x)
try:
yield os.path.join(dirname, '*.png')
finally:
shutil.rmtree(dirname)
def test_imread():
with random_images(4, (5, 6, 3)) as globstring:
im = da_imread(globstring)
assert im.shape == (4, 5, 6, 3)
assert im.chunks == ((1, 1, 1, 1), (5,), (6,), (3,))
assert im.dtype == 'uint8'
assert im.compute().shape == (4, 5, 6, 3)
assert im.compute().dtype == 'uint8'
def test_imread_with_custom_function():
def imread2(fn):
return np.ones((2, 3, 4), dtype='i1')
with random_images(4, (5, 6, 3)) as globstring:
im = da_imread(globstring, imread=imread2)
assert (im.compute() == np.ones((4, 2, 3, 4), dtype='i1')).all()
def test_preprocess():
def preprocess(x):
x[:] = 1
return x[:, :, 0]
with random_images(4, (2, 3, 4)) as globstring:
im = da_imread(globstring, preprocess=preprocess)
assert (im.compute() == np.ones((4, 2, 3), dtype='i1')).all()
| bsd-3-clause |
MotorolaMobilityLLC/external-chromium_org | chrome/common/extensions/docs/server2/object_store_creator_test.py | 52 | 1906 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from appengine_wrappers import GetAppVersion
from test_object_store import TestObjectStore
from object_store_creator import ObjectStoreCreator
class _FooClass(object):
def __init__(self): pass
class ObjectStoreCreatorTest(unittest.TestCase):
def setUp(self):
self._creator = ObjectStoreCreator(start_empty=False,
store_type=TestObjectStore,
disable_wrappers=True)
def testVanilla(self):
store = self._creator.Create(_FooClass)
self.assertEqual(
'class=_FooClass&app_version=%s' % GetAppVersion(),
store.namespace)
self.assertFalse(store.start_empty)
def testWithCategory(self):
store = self._creator.Create(_FooClass, category='hi')
self.assertEqual(
'class=_FooClass&category=hi&app_version=%s' % GetAppVersion(),
store.namespace)
self.assertFalse(store.start_empty)
def testWithoutAppVersion(self):
store = self._creator.Create(_FooClass, app_version=None)
self.assertEqual('class=_FooClass', store.namespace)
self.assertFalse(store.start_empty)
def testStartConfiguration(self):
store = self._creator.Create(_FooClass, start_empty=True)
self.assertTrue(store.start_empty)
store = self._creator.Create(_FooClass, start_empty=False)
self.assertFalse(store.start_empty)
self.assertRaises(ValueError, ObjectStoreCreator)
def testIllegalCharacters(self):
self.assertRaises(ValueError,
self._creator.Create, _FooClass, app_version='1&2')
self.assertRaises(ValueError,
self._creator.Create, _FooClass, category='a=&b')
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
exosite-labs/pyonep | pyonep/portals/__init__.py | 1 | 20890 | # pylint: disable=W0312,R0913
import requests, json
from pyonep.portals.constants import HTTP_STATUS
from pyonep.portals.endpoints import Endpoints
from pyonep.portals.utils import dictify_device_meta,\
stringify_device_meta
from getpass import getpass
import sys
if sys.version_info[0] < 3:
_input = raw_input
else:
_input = input
class Portals(Endpoints):
def __init__( self,
domain,
portal_name,
user,
auth='__prompt__',
use_token=False,
debug=False):
"""
Params:
            domain: the Exosite domain your Portal is on,
                i.e. mydomain.exosite.com
portal_name: the name of the Exosite Portal. i.e. 'My Device Portal'
user: typically, the email address you use to logon to Portals
or your Portals user name
            auth: if left blank, the creator of the Portals object will
                be prompted for their Portals password. If specifying the
                auth parameter, it should be either the user password or a
                Portals token
use_token: if using a token in the auth parameter, set this to True.
Otherwise, leave blank
"""
if auth == '__prompt__':
print('') # some interpreters don't put a newline before the getpass prompt
auth = getpass('Enter User Password: ')
else:
auth = auth
Endpoints.__init__( self,
domain,
portal_name,
user,
auth,
use_token=use_token
)
def get_portals_list(self):
""" Method to return a list of Portal names with their id's.
Returns list of tuples as [(id_1, portal_name1), (id_2, portal_name2)]
"""
portal_ids = self.get_domain_portal_ids()
portals = [ (p, self.get_portal_by_id(p)) for p in portal_ids ]
return [ (p[0], p[1]['info']['description']['name'], p) for p in portals ]
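    # A minimal sketch (not part of the original class): each element pairs the
    # Portal id and display name with the raw (id, record) tuple; the instance
    # name and values are made up for illustration.
    #
    #   for portal_id, name, raw in portals_obj.get_portals_list():
    #       print(portal_id, name)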
def user_portals_picker(self):
"""
This function is broken and needs to either be fixed or discarded.
User-Interaction function. Allows user to choose which Portal
to make the active one.
"""
# print("Getting Portals list. This could take a few seconds...")
portals = self.get_portals_list()
done = False
while not done:
opts = [ (i, p) for i, p in enumerate(portals) ]
# print('')
for opt, portal in opts:
print("\t{0} - {1}".format(opt, portal[1]))
# print('')
valid_choices = [o[0] for o in opts]
choice = _input("Enter choice ({0}): ".format(valid_choices) )
if int(choice) in valid_choices:
done = True
# loop through all portals until we find an 'id':'rid' match
self.set_portal_name( opts[int(choice)][1][1] )
self.set_portal_id( opts[int(choice)][1][0] )
# self.set_portal_rid( opts[int(choice)][1][2][1]['info']['key'] )
# self.__portal_sn_rid_dict = opts[int(choice)][1][2][1]['info']['aliases']
else:
print("'{0}' is not a valid choice. Please choose from {1}".format(
choice, valid_choices))
def get_portal_by_name(self, portal_name):
"""
Set active portal according to the name passed in 'portal_name'.
Returns dictionary of device 'serial_number: rid'
"""
portals = self.get_portals_list()
for p in portals:
# print("Checking {!r}".format(p))
if portal_name == p[1]:
# print("Found Portal!")
self.set_portal_name( p[1] )
self.set_portal_id( p[0] )
self.set_portal_cik( p[2][1]['info']['key'] )
# print("Active Portal Details:\nName: {0}\nId: {1}\nCIK: {2}".format(
# self.portal_name(),
# self.portal_id(),
# self.portal_cik()))
return p
return None
@classmethod
def login_to_portal(cls,
domain=None,
portal_name=None,
user=None,
credential=None,
use_token=False,
portal_id=None,
# portal_rid=None,
get_devices=False,
debug=False):
"""
A classmethod that returns a (token, Portals object) tuple.
This method can be interactive based on the input arguments.
Sets up the Portals object with all the member variables
it needs to make future api calls. It basically just calls
        Portals.get_portal_by_name(), but instead of returning the raw
        portal record, it returns a logged-in Portals() object (along
        with the user token).
This is a convenience function that can be called at the
time of instantiation.
Instantiate the Portals object with user/password authentication
then call this function. The resultant object will be a Portals
object that uses token authentication for all future calls
instead of using user/password credentials.
Examples:
# for interactive mode, get a password prompt, a token
# and a logged-in Portals object
token, B = Portals.login_to_portal( domain=<domain>,
portal_name=<portal>,
user=<user/email>
)
# for non-interactive mode, passing in user password to
# get a token and a logged-in Portals object
token, B = Portals.login_to_portal( domain=<domain>,
portal_name=<portal>,
user=<user/email>,
credential=<password>
)
# for non-interactive mode, passing in token to get a
# logged-in Portals object
token, B = Portals.login_to_portal( domain=<domain>,
portal_name=<portal>,
user=<user/email>,
credential=<token>,
use_token=True
)
# for non-interactive mode, passing in token and id
# to get a Portals object that doesn't need to make any
# Portals API calls.
token, B = Portals.login_to_portal( domain=<domain>,
portal_name=<portal>,
user=<user/email>,
credential=<token>,
use_token=True,
portal_id=<portal_id>,
portal_rid=<portal_rid>
)
"""
if domain is None:
domain = _input("Enter domain: ")
if portal_name is None:
portal_name = _input("Enter name of Portal: ")
if user is None:
user = _input("Enter username: ")
if None is credential:
# interactive mode
B = Portals( domain=domain,
portal_name=portal_name,
user=user,
debug=debug
)
token = B.get_user_token()
# print("Got token {0}".format(token))
elif not None is credential and not use_token:
# non-interactive, using a user-password to retrieve token
B = Portals( domain=domain,
portal_name=portal_name,
user=user,
auth=credential,
debug=debug
)
token = B.get_user_token()
elif not None is credential and use_token:
# non-interactive, mainly just need to instantiate an object.
B = Portals( domain=domain,
portal_name=portal_name,
user=user,
auth=credential,
use_token=use_token,
debug=debug
)
token = credential
if portal_id is None: # or portal_rid is None:
B.get_portal_by_name(B.portal_name())
else:
B.set_portal_id(portal_id)
# B.set_portal_rid(portal_rid)
if get_devices:
B.map_aliases_to_device_objects()
return token, B
def rename_device(self, device_obj, new_name):
"""
Returns 'device object' of newly created device.
http://docs.exosite.com/portals/#update-device
http://docs.exosite.com/portals/#device-object
"""
device_obj['info']['description']['name'] = new_name
return self.update_device(device_obj)
def add_device_with_name_location_timezone( self,
model,
serial,
name,
location,
timezone):
"""
        This method chains self.add_device(), self.rename_device() and
        self.add_location_timezone_to_device().
Returns device object.
"""
retval = None
retval = self.add_location_timezone_to_device(
self.rename_device(
self.add_device(
model,
serial),
name
),
location,
timezone
)
return retval
def add_location_timezone_to_device(self, device_obj, location, timezone):
"""
Returns 'device object' with updated location
http://docs.exosite.com/portals/#update-device
http://docs.exosite.com/portals/#device-object
"""
dictify_device_meta(device_obj)
device_obj['info']['description']['meta']['location'] = location
device_obj['info']['description']['meta']['Location'] = location
device_obj['info']['description']['meta']['timezone'] = timezone
device_obj['info']['description']['meta']['Timezone'] = timezone
return self.update_device(device_obj)
def delete_device(self, rid):
"""
Deletes device object with given rid
http://docs.exosite.com/portals/#delete-device
"""
headers = {
'User-Agent': self.user_agent(),
'Content-Type': self.content_type()
}
headers.update(self.headers())
r = requests.delete( self.portals_url()+'/devices/'+rid,
headers=headers,
auth=self.auth())
if HTTP_STATUS.NO_CONTENT == r.status_code:
print("Successfully deleted device with rid: {0}".format(rid))
return True
else:
print("Something went wrong: <{0}>: {1}".format(
r.status_code, r.reason))
r.raise_for_status()
return False
def list_portal_data_sources(self):
"""
List data sources of the portal.
http://docs.exosite.com/portals/#list-portal-data-source
"""
headers = {
'User-Agent': self.user_agent(),
}
headers.update(self.headers())
r = requests.get( self.portals_url()+'/portals/'+self.portal_id()+'/data-sources',
headers=headers,
auth=self.auth()
)
if HTTP_STATUS.OK == r.status_code:
return r.json()
else:
print("Something went wrong: <{0}>: {1}".format(
r.status_code, r.reason))
return {}
def list_device_data_sources(self, device_rid):
"""
List data sources of a portal device with rid 'device_rid'.
http://docs.exosite.com/portals/#list-device-data-source
"""
headers = {
'User-Agent': self.user_agent(),
}
headers.update(self.headers())
r = requests.get( self.portals_url()+'/devices/'+device_rid+'/data-sources',
headers=headers, auth=self.auth())
if HTTP_STATUS.OK == r.status_code:
return r.json()
else:
print("Something went wrong: <{0}>: {1}".format(
r.status_code, r.reason))
return None
def get_data_source_bulk_request(self, rids, limit=5):
"""
        Fetches each of the given data sources and up to 'limit' of its
        data points for a particular device in a single bulk request.
"""
headers = {
'User-Agent': self.user_agent(),
'Content-Type': self.content_type()
}
headers.update(self.headers())
r = requests.get( self.portals_url()
+'/data-sources/['
+",".join(rids)
+']/data?limit='+str(limit),
headers=headers, auth=self.auth())
if HTTP_STATUS.OK == r.status_code:
return r.json()
else:
print("Something went wrong: <{0}>: {1}".format(
r.status_code, r.reason))
return {}
def get_cik(self, rid):
"""
Retrieves the CIK key for a device.
"""
device = self.get_device(rid)
return device['info']['key']
def get_all_devices_in_portal(self):
"""
        Retrieves every device in the portal by calling get_multiple_devices()
        in blocks of 10 rids at a time.
"""
rids = self.get_portal_by_name(
self.portal_name()
)[2][1]['info']['aliases']
# print("RIDS: {0}".format(rids))
device_rids = [ rid.strip() for rid in rids ]
blocks_of_ten = [ device_rids[x:x+10] for x in range(0, len(device_rids), 10) ]
devices = []
for block_of_ten in blocks_of_ten:
retval = self.get_multiple_devices(block_of_ten)
if retval is not None:
devices.extend( retval )
else:
print("Not adding to device list: {!r}".format(retval))
# Parse 'meta' key's raw string values for each device
for device in devices:
dictify_device_meta(device)
return devices
def map_aliases_to_device_objects(self):
"""
A device object knows its rid, but not its alias.
A portal object knows its device rids and aliases.
        This function adds a 'portals_aliases' key to all of the
device objects so they can be sorted by alias.
"""
all_devices = self.get_all_devices_in_portal()
for dev_o in all_devices:
dev_o['portals_aliases'] = self.get_portal_by_name(
self.portal_name()
)[2][1]['info']['aliases'][ dev_o['rid'] ]
return all_devices
def search_for_devices_by_serial_number(self, sn):
"""
Returns a list of device objects that match the serial number
in param 'sn'.
This will match partial serial numbers.
"""
import re
sn_search = re.compile(sn)
matches = []
for dev_o in self.get_all_devices_in_portal():
# print("Checking {0}".format(dev_o['sn']))
try:
if sn_search.match(dev_o['sn']):
matches.append(dev_o)
except TypeError as err:
print("Problem checking device {!r}: {!r}".format(
dev_o['info']['description']['name'],
str(err)))
return matches
def print_device_list(self, device_list=None):
"""
Optional parameter is a list of device objects. If omitted, will
        just print all portal device objects.
"""
dev_list = device_list if device_list is not None else self.get_all_devices_in_portal()
for dev in dev_list:
print('{0}\t\t{1}\t\t{2}'.format(
dev['info']['description']['name'],
dev['sn'],
dev['portals_aliases']\
if len(dev['portals_aliases']) != 1
else dev['portals_aliases'][0]
)
)
def print_sorted_device_list(self, device_list=None, sort_key='sn'):
"""
Takes in a sort key and prints the device list according to that sort.
        Defaults to sorting on serial number.
Current supported sort options are:
- name
- sn
- portals_aliases
Can take optional device object list.
"""
dev_list = device_list if device_list is not None else self.get_all_devices_in_portal()
sorted_dev_list = []
if sort_key == 'sn':
sort_keys = [ k[sort_key] for k in dev_list if k[sort_key] is not None ]
sort_keys = sorted(sort_keys)
for key in sort_keys:
sorted_dev_list.extend([ d for d in dev_list if d['sn'] == key ])
elif sort_key == 'name':
sort_keys = [ k['info']['description'][sort_key]\
for k in dev_list if k['info']['description'][sort_key] is not None ]
sort_keys = sorted(sort_keys)
for key in sort_keys:
sorted_dev_list.extend( [ d for d in dev_list\
if d['info']['description'][sort_key] == key
]
)
elif sort_key == 'portals_aliases':
sort_keys = [ k[sort_key] for k in dev_list if k[sort_key] is not None ]
sort_keys = sorted(sort_keys)
for key in sort_keys:
sorted_dev_list.extend([ d for d in dev_list if d[sort_key] == key ])
else:
print("Sort key {!r} not recognized.".format(sort_key))
sort_keys = None
self.print_device_list(device_list=sorted_dev_list)
def get_user_id_from_email(self, email):
""" Uses the get-all-user-accounts Portals API to retrieve the
user-id by supplying an email. """
accts = self.get_all_user_accounts()
for acct in accts:
if acct['email'] == email:
return acct['id']
return None
def get_user_permission_from_email(self, email):
""" Returns a user's permissions object when given the user email."""
_id = self.get_user_id_from_email(email)
return self.get_user_permission(_id)
def add_dplist_permission_for_user_on_portal(self, user_email, portal_id):
""" Adds the 'd_p_list' permission to a user object when provided
a user_email and portal_id."""
_id = self.get_user_id_from_email(user_email)
print(self.get_user_permission_from_email(user_email))
retval = self.add_user_permission( _id, json.dumps(
[{'access': 'd_p_list', 'oid':{'id': portal_id, 'type':'Portal'}}]
)
)
print(self.get_user_permission_from_email(user_email))
return retval
def get_portal_cik(self, portal_name):
""" Retrieves portal object according to 'portal_name' and
returns its cik. """
portal = self.get_portal_by_name(portal_name)
cik = portal[2][1]['info']['key']
return cik
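# Illustrative usage sketch (not part of the original module): a typical
# end-to-end use of the Portals class defined above. The domain, portal name,
# user and password below are placeholder assumptions, not real credentials.
if __name__ == '__main__':
    token, portal = Portals.login_to_portal(
        domain='mycompany.exosite.com',    # assumed example domain
        portal_name='MyPortal',            # assumed example portal name
        user='user@example.com',
        credential='my-password')          # password auth; a reusable token is returned
    # Print every device in the portal, sorted by serial number.
    portal.print_sorted_device_list(sort_key='sn')
    # Later sessions can authenticate with the returned token instead:
    # Portals.login_to_portal(domain='mycompany.exosite.com',
    #                         portal_name='MyPortal', user='user@example.com',
    #                         credential=token, use_token=True)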
| bsd-3-clause |
nkhuyu/graphite-web | webapp/graphite/metrics/views.py | 23 | 9657 | """Copyright 2009 Chris Davis
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
from urllib2 import urlopen
from django.conf import settings
from graphite.compat import HttpResponse, HttpResponseBadRequest
from graphite.util import getProfile, json
from graphite.logger import log
from graphite.storage import STORE
from graphite.metrics.search import searcher
from graphite.carbonlink import CarbonLink
import fnmatch, os
try:
import cPickle as pickle
except ImportError:
import pickle
def index_json(request):
jsonp = request.REQUEST.get('jsonp', False)
cluster = request.REQUEST.get('cluster', False)
def find_matches():
matches = []
for root, dirs, files in os.walk(settings.WHISPER_DIR):
root = root.replace(settings.WHISPER_DIR, '')
for basename in files:
if fnmatch.fnmatch(basename, '*.wsp'):
matches.append(os.path.join(root, basename))
for root, dirs, files in os.walk(settings.CERES_DIR):
root = root.replace(settings.CERES_DIR, '')
for filename in files:
if filename == '.ceres-node':
matches.append(root)
matches = [
m
.replace('.wsp', '')
.replace('.rrd', '')
.replace('/', '.')
.lstrip('.')
for m in sorted(matches)
]
return matches
matches = []
if cluster and len(settings.CLUSTER_SERVERS) > 1:
matches = reduce( lambda x, y: list(set(x + y)), \
[json.loads(urlopen("http://" + cluster_server + "/metrics/index.json").read()) \
for cluster_server in settings.CLUSTER_SERVERS])
else:
matches = find_matches()
return json_response_for(request, matches, jsonp=jsonp)
def search_view(request):
try:
query = str( request.REQUEST['query'] )
except:
return HttpResponseBadRequest(content="Missing required parameter 'query'",
content_type="text/plain")
search_request = {
'query' : query,
'max_results' : int( request.REQUEST.get('max_results', 25) ),
'keep_query_pattern' : int(request.REQUEST.get('keep_query_pattern', 0)),
}
#if not search_request['query'].endswith('*'):
# search_request['query'] += '*'
results = sorted(searcher.search(**search_request))
return json_response_for(request, dict(metrics=results))
def find_view(request):
"View for finding metrics matching a given pattern"
profile = getProfile(request)
format = request.REQUEST.get('format', 'treejson')
local_only = int( request.REQUEST.get('local', 0) )
wildcards = int( request.REQUEST.get('wildcards', 0) )
fromTime = int( request.REQUEST.get('from', -1) )
untilTime = int( request.REQUEST.get('until', -1) )
jsonp = request.REQUEST.get('jsonp', False)
if fromTime == -1:
fromTime = None
if untilTime == -1:
untilTime = None
automatic_variants = int( request.REQUEST.get('automatic_variants', 0) )
try:
query = str( request.REQUEST['query'] )
except:
return HttpResponseBadRequest(content="Missing required parameter 'query'",
content_type="text/plain")
if '.' in query:
base_path = query.rsplit('.', 1)[0] + '.'
else:
base_path = ''
if format == 'completer':
query = query.replace('..', '*.')
if not query.endswith('*'):
query += '*'
if automatic_variants:
query_parts = query.split('.')
for i,part in enumerate(query_parts):
if ',' in part and '{' not in part:
query_parts[i] = '{%s}' % part
query = '.'.join(query_parts)
try:
matches = list( STORE.find(query, fromTime, untilTime, local=local_only) )
except:
log.exception()
raise
log.info('find_view query=%s local_only=%s matches=%d' % (query, local_only, len(matches)))
matches.sort(key=lambda node: node.name)
log.info("received remote find request: pattern=%s from=%s until=%s local_only=%s format=%s matches=%d" % (query, fromTime, untilTime, local_only, format, len(matches)))
if format == 'treejson':
content = tree_json(matches, base_path, wildcards=profile.advancedUI or wildcards)
response = json_response_for(request, content)
elif format == 'pickle':
content = pickle_nodes(matches)
response = HttpResponse(content, content_type='application/pickle')
elif format == 'completer':
results = []
for node in matches:
node_info = dict(path=node.path, name=node.name, is_leaf=str(int(node.is_leaf)))
if not node.is_leaf:
node_info['path'] += '.'
results.append(node_info)
if len(results) > 1 and wildcards:
wildcardNode = {'name' : '*'}
results.append(wildcardNode)
response = json_response_for(request, { 'metrics' : results }, jsonp=jsonp)
else:
return HttpResponseBadRequest(
content="Invalid value for 'format' parameter",
content_type="text/plain")
response['Pragma'] = 'no-cache'
response['Cache-Control'] = 'no-cache'
return response
def expand_view(request):
"View for expanding a pattern into matching metric paths"
local_only = int( request.REQUEST.get('local', 0) )
group_by_expr = int( request.REQUEST.get('groupByExpr', 0) )
leaves_only = int( request.REQUEST.get('leavesOnly', 0) )
results = {}
for query in request.REQUEST.getlist('query'):
results[query] = set()
for node in STORE.find(query, local=local_only):
if node.is_leaf or not leaves_only:
results[query].add( node.path )
# Convert our results to sorted lists because sets aren't json-friendly
if group_by_expr:
for query, matches in results.items():
results[query] = sorted(matches)
else:
results = sorted( reduce(set.union, results.values(), set()) )
result = {
'results' : results
}
response = json_response_for(request, result)
response['Pragma'] = 'no-cache'
response['Cache-Control'] = 'no-cache'
return response
def get_metadata_view(request):
key = request.REQUEST['key']
metrics = request.REQUEST.getlist('metric')
results = {}
for metric in metrics:
try:
results[metric] = CarbonLink.get_metadata(metric, key)
except:
log.exception()
results[metric] = dict(error="Unexpected error occurred in CarbonLink.get_metadata(%s, %s)" % (metric, key))
return json_response_for(request, results)
def set_metadata_view(request):
results = {}
if request.method == 'GET':
metric = request.GET['metric']
key = request.GET['key']
value = request.GET['value']
try:
results[metric] = CarbonLink.set_metadata(metric, key, value)
except:
log.exception()
results[metric] = dict(error="Unexpected error occurred in CarbonLink.set_metadata(%s, %s)" % (metric, key))
elif request.method == 'POST':
if request.META.get('CONTENT_TYPE') == 'application/json':
operations = json.loads( request.body )
else:
operations = json.loads( request.POST['operations'] )
for op in operations:
metric = None
try:
metric, key, value = op['metric'], op['key'], op['value']
results[metric] = CarbonLink.set_metadata(metric, key, value)
except:
log.exception()
if metric:
results[metric] = dict(error="Unexpected error occurred in bulk CarbonLink.set_metadata(%s)" % metric)
else:
results = dict(error="Invalid request method")
return json_response_for(request, results)
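# Illustrative sketch (not part of the original module): the bulk POST body
# accepted by set_metadata_view() above is a JSON list of operations, one
# CarbonLink.set_metadata() call per entry. The metric names and keys below
# are assumptions used only for illustration.
#
#   [
#     {"metric": "servers.web01.cpu", "key": "unit", "value": "percent"},
#     {"metric": "servers.web01.mem", "key": "unit", "value": "bytes"}
#   ]
#
# With Content-Type: application/json the request body is parsed directly;
# otherwise the same JSON string is expected in the 'operations' form field.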
def tree_json(nodes, base_path, wildcards=False):
results = []
branchNode = {
'allowChildren': 1,
'expandable': 1,
'leaf': 0,
}
leafNode = {
'allowChildren': 0,
'expandable': 0,
'leaf': 1,
}
#Add a wildcard node if appropriate
if len(nodes) > 1 and wildcards:
wildcardNode = {'text' : '*', 'id' : base_path + '*'}
if any(not n.is_leaf for n in nodes):
wildcardNode.update(branchNode)
else:
wildcardNode.update(leafNode)
results.append(wildcardNode)
found = set()
results_leaf = []
results_branch = []
for node in nodes: #Now let's add the matching children
if node.name in found:
continue
found.add(node.name)
resultNode = {
'text' : str(node.name),
'id' : base_path + str(node.name),
}
if node.is_leaf:
resultNode.update(leafNode)
results_leaf.append(resultNode)
else:
resultNode.update(branchNode)
results_branch.append(resultNode)
results.extend(results_branch)
results.extend(results_leaf)
return results
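# Illustrative sketch (not part of the original module): _example_tree_json()
# below shows the treejson payload built by tree_json() for a hypothetical
# find() result with one branch ("carbon") and one leaf ("cpu"). The stub
# node type is an assumption for this example; real nodes come from
# STORE.find(). It is never called by the module itself.
def _example_tree_json():
  from collections import namedtuple
  StubNode = namedtuple('StubNode', ['name', 'is_leaf'])
  # Returns a '*' wildcard entry (branch flags), then the expandable branch
  # 'servers.web01.carbon', then the leaf 'servers.web01.cpu'.
  return tree_json([StubNode('carbon', False), StubNode('cpu', True)],
                   'servers.web01.', wildcards=True)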
def pickle_nodes(nodes):
nodes_info = []
for node in nodes:
info = dict(path=node.path, is_leaf=node.is_leaf)
if node.is_leaf:
info['intervals'] = node.intervals
nodes_info.append(info)
return pickle.dumps(nodes_info, protocol=-1)
def json_response_for(request, data, content_type='application/json',
jsonp=False, **kwargs):
accept = request.META.get('HTTP_ACCEPT', 'application/json')
ensure_ascii = accept == 'application/json'
content = json.dumps(data, ensure_ascii=ensure_ascii)
if jsonp:
content = "%s(%s)" % (jsonp, content)
content_type = 'text/javascript'
if not ensure_ascii:
content_type += ';charset=utf-8'
return HttpResponse(content, content_type=content_type, **kwargs)
| apache-2.0 |
terbolous/SickRage | lib/pgi/clib/gir/girepository.py | 19 | 7060 | # Copyright 2012 Christoph Reiter
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
from ctypes import POINTER, c_char_p, c_void_p
from ..glib import guint, gchar_p, GErrorPtr, gboolean, gint, unpack_glist
from ..glib import GSListPtr, GOptionGroupPtr, GListPtr, gerror
from ..glib import unpack_nullterm_array
from ..gobject import GType
from .gibaseinfo import GIBaseInfo
from .gitypelib import GITypelib
from .error import GIError
from .._utils import find_library, wrap_class, fsdecode
from .._compat import PY3, xrange
_gir = find_library("girepository-1.0")
class GIRepositoryError(guint):
TYPELIB_NOT_FOUND = 0
NAMESPACE_MISMATCH = 1
NAMESPACE_VERSION_CONFLICT = 2
LIBRARY_NOT_FOUND = 3
class GIRepositoryLoadFlags(guint):
LAZY = 1
class GIRepository(c_void_p):
def get_infos(self, namespace):
if PY3:
namespace = namespace.encode("ascii")
for index in xrange(self._get_n_infos(namespace)):
res = self._get_info(namespace, index)
yield GIBaseInfo._cast(res)
def get_info(self, namespace, index):
if PY3:
namespace = namespace.encode("ascii")
res = self._get_info(namespace, index)
return GIBaseInfo._cast(res)
def get_n_infos(self, namespace):
if PY3:
namespace = namespace.encode("ascii")
return self._get_n_infos(namespace)
def find_by_gtype(self, gtype):
res = self._find_by_gtype(gtype)
if not res:
return None
return GIBaseInfo._cast(res)
def find_by_name(self, namespace, name):
if PY3:
namespace = namespace.encode("ascii")
name = name.encode("ascii")
res = self._find_by_name(namespace, name)
if not res:
return None
return GIBaseInfo._cast(res)
def require(self, namespace, version, flags):
if PY3:
namespace = namespace.encode("ascii")
if version is not None:
version = version.encode("ascii")
with gerror(GIError) as error:
return self._require(namespace, version, flags, error)
def require_private(self, typelib_dir, namespace, version, flags):
if PY3:
namespace = namespace.encode("ascii")
if version is not None:
version = version.encode("ascii")
with gerror(GIError) as error:
return self._require_private(
typelib_dir, namespace, version, flags, error)
def enumerate_versions(self, namespace):
if PY3:
namespace = namespace.encode("ascii")
glist = self._enumerate_versions(namespace)
res = unpack_glist(glist, c_char_p)
if PY3:
res = [b.decode("ascii") for b in res]
return res
def get_loaded_namespaces(self):
res = self._get_loaded_namespaces()
res = unpack_nullterm_array(res)
if PY3:
res = [b.decode("ascii") for b in res]
return res
def get_immediate_dependencies(self, namespace):
try:
# https://bugzilla.gnome.org/show_bug.cgi?id=743782
immediate_dependencies = self._get_immediate_dependencies
except AttributeError:
immediate_dependencies = self._get_dependencies
if PY3:
namespace = namespace.encode("ascii")
res = immediate_dependencies(namespace)
if not res:
return []
res = unpack_nullterm_array(res)
if PY3:
res = [p.decode("ascii") for p in res]
return res
def get_search_path(self):
res = self._get_search_path()
paths = unpack_glist(res, c_char_p, transfer_full=False)
return [fsdecode(p) for p in paths]
def get_shared_library(self, namespace):
if PY3:
namespace = namespace.encode("ascii")
lib = self._get_shared_library(namespace)
return lib.decode("ascii") if lib else lib
return self._get_shared_library(namespace)
def get_typelib_path(self, namespace):
if PY3:
namespace = namespace.encode("ascii")
path = self._get_typelib_path(namespace)
if path is not None:
return fsdecode(path)
def get_version(self, namespace):
if PY3:
namespace = namespace.encode("ascii")
return self._get_version(namespace).decode("ascii")
return self._get_version(namespace)
def is_registered(self, namespace, version=None):
if PY3:
namespace = namespace.encode("ascii")
if version is not None:
version = version.encode("ascii")
return self._is_registered(namespace, version)
def get_c_prefix(self, namespace):
if PY3:
namespace = namespace.encode("ascii")
res = self._get_c_prefix(namespace)
if PY3:
res = res.decode("ascii")
return res
def load_typelib(self, typelib, flags):
with gerror(GIError) as error:
res = self._load_typelib(typelib, flags, error)
if PY3:
res = res.decode("ascii")
return res
_methods = [
("get_default", GIRepository, []),
("_require", GITypelib, [GIRepository, gchar_p, gchar_p,
GIRepositoryLoadFlags,
POINTER(GErrorPtr)]),
("_find_by_name", GIBaseInfo,
[GIRepository, gchar_p, gchar_p], False),
("prepend_search_path", None, [c_char_p]),
("prepend_library_path", None, [c_char_p]),
("_get_search_path", GSListPtr, []),
("_load_typelib", c_char_p,
[GIRepository, GITypelib, GIRepositoryLoadFlags,
POINTER(GErrorPtr)]),
("_is_registered", gboolean, [GIRepository, gchar_p, gchar_p]),
("_require_private",
GITypelib, [GIRepository, gchar_p, gchar_p, gchar_p,
GIRepositoryLoadFlags, POINTER(GErrorPtr)]),
("_get_dependencies", POINTER(gchar_p), [GIRepository, gchar_p]),
("_get_immediate_dependencies", POINTER(gchar_p), [GIRepository, gchar_p]),
("_get_loaded_namespaces", POINTER(gchar_p), [GIRepository]),
("_find_by_gtype", GIBaseInfo, [GIRepository, GType], False),
("_get_n_infos", gint, [GIRepository, gchar_p]),
("_get_info", GIBaseInfo, [GIRepository, gchar_p, gint], False),
("_get_typelib_path", gchar_p, [GIRepository, gchar_p]),
("_get_shared_library", gchar_p, [GIRepository, gchar_p]),
("_get_version", gchar_p, [GIRepository, gchar_p]),
("get_option_group", GOptionGroupPtr, [], True),
("_get_c_prefix", gchar_p, [GIRepository, gchar_p]),
("_enumerate_versions", GListPtr, [GIRepository, gchar_p]),
]
wrap_class(_gir, None, GIRepository, "g_irepository_", _methods)
__all__ = ["GIRepositoryLoadFlags", "GIRepository", "GIRepositoryError"]
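# Illustrative usage sketch (not part of the original module): typical use of
# the GIRepository wrapper defined above. The "GLib"/"2.0" namespace and
# version strings are assumptions for the example; any installed typelib
# behaves the same way.
if __name__ == '__main__':
    repo = GIRepository.get_default()
    repo.require("GLib", "2.0", GIRepositoryLoadFlags.LAZY)
    print(repo.get_version("GLib"))           # e.g. "2.0"
    print(repo.get_shared_library("GLib"))    # e.g. "libglib-2.0.so.0"
    for info in repo.get_infos("GLib"):
        pass  # each item is a GIBaseInfo subclass (functions, structs, ...)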
| gpl-3.0 |
gratefulfrog/ArduGuitar | Ardu3/DraftDevt/Pyboard/AD75019GUITester/Version01/tst.py | 1 | 3708 | #!/usr/local/bin/python3.4
# server_01.py
"""
This reproduces the Arduino functionality from Arduino_v01.ino
this requires updated boot.py
----
# boot.py -- run on boot-up
# can run arbitrary Python, but best to keep it minimal
import pyb
pyb.main('main.py') # main script to run after this one
pyb.usb_mode('CDC+MSC') # act as a serial and a storage device
#pyb.usb_mode('CDC+HID') # act as a serial device and a mouse
-----
"""
import pyb,spiMgr
SPI_ON_X = True
""" unused
def val2String(val,strLen):
res = ''
for i in range(strLen):
v = (val & (1<<(strLen-1-i)))
res+= ('1' if v else '0')
return res
"""
def charFromSer(ser):
return chr(ser.read(1)[0])
class App:
def __init__(self,SPI_ON_X):
self.spi = spiMgr.ad75019SPIMgr(SPI_ON_X,
('X5' if SPI_ON_X else 'Y5'))
#self.baudrate = 115200
self.loopPauseTime = 200 # millisecs
self.contactChar = '|' #bytes('|' ,'utf8')
self.pollChar = 'p' #bytes('p' ,'utf8')
self.executeChar = 'x' #bytes('x' ,'utf8')
# states
self.uninitState = 1
self.contactState = 2
self.pollState = 3
self.executeState = 4
# unused self.bitVecNBBytes = 32
self.nbPins = 16
self.nbBits = 256
self.execIncomingLength = self.nbPins + self.nbBits
# incoming message processing
self.incomingBits = '0' * self.nbBits
self.replyReady = False
self.serial=pyb.USB_VCP()
self.setState(self.uninitState)
self.establishContact()
self.setState(self.contactState)
self.loop()
def setState(self,state):
leds= range(1,5)
[pyb.LED(i).off() for i in leds]
self.currentState = state
pyb.LED(state).on()
def establishContact(self):
found = False
while not found:
while not self.serial.any():
self.serial.write(self.contactChar)
pyb.delay(self.loopPauseTime)
read = charFromSer(self.serial)
if read == self.contactChar:
found = True
def loop(self):
while True:
if self.serial.any():
self.processIncoming()
if self.replyReady:
self.sendReply()
self.replyReady = False
def processIncoming(self):
incomingChar = charFromSer(self.serial)
if (self.currentState == self.contactState or
self.currentState == self.pollState):
if incomingChar == self.pollChar:
self.setState(self.pollState)
pyb.delay(100) # only to be able to see the led flash! remove in operational code
self.replyReady = True
self.setState(self.contactState)
elif incomingChar == self.executeChar:
self.incomingBits = ''
self.setState(self.executeState)
elif self.currentState == self.executeState:
self.incomingBits += incomingChar
if len(self.incomingBits) == self.execIncomingLength:
self.execIncoming()
self.replyReady = True
self.setState(self.contactState)
def execIncoming(self):
self.setConnections()
def setConnections(self):
#print('outgoingBits: ' + self.incomingBits)
#print(type(self.incomingBits[0]))
self.spi.sendString(self.incomingBits[self.nbPins:])
def sendReply(self):
reply= '0' * self.nbPins
reply += self.incomingBits
self.serial.write(reply)
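# Illustrative launch sketch (an assumption, not part of the original script):
# instantiating App blocks forever, since __init__ runs establishContact()
# and then loop(), so it should be the last statement executed (e.g. invoked
# from main.py on the pyboard).
if __name__ == '__main__':
    App(SPI_ON_X)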
| gpl-2.0 |
chouseknecht/ansible | lib/ansible/modules/packaging/os/apt_rpm.py | 79 | 4726 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2013, Evgenii Terechkov
# Written by Evgenii Terechkov <[email protected]>
# Based on urpmi module written by Philippe Makowski <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: apt_rpm
short_description: apt_rpm package manager
description:
- Manages packages with I(apt-rpm). Both low-level (I(rpm)) and high-level (I(apt-get)) package manager binaries required.
version_added: "1.5"
options:
pkg:
description:
- name of package to install, upgrade or remove.
required: true
state:
description:
- Indicates the desired package state.
choices: [ absent, present ]
default: present
update_cache:
description:
- update the package database first C(apt-get update).
type: bool
default: 'no'
author:
- Evgenii Terechkov (@evgkrsk)
'''
EXAMPLES = '''
- name: Install package foo
apt_rpm:
pkg: foo
state: present
- name: Remove package foo
apt_rpm:
pkg: foo
state: absent
- name: Remove packages foo and bar
apt_rpm:
pkg: foo,bar
state: absent
# bar will be the updated if a newer version exists
- name: Update the package database and install bar
apt_rpm:
name: bar
state: present
update_cache: yes
'''
import json
import os
import shlex
import sys
from ansible.module_utils.basic import AnsibleModule
APT_PATH = "/usr/bin/apt-get"
RPM_PATH = "/usr/bin/rpm"
def query_package(module, name):
# rpm -q returns 0 if the package is installed,
# 1 if it is not installed
rc, out, err = module.run_command("%s -q %s" % (RPM_PATH, name))
if rc == 0:
return True
else:
return False
def query_package_provides(module, name):
# rpm -q returns 0 if the package is installed,
# 1 if it is not installed
rc, out, err = module.run_command("%s -q --provides %s" % (RPM_PATH, name))
return rc == 0
def update_package_db(module):
rc, out, err = module.run_command("%s update" % APT_PATH)
if rc != 0:
module.fail_json(msg="could not update package db: %s" % err)
def remove_packages(module, packages):
remove_c = 0
# Using a for loop in case of error, we can report the package that failed
for package in packages:
# Query the package first, to see if we even need to remove
if not query_package(module, package):
continue
rc, out, err = module.run_command("%s -y remove %s" % (APT_PATH, package))
if rc != 0:
module.fail_json(msg="failed to remove %s: %s" % (package, err))
remove_c += 1
if remove_c > 0:
module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
module.exit_json(changed=False, msg="package(s) already absent")
def install_packages(module, pkgspec):
packages = ""
for package in pkgspec:
if not query_package_provides(module, package):
packages += "'%s' " % package
if len(packages) != 0:
rc, out, err = module.run_command("%s -y install %s" % (APT_PATH, packages))
installed = True
        # Verify that every requested package is now provided.
        for package in pkgspec:
            if not query_package_provides(module, package):
installed = False
# apt-rpm always have 0 for exit code if --force is used
if rc or not installed:
module.fail_json(msg="'apt-get -y install %s' failed: %s" % (packages, err))
else:
module.exit_json(changed=True, msg="%s present(s)" % packages)
else:
module.exit_json(changed=False)
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', default='installed', choices=['absent', 'installed', 'present', 'removed']),
update_cache=dict(type='bool', default=False, aliases=['update-cache']),
package=dict(type='str', required=True, aliases=['name', 'pkg']),
),
)
if not os.path.exists(APT_PATH) or not os.path.exists(RPM_PATH):
module.fail_json(msg="cannot find /usr/bin/apt-get and/or /usr/bin/rpm")
p = module.params
if p['update_cache']:
update_package_db(module)
packages = p['package'].split(',')
if p['state'] in ['installed', 'present']:
install_packages(module, packages)
elif p['state'] in ['absent', 'removed']:
remove_packages(module, packages)
if __name__ == '__main__':
main()
| gpl-3.0 |
BeyondTheClouds/nova | nova/tests/unit/objects/test_service.py | 5 | 21411 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_utils import timeutils
from oslo_versionedobjects import base as ovo_base
from oslo_versionedobjects import exception as ovo_exc
from nova.compute import manager as compute_manager
from nova import context
from nova import db
from nova import exception
from nova import objects
from nova.objects import aggregate
from nova.objects import fields
from nova.objects import service
from nova import test
from nova.tests.unit.objects import test_compute_node
from nova.tests.unit.objects import test_objects
NOW = timeutils.utcnow().replace(microsecond=0)
def _fake_service(**kwargs):
fake_service = {
'created_at': NOW,
'updated_at': None,
'deleted_at': None,
'deleted': False,
'id': 123,
'host': 'fake-host',
'binary': 'nova-fake',
'topic': 'fake-service-topic',
'report_count': 1,
'forced_down': False,
'disabled': False,
'disabled_reason': None,
'last_seen_up': None,
'version': service.SERVICE_VERSION,
}
fake_service.update(kwargs)
return fake_service
fake_service = _fake_service()
OPTIONAL = ['availability_zone', 'compute_node']
class _TestServiceObject(object):
def supported_hv_specs_comparator(self, expected, obj_val):
obj_val = [inst.to_list() for inst in obj_val]
self.assertJsonEqual(expected, obj_val)
def pci_device_pools_comparator(self, expected, obj_val):
obj_val = obj_val.obj_to_primitive()
self.assertJsonEqual(expected, obj_val)
def comparators(self):
return {'stats': self.assertJsonEqual,
'host_ip': self.assertJsonEqual,
'supported_hv_specs': self.supported_hv_specs_comparator,
'pci_device_pools': self.pci_device_pools_comparator}
def subs(self):
return {'supported_hv_specs': 'supported_instances',
'pci_device_pools': 'pci_stats'}
def _test_query(self, db_method, obj_method, *args, **kwargs):
self.mox.StubOutWithMock(db, db_method)
db_exception = kwargs.pop('db_exception', None)
if db_exception:
getattr(db, db_method)(self.context, *args, **kwargs).AndRaise(
db_exception)
else:
getattr(db, db_method)(self.context, *args, **kwargs).AndReturn(
fake_service)
self.mox.ReplayAll()
obj = getattr(service.Service, obj_method)(self.context, *args,
**kwargs)
if db_exception:
self.assertIsNone(obj)
else:
self.compare_obj(obj, fake_service, allow_missing=OPTIONAL)
def test_get_by_id(self):
self._test_query('service_get', 'get_by_id', 123)
def test_get_by_host_and_topic(self):
self._test_query('service_get_by_host_and_topic',
'get_by_host_and_topic', 'fake-host', 'fake-topic')
def test_get_by_host_and_binary(self):
self._test_query('service_get_by_host_and_binary',
'get_by_host_and_binary', 'fake-host', 'fake-binary')
def test_get_by_host_and_binary_raises(self):
self._test_query('service_get_by_host_and_binary',
'get_by_host_and_binary', 'fake-host', 'fake-binary',
db_exception=exception.HostBinaryNotFound(
host='fake-host', binary='fake-binary'))
def test_get_by_compute_host(self):
self._test_query('service_get_by_compute_host', 'get_by_compute_host',
'fake-host')
def test_get_by_args(self):
self._test_query('service_get_by_host_and_binary', 'get_by_args',
'fake-host', 'fake-binary')
def test_create(self):
self.mox.StubOutWithMock(db, 'service_create')
db.service_create(self.context, {'host': 'fake-host',
'version': fake_service['version']}
).AndReturn(fake_service)
self.mox.ReplayAll()
service_obj = service.Service(context=self.context)
service_obj.host = 'fake-host'
service_obj.create()
self.assertEqual(fake_service['id'], service_obj.id)
self.assertEqual(service.SERVICE_VERSION, service_obj.version)
def test_recreate_fails(self):
self.mox.StubOutWithMock(db, 'service_create')
db.service_create(self.context, {'host': 'fake-host',
'version': fake_service['version']}
).AndReturn(fake_service)
self.mox.ReplayAll()
service_obj = service.Service(context=self.context)
service_obj.host = 'fake-host'
service_obj.create()
self.assertRaises(exception.ObjectActionError, service_obj.create)
def test_save(self):
self.mox.StubOutWithMock(db, 'service_update')
db.service_update(self.context, 123,
{'host': 'fake-host',
'version': fake_service['version']}
).AndReturn(fake_service)
self.mox.ReplayAll()
service_obj = service.Service(context=self.context)
service_obj.id = 123
service_obj.host = 'fake-host'
service_obj.save()
self.assertEqual(service.SERVICE_VERSION, service_obj.version)
@mock.patch.object(db, 'service_create',
return_value=fake_service)
def test_set_id_failure(self, db_mock):
service_obj = service.Service(context=self.context,
binary='nova-compute')
service_obj.create()
self.assertRaises(ovo_exc.ReadOnlyFieldError, setattr,
service_obj, 'id', 124)
def _test_destroy(self):
self.mox.StubOutWithMock(db, 'service_destroy')
db.service_destroy(self.context, 123)
self.mox.ReplayAll()
service_obj = service.Service(context=self.context)
service_obj.id = 123
service_obj.destroy()
def test_destroy(self):
# The test harness needs db.service_destroy to work,
# so avoid leaving it broken here after we're done
orig_service_destroy = db.service_destroy
try:
self._test_destroy()
finally:
db.service_destroy = orig_service_destroy
def test_get_by_topic(self):
self.mox.StubOutWithMock(db, 'service_get_all_by_topic')
db.service_get_all_by_topic(self.context, 'fake-topic').AndReturn(
[fake_service])
self.mox.ReplayAll()
services = service.ServiceList.get_by_topic(self.context, 'fake-topic')
self.assertEqual(1, len(services))
self.compare_obj(services[0], fake_service, allow_missing=OPTIONAL)
@mock.patch('nova.db.service_get_all_by_binary')
def test_get_by_binary(self, mock_get):
mock_get.return_value = [fake_service]
services = service.ServiceList.get_by_binary(self.context,
'fake-binary')
self.assertEqual(1, len(services))
mock_get.assert_called_once_with(self.context,
'fake-binary',
include_disabled=False)
@mock.patch('nova.db.service_get_all_by_binary')
def test_get_by_binary_disabled(self, mock_get):
mock_get.return_value = [_fake_service(disabled=True)]
services = service.ServiceList.get_by_binary(self.context,
'fake-binary',
include_disabled=True)
self.assertEqual(1, len(services))
mock_get.assert_called_once_with(self.context,
'fake-binary',
include_disabled=True)
@mock.patch('nova.db.service_get_all_by_binary')
def test_get_by_binary_both(self, mock_get):
mock_get.return_value = [_fake_service(),
_fake_service(disabled=True)]
services = service.ServiceList.get_by_binary(self.context,
'fake-binary',
include_disabled=True)
self.assertEqual(2, len(services))
mock_get.assert_called_once_with(self.context,
'fake-binary',
include_disabled=True)
def test_get_by_host(self):
self.mox.StubOutWithMock(db, 'service_get_all_by_host')
db.service_get_all_by_host(self.context, 'fake-host').AndReturn(
[fake_service])
self.mox.ReplayAll()
services = service.ServiceList.get_by_host(self.context, 'fake-host')
self.assertEqual(1, len(services))
self.compare_obj(services[0], fake_service, allow_missing=OPTIONAL)
def test_get_all(self):
self.mox.StubOutWithMock(db, 'service_get_all')
db.service_get_all(self.context, disabled=False).AndReturn(
[fake_service])
self.mox.ReplayAll()
services = service.ServiceList.get_all(self.context, disabled=False)
self.assertEqual(1, len(services))
self.compare_obj(services[0], fake_service, allow_missing=OPTIONAL)
def test_get_all_with_az(self):
self.mox.StubOutWithMock(db, 'service_get_all')
self.mox.StubOutWithMock(aggregate.AggregateList,
'get_by_metadata_key')
db.service_get_all(self.context, disabled=None).AndReturn(
[dict(fake_service, topic='compute')])
agg = aggregate.Aggregate(context=self.context)
agg.name = 'foo'
agg.metadata = {'availability_zone': 'test-az'}
agg.create()
agg.hosts = [fake_service['host']]
aggregate.AggregateList.get_by_metadata_key(self.context,
'availability_zone', hosts=set(agg.hosts)).AndReturn([agg])
self.mox.ReplayAll()
services = service.ServiceList.get_all(self.context, set_zones=True)
self.assertEqual(1, len(services))
self.assertEqual('test-az', services[0].availability_zone)
def test_compute_node(self):
fake_compute_node = objects.ComputeNode._from_db_object(
self.context, objects.ComputeNode(),
test_compute_node.fake_compute_node)
self.mox.StubOutWithMock(objects.ComputeNodeList, 'get_all_by_host')
objects.ComputeNodeList.get_all_by_host(
self.context, 'fake-host').AndReturn(
[fake_compute_node])
self.mox.ReplayAll()
service_obj = service.Service(id=123, host="fake-host",
binary="nova-compute")
service_obj._context = self.context
self.assertEqual(service_obj.compute_node,
fake_compute_node)
# Make sure it doesn't re-fetch this
service_obj.compute_node
def test_load_when_orphaned(self):
service_obj = service.Service()
service_obj.id = 123
self.assertRaises(exception.OrphanedObjectError,
getattr, service_obj, 'compute_node')
@mock.patch.object(objects.ComputeNodeList, 'get_all_by_host')
def test_obj_make_compatible_for_compute_node(self, get_all_by_host):
service_obj = objects.Service(context=self.context)
fake_service_dict = fake_service.copy()
fake_compute_obj = objects.ComputeNode(host=fake_service['host'],
service_id=fake_service['id'])
get_all_by_host.return_value = [fake_compute_obj]
versions = ovo_base.obj_tree_get_versions('Service')
versions['ComputeNode'] = '1.10'
service_obj.obj_make_compatible_from_manifest(fake_service_dict, '1.9',
versions)
self.assertEqual(
fake_compute_obj.obj_to_primitive(target_version='1.10',
version_manifest=versions),
fake_service_dict['compute_node'])
@mock.patch('nova.db.service_get_minimum_version')
def test_get_minimum_version_none(self, mock_get):
mock_get.return_value = None
self.assertEqual(0,
objects.Service.get_minimum_version(self.context,
'nova-compute'))
mock_get.assert_called_once_with(self.context, 'nova-compute')
@mock.patch('nova.db.service_get_minimum_version')
def test_get_minimum_version(self, mock_get):
mock_get.return_value = 123
self.assertEqual(123,
objects.Service.get_minimum_version(self.context,
'nova-compute'))
mock_get.assert_called_once_with(self.context, 'nova-compute')
@mock.patch('nova.db.service_get_minimum_version')
@mock.patch('nova.objects.service.LOG')
def test_get_minimum_version_checks_binary(self, mock_log, mock_get):
mock_get.return_value = None
self.assertEqual(0,
objects.Service.get_minimum_version(self.context,
'nova-compute'))
self.assertFalse(mock_log.warning.called)
self.assertRaises(exception.ObjectActionError,
objects.Service.get_minimum_version,
self.context,
'compute')
self.assertTrue(mock_log.warning.called)
@mock.patch('nova.db.service_get_minimum_version')
def test_get_minimum_version_with_caching(self, mock_get):
objects.Service.enable_min_version_cache()
mock_get.return_value = 123
self.assertEqual(123,
objects.Service.get_minimum_version(self.context,
'nova-compute'))
self.assertEqual({"nova-compute": 123},
objects.Service._MIN_VERSION_CACHE)
self.assertEqual(123,
objects.Service.get_minimum_version(self.context,
'nova-compute'))
mock_get.assert_called_once_with(self.context, 'nova-compute')
objects.Service._SERVICE_VERSION_CACHING = False
objects.Service.clear_min_version_cache()
@mock.patch('nova.db.service_get_minimum_version', return_value=2)
def test_create_above_minimum(self, mock_get):
with mock.patch('nova.objects.service.SERVICE_VERSION',
new=3):
objects.Service(context=self.context,
binary='nova-compute').create()
@mock.patch('nova.db.service_get_minimum_version', return_value=2)
def test_create_equal_to_minimum(self, mock_get):
with mock.patch('nova.objects.service.SERVICE_VERSION',
new=2):
objects.Service(context=self.context,
binary='nova-compute').create()
@mock.patch('nova.db.service_get_minimum_version', return_value=2)
def test_create_below_minimum(self, mock_get):
with mock.patch('nova.objects.service.SERVICE_VERSION',
new=1):
self.assertRaises(exception.ServiceTooOld,
objects.Service(context=self.context,
binary='nova-compute',
).create)
class TestServiceObject(test_objects._LocalTest,
_TestServiceObject):
pass
class TestRemoteServiceObject(test_objects._RemoteTest,
_TestServiceObject):
pass
class TestServiceVersion(test.TestCase):
def setUp(self):
self.ctxt = context.get_admin_context()
super(TestServiceVersion, self).setUp()
def _collect_things(self):
data = {
'compute_rpc': compute_manager.ComputeManager.target.version,
}
return data
def test_version(self):
calculated = self._collect_things()
self.assertEqual(
len(service.SERVICE_VERSION_HISTORY), service.SERVICE_VERSION + 1,
'Service version %i has no history. Please update '
'nova.objects.service.SERVICE_VERSION_HISTORY '
'and add %s to it' % (service.SERVICE_VERSION, repr(calculated)))
current = service.SERVICE_VERSION_HISTORY[service.SERVICE_VERSION]
self.assertEqual(
current, calculated,
'Changes detected that require a SERVICE_VERSION change. Please '
            'increment nova.objects.service.SERVICE_VERSION, and make sure it '
'is equal to nova.compute.manager.ComputeManager.target.version.')
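    # For reference (a sketch, not part of the original test): each entry in
    # nova.objects.service.SERVICE_VERSION_HISTORY mirrors _collect_things()
    # above, e.g. {'compute_rpc': '4.11'}, so bumping the compute RPC version
    # means appending a matching entry and incrementing SERVICE_VERSION.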
def test_version_in_init(self):
self.assertRaises(exception.ObjectActionError,
objects.Service,
version=123)
def test_version_set_on_init(self):
self.assertEqual(service.SERVICE_VERSION,
objects.Service().version)
def test_version_loaded_from_db(self):
fake_version = fake_service['version'] + 1
fake_different_service = dict(fake_service)
fake_different_service['version'] = fake_version
obj = objects.Service()
obj._from_db_object(self.ctxt, obj, fake_different_service)
self.assertEqual(fake_version, obj.version)
def test_save_noop_with_only_version(self):
o = objects.Service(context=self.ctxt, id=fake_service['id'])
o.obj_reset_changes(['id'])
self.assertEqual(set(['version']), o.obj_what_changed())
with mock.patch('nova.db.service_update') as mock_update:
o.save()
self.assertFalse(mock_update.called)
o.host = 'foo'
with mock.patch('nova.db.service_update') as mock_update:
mock_update.return_value = fake_service
o.save()
mock_update.assert_called_once_with(
self.ctxt, fake_service['id'],
{'version': service.SERVICE_VERSION,
'host': 'foo'})
class TestServiceStatusNotification(test.TestCase):
def setUp(self):
self.ctxt = context.get_admin_context()
super(TestServiceStatusNotification, self).setUp()
@mock.patch('nova.objects.service.ServiceStatusNotification')
def _verify_notification(self, service_obj, mock_notification):
service_obj.save()
self.assertTrue(mock_notification.called)
event_type = mock_notification.call_args[1]['event_type']
priority = mock_notification.call_args[1]['priority']
publisher = mock_notification.call_args[1]['publisher']
payload = mock_notification.call_args[1]['payload']
self.assertEqual(service_obj.host, publisher.host)
self.assertEqual(service_obj.binary, publisher.binary)
self.assertEqual(fields.NotificationPriority.INFO, priority)
self.assertEqual('service', event_type.object)
self.assertEqual(fields.NotificationAction.UPDATE,
event_type.action)
for field in service.ServiceStatusPayload.SCHEMA:
if field in fake_service:
self.assertEqual(fake_service[field], getattr(payload, field))
mock_notification.return_value.emit.assert_called_once_with(self.ctxt)
@mock.patch('nova.db.service_update')
def test_service_update_with_notification(self, mock_db_service_update):
service_obj = objects.Service(context=self.ctxt, id=fake_service['id'])
mock_db_service_update.return_value = fake_service
for key, value in {'disabled': True,
'disabled_reason': 'my reason',
'forced_down': True}.items():
setattr(service_obj, key, value)
self._verify_notification(service_obj)
@mock.patch('nova.objects.service.ServiceStatusNotification')
@mock.patch('nova.db.service_update')
def test_service_update_without_notification(self,
mock_db_service_update,
mock_notification):
service_obj = objects.Service(context=self.ctxt, id=fake_service['id'])
mock_db_service_update.return_value = fake_service
for key, value in {'report_count': 13,
'last_seen_up': timeutils.utcnow()}.items():
setattr(service_obj, key, value)
service_obj.save()
self.assertFalse(mock_notification.called)
| apache-2.0 |
ageron/tensorflow | tensorflow/contrib/learn/python/learn/estimators/dnn_test.py | 10 | 60830 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DNNEstimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import tempfile
import numpy as np
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import dnn
from tensorflow.contrib.learn.python.learn.estimators import dnn_linear_combined
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import test_data
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.feature_column import feature_column_lib as fc_core
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import monitored_session
from tensorflow.python.training import server_lib
class EmbeddingMultiplierTest(test.TestCase):
"""dnn_model_fn tests."""
def testRaisesNonEmbeddingColumn(self):
one_hot_language = feature_column.one_hot_column(
feature_column.sparse_column_with_hash_bucket('language', 10))
params = {
'feature_columns': [one_hot_language],
'head': head_lib.multi_class_head(2),
'hidden_units': [1],
# Set lr mult to 0. to keep embeddings constant.
'embedding_lr_multipliers': {
one_hot_language: 0.0
},
}
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32)
with self.assertRaisesRegexp(ValueError,
'can only be defined for embedding columns'):
dnn._dnn_model_fn(features, labels, model_fn.ModeKeys.TRAIN, params)
def testMultipliesGradient(self):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
embedding_wire = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('wire', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
params = {
'feature_columns': [embedding_language, embedding_wire],
'head': head_lib.multi_class_head(2),
'hidden_units': [1],
# Set lr mult to 0. to keep embeddings constant.
'embedding_lr_multipliers': {
embedding_language: 0.0
},
}
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
'wire':
sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32)
model_ops = dnn._dnn_model_fn(features, labels, model_fn.ModeKeys.TRAIN,
params)
with monitored_session.MonitoredSession() as sess:
language_var = dnn_linear_combined._get_embedding_variable(
embedding_language, 'dnn', 'dnn/input_from_feature_columns')
wire_var = dnn_linear_combined._get_embedding_variable(
embedding_wire, 'dnn', 'dnn/input_from_feature_columns')
for _ in range(2):
_, language_value, wire_value = sess.run(
[model_ops.train_op, language_var, wire_var])
initial_value = np.full_like(language_value, 0.1)
self.assertTrue(np.all(np.isclose(language_value, initial_value)))
self.assertFalse(np.all(np.isclose(wire_value, initial_value)))
class ActivationFunctionTest(test.TestCase):
def _getModelForActivation(self, activation_fn):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
params = {
'feature_columns': [embedding_language],
'head': head_lib.multi_class_head(2),
'hidden_units': [1],
'activation_fn': activation_fn,
}
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32)
return dnn._dnn_model_fn(features, labels, model_fn.ModeKeys.TRAIN, params)
def testValidActivation(self):
_ = self._getModelForActivation('relu')
def testRaisesOnBadActivationName(self):
with self.assertRaisesRegexp(ValueError,
'Activation name should be one of'):
self._getModelForActivation('max_pool')
class DNNEstimatorTest(test.TestCase):
def _assertInRange(self, expected_min, expected_max, actual):
self.assertLessEqual(expected_min, actual)
self.assertGreaterEqual(expected_max, actual)
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=dnn.DNNClassifier(
n_classes=3,
feature_columns=[
feature_column.real_valued_column(
'feature', dimension=4)
],
hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_multiclass_fn,
eval_input_fn=test_data.iris_input_multiclass_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, dnn.DNNEstimator)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
dnn_estimator = dnn.DNNEstimator(
head=head_lib.multi_class_head(2, weight_column_name='w'),
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
dnn_estimator.fit(input_fn=_input_fn_train, steps=5)
scores = dnn_estimator.evaluate(input_fn=_input_fn_eval, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
class DNNClassifierTest(test.TestCase):
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=dnn.DNNClassifier(
n_classes=3,
feature_columns=[
feature_column.real_valued_column(
'feature', dimension=4)
],
hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_multiclass_fn,
eval_input_fn=test_data.iris_input_multiclass_fn)
exp.test()
def _assertInRange(self, expected_min, expected_max, actual):
self.assertLessEqual(expected_min, actual)
self.assertGreaterEqual(expected_max, actual)
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, dnn.DNNClassifier)
def testEmbeddingMultiplier(self):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
classifier = dnn.DNNClassifier(
feature_columns=[embedding_language],
hidden_units=[3, 3],
embedding_lr_multipliers={embedding_language: 0.8})
self.assertEqual({
embedding_language: 0.8
}, classifier.params['embedding_lr_multipliers'])
def testInputPartitionSize(self):
def _input_fn_float_label(num_epochs=None):
features = {
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[0.8], [0.], [0.2]], dtype=dtypes.float32)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(language_column, dimension=1),
]
    # Set num_ps_replicas to be 10 and the min slice size to be extremely
    # small, so as to ensure that there'll be 10 partitions produced.
config = run_config.RunConfig(tf_random_seed=1)
config._num_ps_replicas = 10
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=feature_columns,
hidden_units=[3, 3],
optimizer='Adagrad',
config=config,
input_layer_min_slice_size=1)
# Ensure the param is passed in.
self.assertEqual(1, classifier.params['input_layer_min_slice_size'])
# Ensure the partition count is 10.
classifier.fit(input_fn=_input_fn_float_label, steps=50)
partition_count = 0
for name in classifier.get_variable_names():
if 'language_embedding' in name and 'Adagrad' in name:
partition_count += 1
self.assertEqual(10, partition_count)
def testLogisticRegression_MatrixData(self):
"""Tests binary classification using matrix data as input."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_logistic_fn
classifier.fit(input_fn=input_fn, steps=5)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testLogisticRegression_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [100] instead of [100, 1]."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100], dtype=dtypes.int32)
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testLogisticRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
feature_columns = [feature_column.real_valued_column('', dimension=4)]
classifier = dnn.DNNClassifier(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(x=train_x, y=train_y, steps=5)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def _assertBinaryPredictions(self, expected_len, predictions):
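    """Asserts the expected number of predictions, each either 0 or 1."""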
self.assertEqual(expected_len, len(predictions))
for prediction in predictions:
self.assertIn(prediction, (0, 1))
def _assertClassificationPredictions(
self, expected_len, n_classes, predictions):
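    """Asserts that predictions are class ids in [0, n_classes)."""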
self.assertEqual(expected_len, len(predictions))
for prediction in predictions:
self.assertIn(prediction, range(n_classes))
def _assertProbabilities(self, expected_batch_size, expected_n_classes,
probabilities):
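    """Asserts a batch of per-class probabilities, each within [0, 1]."""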
self.assertEqual(expected_batch_size, len(probabilities))
for b in range(expected_batch_size):
self.assertEqual(expected_n_classes, len(probabilities[b]))
for i in range(expected_n_classes):
self._assertInRange(0.0, 1.0, probabilities[b][i])
def testEstimatorWithCoreFeatureColumns(self):
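    """Tests binary classification with core feature columns (fc_core)."""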
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
language_column = fc_core.categorical_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
fc_core.embedding_column(language_column, dimension=1),
fc_core.numeric_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=feature_columns,
hidden_units=[10, 10],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(input_fn=predict_input_fn, as_iterable=True))
self._assertBinaryPredictions(3, predicted_classes)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLogisticRegression_TensorData(self):
"""Tests binary classification using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=feature_columns,
hidden_units=[10, 10],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self._assertBinaryPredictions(3, predicted_classes)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLogisticRegression_FloatLabel(self):
"""Tests binary classification with float labels."""
def _input_fn_float_label(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[50], [20], [10]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[0.8], [0.], [0.2]], dtype=dtypes.float32)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_float_label, steps=50)
predict_input_fn = functools.partial(_input_fn_float_label, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self._assertBinaryPredictions(3, predicted_classes)
predictions = list(
classifier.predict(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
predictions_proba = list(
classifier.predict_proba(
input_fn=predict_input_fn, as_iterable=True))
self._assertProbabilities(3, 2, predictions_proba)
def testMultiClass_MatrixData(self):
"""Tests multi-class classification using matrix data as input."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_multiclass_fn
classifier.fit(input_fn=input_fn, steps=200)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testMultiClass_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [150] instead of [150, 1]."""
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=200)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testMultiClass_NpMatrixData(self):
"""Tests multi-class classification using numpy matrix data as input."""
iris = base.load_iris()
train_x = iris.data
train_y = iris.target
feature_columns = [feature_column.real_valued_column('', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(x=train_x, y=train_y, steps=200)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testMultiClassLabelKeys(self):
"""Tests n_classes > 2 with label_keys vocabulary for labels."""
# Byte literals needed for python3 test to pass.
label_keys = [b'label0', b'label1', b'label2']
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant(
[[label_keys[1]], [label_keys[0]], [label_keys[0]]],
dtype=dtypes.string)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[10, 10],
label_keys=label_keys,
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self.assertEqual(3, len(predicted_classes))
for pred in predicted_classes:
self.assertIn(pred, label_keys)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
labels = constant_op.constant([[1], [0], [0], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_train, steps=1)
self.assertIn('loss', scores)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
classifier = dnn.DNNClassifier(
weight_column_name='w',
n_classes=2,
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
      # First row has more weight than others. Model should fit (y=x) better
      # than (y=Not(x)) due to the relatively higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
classifier = dnn.DNNClassifier(
weight_column_name='w',
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testPredict_AsIterableFalse(self):
"""Tests predict and predict_prob methods with as_iterable=False."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1)
]
n_classes = 3
classifier = dnn.DNNClassifier(
n_classes=n_classes,
feature_columns=feature_columns,
hidden_units=[10, 10],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predicted_classes = classifier.predict_classes(
input_fn=_input_fn, as_iterable=False)
self._assertClassificationPredictions(3, n_classes, predicted_classes)
predictions = classifier.predict(input_fn=_input_fn, as_iterable=False)
self.assertAllEqual(predicted_classes, predictions)
probabilities = classifier.predict_proba(
input_fn=_input_fn, as_iterable=False)
self._assertProbabilities(3, n_classes, probabilities)
def testPredict_AsIterable(self):
"""Tests predict and predict_prob methods with as_iterable=True."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
n_classes = 3
classifier = dnn.DNNClassifier(
n_classes=n_classes,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=300)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self._assertClassificationPredictions(3, n_classes, predicted_classes)
predictions = list(
classifier.predict(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
predicted_proba = list(
classifier.predict_proba(
input_fn=predict_input_fn, as_iterable=True))
self._assertProbabilities(3, n_classes, predicted_proba)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs),
}
return features, labels
def _my_metric_op(predictions, labels):
      # For binary classification, the 2nd column of "predictions" holds the
      # predicted probability of the positive class.
labels = math_ops.to_float(labels)
predictions = array_ops.strided_slice(
predictions, [0, 1], [-1, 2], end_mask=1)
labels = math_ops.cast(labels, predictions.dtype)
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
classifier = dnn.DNNClassifier(
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(
input_fn=_input_fn,
steps=5,
metrics={
'my_accuracy':
MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='classes'),
'my_precision':
MetricSpec(
metric_fn=metric_ops.streaming_precision,
prediction_key='classes'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='probabilities')
})
self.assertTrue(
set(['loss', 'my_accuracy', 'my_precision', 'my_metric']).issubset(
set(scores.keys())))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(classifier.predict_classes(
input_fn=predict_input_fn)))
self.assertEqual(
_sklearn.accuracy_score([1, 0, 0, 0], predictions),
scores['my_accuracy'])
    # Test the case where the prediction_key is neither "classes" nor
    # "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=5,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1)
]
model_dir = tempfile.mkdtemp()
classifier = dnn.DNNClassifier(
model_dir=model_dir,
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions1 = classifier.predict_classes(input_fn=predict_input_fn)
del classifier
classifier2 = dnn.DNNClassifier(
model_dir=model_dir,
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
predictions2 = classifier2.predict_classes(input_fn=predict_input_fn)
self.assertEqual(list(predictions1), list(predictions2))
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1)
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig(tf_random_seed=1)
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=config)
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testExport(self):
"""Tests export model for servo."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
feature_columns = [
feature_column.real_valued_column('age'),
feature_column.embedding_column(
language, dimension=1)
]
classifier = dnn.DNNClassifier(
feature_columns=feature_columns, hidden_units=[3, 3])
classifier.fit(input_fn=input_fn, steps=5)
export_dir = tempfile.mkdtemp()
classifier.export(export_dir)
def testEnableCenteredBias(self):
"""Tests that we can enable centered bias."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
enable_centered_bias=True,
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_multiclass_fn
classifier.fit(input_fn=input_fn, steps=5)
self.assertIn('dnn/multi_class_head/centered_bias_weight',
classifier.get_variable_names())
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
enable_centered_bias=False,
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_multiclass_fn
classifier.fit(input_fn=input_fn, steps=5)
self.assertNotIn('centered_bias_weight', classifier.get_variable_names())
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
class DNNRegressorTest(test.TestCase):
def testExperimentIntegration(self):
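    """Tests that DNNRegressor works end to end inside an Experiment."""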
exp = experiment.Experiment(
estimator=dnn.DNNRegressor(
feature_columns=[
feature_column.real_valued_column(
'feature', dimension=4)
],
hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testEstimatorContract(self):
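    """Tests that DNNRegressor satisfies the common Estimator contract."""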
estimator_test_utils.assert_estimator_contract(self, dnn.DNNRegressor)
def testRegression_MatrixData(self):
"""Tests regression using matrix data as input."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
regressor = dnn.DNNRegressor(
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_logistic_fn
regressor.fit(input_fn=input_fn, steps=200)
scores = regressor.evaluate(input_fn=input_fn, steps=1)
self.assertIn('loss', scores)
def testRegression_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [100] instead of [100, 1]."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100], dtype=dtypes.int32)
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
regressor = dnn.DNNRegressor(
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
feature_columns = [feature_column.real_valued_column('', dimension=4)]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(x=train_x, y=train_y, steps=200)
scores = regressor.evaluate(x=train_x, y=train_y, steps=1)
self.assertIn('loss', scores)
def testRegression_TensorData(self):
"""Tests regression using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
regressor = dnn.DNNRegressor(
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
self.assertIn('loss', scores)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
regressor = dnn.DNNRegressor(
weight_column_name='w',
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
      # First row has more weight than others. Model should fit (y=x) better
      # than (y=Not(x)) due to the relatively higher weight of the first row.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1.], [1.], [1.], [1.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
regressor = dnn.DNNRegressor(
weight_column_name='w',
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def _assertRegressionOutputs(
self, predictions, expected_shape):
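    """Asserts that predictions are floating point and have expected_shape."""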
predictions_nparray = np.array(predictions)
self.assertAllEqual(expected_shape, predictions_nparray.shape)
self.assertTrue(np.issubdtype(predictions_nparray.dtype, np.floating))
def testPredict_AsIterableFalse(self):
"""Tests predict method with as_iterable=False."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
predicted_scores = regressor.predict_scores(
input_fn=_input_fn, as_iterable=False)
self._assertRegressionOutputs(predicted_scores, [3])
predictions = regressor.predict(input_fn=_input_fn, as_iterable=False)
self.assertAllClose(predicted_scores, predictions)
def testPredict_AsIterable(self):
"""Tests predict method with as_iterable=True."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_scores = list(
regressor.predict_scores(
input_fn=predict_input_fn, as_iterable=True))
self._assertRegressionOutputs(predicted_scores, [3])
predictions = list(
regressor.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(predicted_scores, predictions)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs),
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = dnn.DNNRegressor(
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error': metric_ops.streaming_mean_squared_error,
('my_metric', 'scores'): _my_metric_op
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict_scores(
input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
    # Tests the case where the 2nd element of the key is not "scores".
with self.assertRaises(KeyError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('my_error', 'predictions'):
metric_ops.streaming_mean_squared_error
})
# Tests the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('bad_length_name', 'scores', 'bad_length'):
metric_ops.streaming_mean_squared_error
})
def testCustomMetricsWithMetricSpec(self):
"""Tests custom evaluation metrics that use MetricSpec."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs),
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = dnn.DNNRegressor(
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error':
MetricSpec(
metric_fn=metric_ops.streaming_mean_squared_error,
prediction_key='scores'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='scores')
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict_scores(
input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case where the prediction_key is not "scores".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
model_dir = tempfile.mkdtemp()
regressor = dnn.DNNRegressor(
model_dir=model_dir,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(regressor.predict_scores(input_fn=predict_input_fn))
del regressor
regressor2 = dnn.DNNRegressor(
model_dir=model_dir,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
predictions2 = list(regressor2.predict_scores(input_fn=predict_input_fn))
self.assertAllClose(predictions, predictions2)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig(tf_random_seed=1)
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
regressor = dnn.DNNRegressor(
feature_columns=feature_columns, hidden_units=[3, 3], config=config)
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testEnableCenteredBias(self):
"""Tests that we can enable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
enable_centered_bias=True,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
self.assertIn('dnn/regression_head/centered_bias_weight',
regressor.get_variable_names())
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
enable_centered_bias=False,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
self.assertNotIn('centered_bias_weight', regressor.get_variable_names())
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def boston_input_fn():
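  """Returns the Boston housing features and targets as float32 tensors."""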
boston = base.load_boston()
features = math_ops.cast(
array_ops.reshape(constant_op.constant(boston.data), [-1, 13]),
dtypes.float32)
labels = math_ops.cast(
array_ops.reshape(constant_op.constant(boston.target), [-1, 1]),
dtypes.float32)
return features, labels
class FeatureColumnTest(test.TestCase):
def testTrain(self):
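    """Tests training with feature columns inferred from the input_fn."""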
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
boston_input_fn)
est = dnn.DNNRegressor(feature_columns=feature_columns, hidden_units=[3, 3])
est.fit(input_fn=boston_input_fn, steps=1)
_ = est.evaluate(input_fn=boston_input_fn, steps=1)
if __name__ == '__main__':
test.main()
| apache-2.0 |
yeming233/rally | tests/unit/plugins/openstack/services/image/test_glance_common.py | 1 | 4258 | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from glanceclient import exc as glance_exc
import mock
from rally import exceptions
from rally.plugins.openstack import service
from rally.plugins.openstack.services.image import glance_common
from rally.plugins.openstack.services.image import image
from tests.unit import test
class FullGlance(service.Service, glance_common.GlanceMixin):
"""Implementation of GlanceMixin with Service base class."""
pass
class GlanceMixinTestCase(test.TestCase):
def setUp(self):
super(GlanceMixinTestCase, self).setUp()
self.clients = mock.MagicMock()
self.glance = self.clients.glance.return_value
self.name_generator = mock.MagicMock()
self.version = "some"
self.service = FullGlance(
clients=self.clients, name_generator=self.name_generator)
self.service.version = self.version
def test__get_client(self):
self.assertEqual(self.glance,
self.service._get_client())
def test_get_image(self):
image = "image_id"
self.assertEqual(self.glance.images.get.return_value,
self.service.get_image(image))
self.glance.images.get.assert_called_once_with(image)
def test_get_image_exception(self):
image_id = "image_id"
self.glance.images.get.side_effect = glance_exc.HTTPNotFound
self.assertRaises(exceptions.GetResourceNotFound,
self.service.get_image, image_id)
def test_delete_image(self):
image = "image_id"
self.service.delete_image(image)
self.glance.images.delete.assert_called_once_with(image)
class FullUnifiedGlance(glance_common.UnifiedGlanceMixin,
service.Service):
"""Implementation of UnifiedGlanceMixin with Service base class."""
pass
class UnifiedGlanceMixinTestCase(test.TestCase):
def setUp(self):
super(UnifiedGlanceMixinTestCase, self).setUp()
self.clients = mock.MagicMock()
self.name_generator = mock.MagicMock()
self.impl = mock.MagicMock()
self.version = "some"
self.service = FullUnifiedGlance(
clients=self.clients, name_generator=self.name_generator)
self.service._impl = self.impl
self.service.version = self.version
def test__unify_image(self):
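        """Checks unification of images with visibility or is_public set."""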
class Image(object):
def __init__(self, visibility=None, is_public=None, status=None):
self.id = uuid.uuid4()
self.name = str(uuid.uuid4())
self.visibility = visibility
self.is_public = is_public
self.status = status
visibility = "private"
image_obj = Image(visibility=visibility)
unified_image = self.service._unify_image(image_obj)
self.assertIsInstance(unified_image, image.UnifiedImage)
self.assertEqual(image_obj.id, unified_image.id)
self.assertEqual(image_obj.visibility, unified_image.visibility)
image_obj = Image(is_public="public")
del image_obj.visibility
unified_image = self.service._unify_image(image_obj)
self.assertEqual(image_obj.id, unified_image.id)
self.assertEqual(image_obj.is_public, unified_image.visibility)
def test_get_image(self):
image_id = "image_id"
self.service.get_image(image=image_id)
self.service._impl.get_image.assert_called_once_with(image=image_id)
def test_delete_image(self):
image_id = "image_id"
self.service.delete_image(image_id)
self.service._impl.delete_image.assert_called_once_with(
image_id=image_id)
| apache-2.0 |
lbartoletti/QGIS | python/plugins/processing/algs/grass7/ext/i_colors_enhance.py | 45 | 1440 | # -*- coding: utf-8 -*-
"""
***************************************************************************
i_colors_enhance.py
-------------------
Date : March 2016
Copyright : (C) 2016 by Médéric Ribreux
Email : medspx at medspx dot fr
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Médéric Ribreux'
__date__ = 'March 2016'
__copyright__ = '(C) 2016, Médéric Ribreux'
from .i import exportInputRasters
def processCommand(alg, parameters, context, feedback):
# Temporary remove outputs:
alg.processCommand(parameters, context, feedback, True)
def processOutputs(alg, parameters, context, feedback):
# Input rasters are output rasters
rasterDic = {'red': 'redoutput', 'green': 'greenoutput', 'blue': 'blueoutput'}
exportInputRasters(alg, parameters, context, rasterDic)
| gpl-2.0 |
orbitfp7/nova | nova/virt/hyperv/volumeutils.py | 2 | 4877 | # Copyright 2012 Pedro Navarro Perez
# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helper methods for operations related to the management of volumes
and storage repositories.
Official Microsoft iSCSI Initiator and iSCSI command line interface
documentation can be retrieved at:
http://www.microsoft.com/en-us/download/details.aspx?id=34750
"""
import re
import time
from oslo_config import cfg
from nova.i18n import _
from nova.openstack.common import log as logging
from nova import utils
from nova.virt.hyperv import basevolumeutils
from nova.virt.hyperv import vmutils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class VolumeUtils(basevolumeutils.BaseVolumeUtils):
def __init__(self):
super(VolumeUtils, self).__init__()
def execute(self, *args, **kwargs):
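        """Runs a command, raising HyperVException unless it succeeds."""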
stdout_value, stderr_value = utils.execute(*args, **kwargs)
if stdout_value.find('The operation completed successfully') == -1:
raise vmutils.HyperVException(_('An error has occurred when '
'calling the iscsi initiator: %s')
% stdout_value)
return stdout_value
def _login_target_portal(self, target_portal):
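        """Adds the iSCSI target portal to the initiator or refreshes it."""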
(target_address,
target_port) = utils.parse_server_string(target_portal)
output = self.execute('iscsicli.exe', 'ListTargetPortals')
pattern = r'Address and Socket *: (.*)'
portals = [addr.split() for addr in re.findall(pattern, output)]
LOG.debug("Ensuring connection to portal: %s" % target_portal)
if [target_address, str(target_port)] in portals:
self.execute('iscsicli.exe', 'RefreshTargetPortal',
target_address, target_port)
else:
# Adding target portal to iscsi initiator. Sending targets
self.execute('iscsicli.exe', 'AddTargetPortal',
target_address, target_port,
'*', '*', '*', '*', '*', '*', '*', '*', '*', '*', '*',
'*', '*')
def login_storage_target(self, target_lun, target_iqn, target_portal,
auth_username=None, auth_password=None):
"""Ensure that the target is logged in."""
self._login_target_portal(target_portal)
# Listing targets
self.execute('iscsicli.exe', 'ListTargets')
retry_count = CONF.hyperv.volume_attach_retry_count
# If the target is not connected, at least two iterations are needed:
# one for performing the login and another one for checking if the
# target was logged in successfully.
if retry_count < 2:
retry_count = 2
for attempt in xrange(retry_count):
try:
session_info = self.execute('iscsicli.exe', 'SessionList')
if session_info.find(target_iqn) == -1:
# Sending login
self.execute('iscsicli.exe', 'qlogintarget', target_iqn,
auth_username, auth_password)
else:
return
except vmutils.HyperVException as exc:
LOG.debug("Attempt %(attempt)d to connect to target "
"%(target_iqn)s failed. Retrying. "
"Exceptipn: %(exc)s ",
{'target_iqn': target_iqn,
'exc': exc,
'attempt': attempt})
time.sleep(CONF.hyperv.volume_attach_retry_interval)
raise vmutils.HyperVException(_('Failed to login target %s') %
target_iqn)
def logout_storage_target(self, target_iqn):
"""Logs out storage target through its session id."""
sessions = self._conn_wmi.query("SELECT * FROM "
"MSiSCSIInitiator_SessionClass "
"WHERE TargetName='%s'" % target_iqn)
for session in sessions:
self.execute_log_out(session.SessionId)
def execute_log_out(self, session_id):
"""Executes log out of the session described by its session ID."""
self.execute('iscsicli.exe', 'logouttarget', session_id)
| apache-2.0 |
willemneal/Docky | lib/pygments/styles/murphy.py | 135 | 2751 | # -*- coding: utf-8 -*-
"""
pygments.styles.murphy
~~~~~~~~~~~~~~~~~~~~~~
Murphy's style from CodeRay.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace
class MurphyStyle(Style):
"""
Murphy's style from CodeRay.
"""
default_style = ""
styles = {
Whitespace: "#bbbbbb",
Comment: "#666 italic",
Comment.Preproc: "#579 noitalic",
Comment.Special: "#c00 bold",
Keyword: "bold #289",
Keyword.Pseudo: "#08f",
Keyword.Type: "#66f",
Operator: "#333",
Operator.Word: "bold #000",
Name.Builtin: "#072",
Name.Function: "bold #5ed",
Name.Class: "bold #e9e",
Name.Namespace: "bold #0e84b5",
Name.Exception: "bold #F00",
Name.Variable: "#036",
Name.Variable.Instance: "#aaf",
Name.Variable.Class: "#ccf",
Name.Variable.Global: "#f84",
Name.Constant: "bold #5ed",
Name.Label: "bold #970",
Name.Entity: "#800",
Name.Attribute: "#007",
Name.Tag: "#070",
Name.Decorator: "bold #555",
String: "bg:#e0e0ff",
String.Char: "#88F bg:",
String.Doc: "#D42 bg:",
String.Interpol: "bg:#eee",
String.Escape: "bold #666",
String.Regex: "bg:#e0e0ff #000",
String.Symbol: "#fc8 bg:",
String.Other: "#f88",
Number: "bold #60E",
Number.Integer: "bold #66f",
Number.Float: "bold #60E",
Number.Hex: "bold #058",
Number.Oct: "bold #40E",
Generic.Heading: "bold #000080",
Generic.Subheading: "bold #800080",
Generic.Deleted: "#A00000",
Generic.Inserted: "#00A000",
Generic.Error: "#FF0000",
Generic.Emph: "italic",
Generic.Strong: "bold",
Generic.Prompt: "bold #c65d09",
Generic.Output: "#888",
Generic.Traceback: "#04D",
Error: "#F00 bg:#FAA"
}
| mit |
michael-dev2rights/ansible | lib/ansible/modules/windows/_win_msi.py | 36 | 2953 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Matt Martz <[email protected]>, and others
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_msi
version_added: '1.7'
deprecated: In 2.4 and will be removed in 2.8, use M(win_package) instead.
short_description: Installs and uninstalls Windows MSI files
description:
- Installs or uninstalls a Windows MSI file that is already located on the
target server.
options:
path:
description:
- File system path to the MSI file to install
required: true
extra_args:
description:
- Additional arguments to pass to the msiexec.exe command.
state:
description:
- Whether the MSI file should be installed or uninstalled.
choices: [ absent, present ]
default: present
creates:
description:
- Path to a file created by installing the MSI to prevent from
attempting to reinstall the package on every run.
removes:
description:
- Path to a file removed by uninstalling the MSI to prevent from
attempting to re-uninstall the package on every run.
version_added: '2.4'
wait:
description:
- Specify whether to wait for install or uninstall to complete before continuing.
type: bool
default: 'no'
version_added: '2.1'
notes:
- This module is not idempotent and will report a change every time.
Use the C(creates) and C(removes) options to your advantage.
- Please use M(win_package) instead, as this module is deprecated and will be removed in the future.
author:
- Matt Martz (@sivel)
'''
EXAMPLES = r'''
- name: Install an MSI file
win_msi:
path: C:\7z920-x64.msi
- name: Install an MSI, and wait for it to complete before continuing
win_msi:
path: C:\7z920-x64.msi
wait: yes
- name: Uninstall an MSI file
win_msi:
path: C:\7z920-x64.msi
state: absent
'''
RETURN = r'''
log:
description: The logged output from the installer
returned: always
type: string
sample: N/A
'''
| gpl-3.0 |
jbenden/ansible | lib/ansible/module_utils/urls.py | 9 | 44724 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Michael DeHaan <[email protected]>, 2012-2013
# Copyright (c), Toshio Kuratomi <[email protected]>, 2015
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The match_hostname function and supporting code is under the terms and
# conditions of the Python Software Foundation License. They were taken from
# the Python3 standard library and adapted for use in Python2. See comments in the
# source for which code precisely is under this License. PSF License text
# follows:
#
# PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
# --------------------------------------------
#
# 1. This LICENSE AGREEMENT is between the Python Software Foundation
# ("PSF"), and the Individual or Organization ("Licensee") accessing and
# otherwise using this software ("Python") in source or binary form and
# its associated documentation.
#
# 2. Subject to the terms and conditions of this License Agreement, PSF hereby
# grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
# analyze, test, perform and/or display publicly, prepare derivative works,
# distribute, and otherwise use Python alone or in any derivative version,
# provided, however, that PSF's License Agreement and PSF's notice of copyright,
# i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
# 2011, 2012, 2013, 2014 Python Software Foundation; All Rights Reserved" are
# retained in Python alone or in any derivative version prepared by Licensee.
#
# 3. In the event Licensee prepares a derivative work that is based on
# or incorporates Python or any part thereof, and wants to make
# the derivative work available to others as provided herein, then
# Licensee hereby agrees to include in any such work a brief summary of
# the changes made to Python.
#
# 4. PSF is making Python available to Licensee on an "AS IS"
# basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
# INFRINGE ANY THIRD PARTY RIGHTS.
#
# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
#
# 6. This License Agreement will automatically terminate upon a material
# breach of its terms and conditions.
#
# 7. Nothing in this License Agreement shall be deemed to create any
# relationship of agency, partnership, or joint venture between PSF and
# Licensee. This License Agreement does not grant permission to use PSF
# trademarks or trade name in a trademark sense to endorse or promote
# products or services of Licensee, or any third party.
#
# 8. By copying, installing or otherwise using Python, Licensee
# agrees to be bound by the terms and conditions of this License
# Agreement.
'''
The **urls** utils module offers a replacement for the urllib2 python library.
urllib2 is the python stdlib way to retrieve files from the Internet but it
lacks some security features (around verifying SSL certificates) that users
should care about in most situations. Using the functions in this module corrects
deficiencies in the urllib2 module wherever possible.
There are also third-party libraries (for instance, requests) which can be used
to replace urllib2 with a more secure library. However, all third party libraries
require that the library be installed on the managed machine. That is an extra step
for users making use of a module. If possible, avoid third party libraries by using
this code instead.
'''
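# A minimal usage sketch (added for orientation; the URL, the AnsibleModule
# instance 'module', and the field names shown are illustrative assumptions,
# not guarantees beyond the fetch_url/open_url helpers defined below):
#
#     response, info = fetch_url(module, 'https://example.com/api', method='GET')
#     if info['status'] >= 400:
#         module.fail_json(msg=info['msg'], body=info.get('body', ''))
#     raw = response.read()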
import base64
import netrc
import os
import platform
import re
import socket
import sys
import tempfile
import traceback
try:
import httplib
except ImportError:
# Python 3
import http.client as httplib
import ansible.module_utils.six.moves.http_cookiejar as cookiejar
import ansible.module_utils.six.moves.urllib.request as urllib_request
import ansible.module_utils.six.moves.urllib.error as urllib_error
from ansible.module_utils.basic import get_distribution
from ansible.module_utils._text import to_bytes, to_native, to_text
try:
# python3
import urllib.request as urllib_request
from urllib.request import AbstractHTTPHandler
except ImportError:
# python2
import urllib2 as urllib_request
from urllib2 import AbstractHTTPHandler
try:
from ansible.module_utils.six.moves.urllib.parse import urlparse, urlunparse
HAS_URLPARSE = True
except:
HAS_URLPARSE = False
try:
import ssl
HAS_SSL = True
except:
HAS_SSL = False
try:
# SNI Handling needs python2.7.9's SSLContext
from ssl import create_default_context, SSLContext
HAS_SSLCONTEXT = True
except ImportError:
HAS_SSLCONTEXT = False
# SNI Handling for python < 2.7.9 with urllib3 support
try:
# urllib3>=1.15
HAS_URLLIB3_SSL_WRAP_SOCKET = False
try:
from urllib3.contrib.pyopenssl import PyOpenSSLContext
except ImportError:
from requests.packages.urllib3.contrib.pyopenssl import PyOpenSSLContext
HAS_URLLIB3_PYOPENSSLCONTEXT = True
except ImportError:
# urllib3<1.15,>=1.6
HAS_URLLIB3_PYOPENSSLCONTEXT = False
try:
try:
from urllib3.contrib.pyopenssl import ssl_wrap_socket
except ImportError:
from requests.packages.urllib3.contrib.pyopenssl import ssl_wrap_socket
HAS_URLLIB3_SSL_WRAP_SOCKET = True
except ImportError:
pass
# Select a protocol that includes all secure tls protocols
# Exclude insecure ssl protocols if possible
if HAS_SSL:
# If we can't find extra tls methods, ssl.PROTOCOL_TLSv1 is sufficient
PROTOCOL = ssl.PROTOCOL_TLSv1
if not HAS_SSLCONTEXT and HAS_SSL:
try:
import ctypes
import ctypes.util
except ImportError:
# python 2.4 (likely rhel5 which doesn't have tls1.1 support in its openssl)
pass
else:
libssl_name = ctypes.util.find_library('ssl')
libssl = ctypes.CDLL(libssl_name)
for method in ('TLSv1_1_method', 'TLSv1_2_method'):
try:
libssl[method]
# Found something - we'll let openssl autonegotiate and hope
# the server has disabled sslv2 and 3. best we can do.
PROTOCOL = ssl.PROTOCOL_SSLv23
break
except AttributeError:
pass
del libssl
LOADED_VERIFY_LOCATIONS = set()
HAS_MATCH_HOSTNAME = True
try:
from ssl import match_hostname, CertificateError
except ImportError:
try:
from backports.ssl_match_hostname import match_hostname, CertificateError
except ImportError:
HAS_MATCH_HOSTNAME = False
if not HAS_MATCH_HOSTNAME:
# The following block of code is under the terms and conditions of the
# Python Software Foundation License
"""The match_hostname() function from Python 3.4, essential when using SSL."""
class CertificateError(ValueError):
pass
def _dnsname_match(dn, hostname, max_wildcards=1):
"""Matching according to RFC 6125, section 6.4.3
http://tools.ietf.org/html/rfc6125#section-6.4.3
"""
pats = []
if not dn:
return False
# Ported from python3-syntax:
# leftmost, *remainder = dn.split(r'.')
parts = dn.split(r'.')
leftmost = parts[0]
remainder = parts[1:]
wildcards = leftmost.count('*')
if wildcards > max_wildcards:
# Issue #17980: avoid denials of service by refusing more
# than one wildcard per fragment. A survey of established
# policy among SSL implementations showed it to be a
# reasonable choice.
raise CertificateError(
"too many wildcards in certificate DNS name: " + repr(dn))
# speed up common case w/o wildcards
if not wildcards:
return dn.lower() == hostname.lower()
# RFC 6125, section 6.4.3, subitem 1.
# The client SHOULD NOT attempt to match a presented identifier in which
# the wildcard character comprises a label other than the left-most label.
if leftmost == '*':
# When '*' is a fragment by itself, it matches a non-empty dotless
# fragment.
pats.append('[^.]+')
elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
# RFC 6125, section 6.4.3, subitem 3.
# The client SHOULD NOT attempt to match a presented identifier
# where the wildcard character is embedded within an A-label or
# U-label of an internationalized domain name.
pats.append(re.escape(leftmost))
else:
# Otherwise, '*' matches any dotless string, e.g. www*
pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
# add the remaining fragments, ignore any wildcards
for frag in remainder:
pats.append(re.escape(frag))
pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
return pat.match(hostname)
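    # Illustrative examples of the rule above (assumed, not exhaustive): a
    # left-most wildcard spans exactly one label, so '*.example.com' would
    # match 'www.example.com' but not 'example.com' or 'a.b.example.com'.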
def match_hostname(cert, hostname):
"""Verify that *cert* (in decoded format as returned by
SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
rules are followed, but IP addresses are not accepted for *hostname*.
CertificateError is raised on failure. On success, the function
returns nothing.
"""
if not cert:
raise ValueError("empty or no certificate")
dnsnames = []
san = cert.get('subjectAltName', ())
for key, value in san:
if key == 'DNS':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if not dnsnames:
# The subject is only checked when there is no dNSName entry
# in subjectAltName
for sub in cert.get('subject', ()):
for key, value in sub:
# XXX according to RFC 2818, the most specific Common Name
# must be used.
if key == 'commonName':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if len(dnsnames) > 1:
raise CertificateError("hostname %r " "doesn't match either of %s" % (hostname, ', '.join(map(repr, dnsnames))))
elif len(dnsnames) == 1:
raise CertificateError("hostname %r doesn't match %r" % (hostname, dnsnames[0]))
else:
raise CertificateError("no appropriate commonName or subjectAltName fields were found")
# End of Python Software Foundation Licensed code
HAS_MATCH_HOSTNAME = True
# This is a dummy cacert provided for Mac OS since you need at least 1
# ca cert, regardless of validity, for Python on Mac OS to use the
# keychain functionality in OpenSSL for validating SSL certificates.
# See: http://mercurial.selenic.com/wiki/CACertificates#Mac_OS_X_10.6_and_higher
b_DUMMY_CA_CERT = b"""-----BEGIN CERTIFICATE-----
MIICvDCCAiWgAwIBAgIJAO8E12S7/qEpMA0GCSqGSIb3DQEBBQUAMEkxCzAJBgNV
BAYTAlVTMRcwFQYDVQQIEw5Ob3J0aCBDYXJvbGluYTEPMA0GA1UEBxMGRHVyaGFt
MRAwDgYDVQQKEwdBbnNpYmxlMB4XDTE0MDMxODIyMDAyMloXDTI0MDMxNTIyMDAy
MlowSTELMAkGA1UEBhMCVVMxFzAVBgNVBAgTDk5vcnRoIENhcm9saW5hMQ8wDQYD
VQQHEwZEdXJoYW0xEDAOBgNVBAoTB0Fuc2libGUwgZ8wDQYJKoZIhvcNAQEBBQAD
gY0AMIGJAoGBANtvpPq3IlNlRbCHhZAcP6WCzhc5RbsDqyh1zrkmLi0GwcQ3z/r9
gaWfQBYhHpobK2Tiq11TfraHeNB3/VfNImjZcGpN8Fl3MWwu7LfVkJy3gNNnxkA1
4Go0/LmIvRFHhbzgfuo9NFgjPmmab9eqXJceqZIlz2C8xA7EeG7ku0+vAgMBAAGj
gaswgagwHQYDVR0OBBYEFPnN1nPRqNDXGlCqCvdZchRNi/FaMHkGA1UdIwRyMHCA
FPnN1nPRqNDXGlCqCvdZchRNi/FaoU2kSzBJMQswCQYDVQQGEwJVUzEXMBUGA1UE
CBMOTm9ydGggQ2Fyb2xpbmExDzANBgNVBAcTBkR1cmhhbTEQMA4GA1UEChMHQW5z
aWJsZYIJAO8E12S7/qEpMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADgYEA
MUB80IR6knq9K/tY+hvPsZer6eFMzO3JGkRFBh2kn6JdMDnhYGX7AXVHGflrwNQH
qFy+aenWXsC0ZvrikFxbQnX8GVtDADtVznxOi7XzFw7JOxdsVrpXgSN0eh0aMzvV
zKPZsZ2miVGclicJHzm5q080b1p/sZtuKIEZk6vZqEg=
-----END CERTIFICATE-----
"""
#
# Exceptions
#
class ConnectionError(Exception):
"""Failed to connect to the server"""
pass
class ProxyError(ConnectionError):
"""Failure to connect because of a proxy"""
pass
class SSLValidationError(ConnectionError):
"""Failure to connect due to SSL validation failing"""
pass
class NoSSLError(SSLValidationError):
"""Needed to connect to an HTTPS url but no ssl library available to verify the certificate"""
pass
# Some environments (Google Compute Engine's CoreOS deploys) do not compile
# against openssl and thus do not have any HTTPS support.
CustomHTTPSConnection = CustomHTTPSHandler = None
if hasattr(httplib, 'HTTPSConnection') and hasattr(urllib_request, 'HTTPSHandler'):
class CustomHTTPSConnection(httplib.HTTPSConnection):
def __init__(self, *args, **kwargs):
httplib.HTTPSConnection.__init__(self, *args, **kwargs)
self.context = None
if HAS_SSLCONTEXT:
self.context = create_default_context()
elif HAS_URLLIB3_PYOPENSSLCONTEXT:
self.context = PyOpenSSLContext(PROTOCOL)
if self.context and self.cert_file:
self.context.load_cert_chain(self.cert_file, self.key_file)
def connect(self):
"Connect to a host on a given (SSL) port."
if hasattr(self, 'source_address'):
sock = socket.create_connection((self.host, self.port), self.timeout, self.source_address)
else:
sock = socket.create_connection((self.host, self.port), self.timeout)
server_hostname = self.host
# Note: self._tunnel_host is not available on py < 2.6 but this code
# isn't used on py < 2.6 (lack of create_connection)
if self._tunnel_host:
self.sock = sock
self._tunnel()
server_hostname = self._tunnel_host
if HAS_SSLCONTEXT or HAS_URLLIB3_PYOPENSSLCONTEXT:
self.sock = self.context.wrap_socket(sock, server_hostname=server_hostname)
elif HAS_URLLIB3_SSL_WRAP_SOCKET:
self.sock = ssl_wrap_socket(sock, keyfile=self.key_file, cert_reqs=ssl.CERT_NONE, certfile=self.cert_file, ssl_version=PROTOCOL,
server_hostname=server_hostname)
else:
self.sock = ssl.wrap_socket(sock, keyfile=self.key_file, certfile=self.cert_file, ssl_version=PROTOCOL)
class CustomHTTPSHandler(urllib_request.HTTPSHandler):
def https_open(self, req):
return self.do_open(CustomHTTPSConnection, req)
https_request = AbstractHTTPHandler.do_request_
class HTTPSClientAuthHandler(urllib_request.HTTPSHandler):
'''Handles client authentication via cert/key
This is a fairly lightweight extension on HTTPSHandler, and can be used
in place of HTTPSHandler
'''
def __init__(self, client_cert=None, client_key=None, **kwargs):
urllib_request.HTTPSHandler.__init__(self, **kwargs)
self.client_cert = client_cert
self.client_key = client_key
def https_open(self, req):
return self.do_open(self._build_https_connection, req)
def _build_https_connection(self, host, **kwargs):
kwargs.update({
'cert_file': self.client_cert,
'key_file': self.client_key,
})
try:
kwargs['context'] = self._context
except AttributeError:
pass
return httplib.HTTPSConnection(host, **kwargs)
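# A hedged usage sketch of the handler above (certificate paths are
# placeholders); this mirrors how open_url() registers it further below:
#
#     handlers.append(HTTPSClientAuthHandler(client_cert='/etc/pki/client.pem',
#                                            client_key='/etc/pki/client.key'))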
def generic_urlparse(parts):
'''
Returns a dictionary of url parts as parsed by urlparse,
but accounts for the fact that older versions of that
    library do not support named attributes (i.e. .netloc)
'''
generic_parts = dict()
if hasattr(parts, 'netloc'):
# urlparse is newer, just read the fields straight
# from the parts object
generic_parts['scheme'] = parts.scheme
generic_parts['netloc'] = parts.netloc
generic_parts['path'] = parts.path
generic_parts['params'] = parts.params
generic_parts['query'] = parts.query
generic_parts['fragment'] = parts.fragment
generic_parts['username'] = parts.username
generic_parts['password'] = parts.password
generic_parts['hostname'] = parts.hostname
generic_parts['port'] = parts.port
else:
# we have to use indexes, and then parse out
# the other parts not supported by indexing
generic_parts['scheme'] = parts[0]
generic_parts['netloc'] = parts[1]
generic_parts['path'] = parts[2]
generic_parts['params'] = parts[3]
generic_parts['query'] = parts[4]
generic_parts['fragment'] = parts[5]
# get the username, password, etc.
try:
netloc_re = re.compile(r'^((?:\w)+(?::(?:\w)+)?@)?([A-Za-z0-9.-]+)(:\d+)?$')
match = netloc_re.match(parts[1])
auth = match.group(1)
hostname = match.group(2)
port = match.group(3)
if port:
# the capture group for the port will include the ':',
# so remove it and convert the port to an integer
port = int(port[1:])
if auth:
# the capture group above includes the @, so remove it
# and then split it up based on the first ':' found
auth = auth[:-1]
username, password = auth.split(':', 1)
else:
username = password = None
generic_parts['username'] = username
generic_parts['password'] = password
generic_parts['hostname'] = hostname
generic_parts['port'] = port
except:
generic_parts['username'] = None
generic_parts['password'] = None
generic_parts['hostname'] = parts[1]
generic_parts['port'] = None
return generic_parts
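# Example (values assumed for illustration):
#     generic_urlparse(urlparse('http://user:[email protected]:8080/p?q=1'))
# would yield a dict with scheme='http', hostname='proxy.example.com',
# port=8080, username='user', password='pw', path='/p', query='q=1', etc.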
class RequestWithMethod(urllib_request.Request):
'''
Workaround for using DELETE/PUT/etc with urllib2
Originally contained in library/net_infrastructure/dnsmadeeasy
'''
def __init__(self, url, method, data=None, headers=None):
if headers is None:
headers = {}
self._method = method.upper()
urllib_request.Request.__init__(self, url, data, headers)
def get_method(self):
if self._method:
return self._method
else:
return urllib_request.Request.get_method(self)
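# A short, hypothetical example of the class above -- issuing a DELETE, which
# a plain urllib2 Request cannot express directly (the URL is made up):
#
#     req = RequestWithMethod('https://api.example.com/item/1', 'delete')
#     req.get_method()  # -> 'DELETE' (upper-cased in __init__)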
def RedirectHandlerFactory(follow_redirects=None, validate_certs=True):
"""This is a class factory that closes over the value of
``follow_redirects`` so that the RedirectHandler class has access to
that value without having to use globals, and potentially cause problems
where ``open_url`` or ``fetch_url`` are used multiple times in a module.
"""
class RedirectHandler(urllib_request.HTTPRedirectHandler):
"""This is an implementation of a RedirectHandler to match the
functionality provided by httplib2. It will utilize the value of
``follow_redirects`` that is passed into ``RedirectHandlerFactory``
to determine how redirects should be handled in urllib2.
"""
def redirect_request(self, req, fp, code, msg, hdrs, newurl):
handler = maybe_add_ssl_handler(newurl, validate_certs)
if handler:
urllib_request._opener.add_handler(handler)
if follow_redirects == 'urllib2':
return urllib_request.HTTPRedirectHandler.redirect_request(self, req, fp, code, msg, hdrs, newurl)
elif follow_redirects in ['no', 'none', False]:
raise urllib_error.HTTPError(newurl, code, msg, hdrs, fp)
do_redirect = False
if follow_redirects in ['all', 'yes', True]:
do_redirect = (code >= 300 and code < 400)
elif follow_redirects == 'safe':
m = req.get_method()
do_redirect = (code >= 300 and code < 400 and m in ('GET', 'HEAD'))
if do_redirect:
# be conciliant with URIs containing a space
newurl = newurl.replace(' ', '%20')
newheaders = dict((k, v) for k, v in req.headers.items()
if k.lower() not in ("content-length", "content-type"))
try:
# Python 2-3.3
origin_req_host = req.get_origin_req_host()
except AttributeError:
# Python 3.4+
origin_req_host = req.origin_req_host
return urllib_request.Request(newurl,
headers=newheaders,
origin_req_host=origin_req_host,
unverifiable=True)
else:
raise urllib_error.HTTPError(req.get_full_url(), code, msg, hdrs, fp)
return RedirectHandler
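# For orientation, this is how the factory is wired up in open_url() further
# below; with 'safe', only GET/HEAD requests are allowed to follow redirects:
#
#     handlers.append(RedirectHandlerFactory('safe', validate_certs=True))
#     opener = urllib_request.build_opener(*handlers)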
def build_ssl_validation_error(hostname, port, paths, exc=None):
    '''Intelligently build out the SSLValidationError based on what support
you have installed
'''
msg = [
('Failed to validate the SSL certificate for %s:%s.'
' Make sure your managed systems have a valid CA'
' certificate installed.')
]
if not HAS_SSLCONTEXT:
msg.append('If the website serving the url uses SNI you need'
' python >= 2.7.9 on your managed machine')
msg.append(' (the python executable used (%s) is version: %s)' %
(sys.executable, ''.join(sys.version.splitlines())))
if not HAS_URLLIB3_PYOPENSSLCONTEXT or not HAS_URLLIB3_SSL_WRAP_SOCKET:
msg.append('or you can install the `urllib3`, `pyOpenSSL`,'
' `ndg-httpsclient`, and `pyasn1` python modules')
msg.append('to perform SNI verification in python >= 2.6.')
msg.append('You can use validate_certs=False if you do'
               " not need to confirm the server's identity but this is"
' unsafe and not recommended.'
' Paths checked for this platform: %s.')
if exc:
msg.append('The exception msg was: %s.' % to_native(exc))
raise SSLValidationError(' '.join(msg) % (hostname, port, ", ".join(paths)))
class SSLValidationHandler(urllib_request.BaseHandler):
'''
A custom handler class for SSL validation.
Based on:
http://stackoverflow.com/questions/1087227/validate-ssl-certificates-with-python
http://techknack.net/python-urllib2-handlers/
'''
CONNECT_COMMAND = "CONNECT %s:%s HTTP/1.0\r\nConnection: close\r\n"
def __init__(self, hostname, port):
self.hostname = hostname
self.port = port
def get_ca_certs(self):
# tries to find a valid CA cert in one of the
# standard locations for the current distribution
ca_certs = []
paths_checked = []
system = to_text(platform.system(), errors='surrogate_or_strict')
# build a list of paths to check for .crt/.pem files
# based on the platform type
paths_checked.append('/etc/ssl/certs')
if system == u'Linux':
paths_checked.append('/etc/pki/ca-trust/extracted/pem')
paths_checked.append('/etc/pki/tls/certs')
paths_checked.append('/usr/share/ca-certificates/cacert.org')
elif system == u'FreeBSD':
paths_checked.append('/usr/local/share/certs')
elif system == u'OpenBSD':
paths_checked.append('/etc/ssl')
elif system == u'NetBSD':
ca_certs.append('/etc/openssl/certs')
elif system == u'SunOS':
paths_checked.append('/opt/local/etc/openssl/certs')
# fall back to a user-deployed cert in a standard
# location if the OS platform one is not available
paths_checked.append('/etc/ansible')
tmp_fd, tmp_path = tempfile.mkstemp()
to_add_fd, to_add_path = tempfile.mkstemp()
to_add = False
# Write the dummy ca cert if we are running on Mac OS X
if system == u'Darwin':
os.write(tmp_fd, b_DUMMY_CA_CERT)
# Default Homebrew path for OpenSSL certs
paths_checked.append('/usr/local/etc/openssl')
# for all of the paths, find any .crt or .pem files
# and compile them into single temp file for use
# in the ssl check to speed up the test
for path in paths_checked:
if os.path.exists(path) and os.path.isdir(path):
dir_contents = os.listdir(path)
for f in dir_contents:
full_path = os.path.join(path, f)
if os.path.isfile(full_path) and os.path.splitext(f)[1] in ('.crt', '.pem'):
try:
cert_file = open(full_path, 'rb')
cert = cert_file.read()
cert_file.close()
os.write(tmp_fd, cert)
os.write(tmp_fd, b'\n')
if full_path not in LOADED_VERIFY_LOCATIONS:
to_add = True
os.write(to_add_fd, cert)
os.write(to_add_fd, b'\n')
LOADED_VERIFY_LOCATIONS.add(full_path)
except (OSError, IOError):
pass
if not to_add:
to_add_path = None
return (tmp_path, to_add_path, paths_checked)
def validate_proxy_response(self, response, valid_codes=[200]):
'''
make sure we get back a valid code from the proxy
'''
try:
(http_version, resp_code, msg) = re.match(r'(HTTP/\d\.\d) (\d\d\d) (.*)', response).groups()
if int(resp_code) not in valid_codes:
raise Exception
except:
raise ProxyError('Connection to proxy failed')
def detect_no_proxy(self, url):
'''
Detect if the 'no_proxy' environment variable is set and honor those locations.
'''
env_no_proxy = os.environ.get('no_proxy')
if env_no_proxy:
env_no_proxy = env_no_proxy.split(',')
netloc = urlparse(url).netloc
for host in env_no_proxy:
if netloc.endswith(host) or netloc.split(':')[0].endswith(host):
# Our requested URL matches something in no_proxy, so don't
# use the proxy for this
return False
        return True
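    # For example (values assumed): with no_proxy='internal.example.com', a
    # request to 'https://internal.example.com/x' returns False here (skip the
    # proxy), while 'https://example.org/' returns True (proxy may be used).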
def _make_context(self, to_add_ca_cert_path):
if HAS_URLLIB3_PYOPENSSLCONTEXT:
context = PyOpenSSLContext(PROTOCOL)
else:
context = create_default_context()
if to_add_ca_cert_path:
context.load_verify_locations(to_add_ca_cert_path)
return context
def http_request(self, req):
tmp_ca_cert_path, to_add_ca_cert_path, paths_checked = self.get_ca_certs()
https_proxy = os.environ.get('https_proxy')
context = None
if HAS_SSLCONTEXT or HAS_URLLIB3_PYOPENSSLCONTEXT:
context = self._make_context(to_add_ca_cert_path)
# Detect if 'no_proxy' environment variable is set and if our URL is included
use_proxy = self.detect_no_proxy(req.get_full_url())
if not use_proxy:
# ignore proxy settings for this host request
return req
try:
if https_proxy:
proxy_parts = generic_urlparse(urlparse(https_proxy))
port = proxy_parts.get('port') or 443
s = socket.create_connection((proxy_parts.get('hostname'), port))
if proxy_parts.get('scheme') == 'http':
s.sendall(self.CONNECT_COMMAND % (self.hostname, self.port))
if proxy_parts.get('username'):
credentials = "%s:%s" % (proxy_parts.get('username', ''), proxy_parts.get('password', ''))
s.sendall(b'Proxy-Authorization: Basic %s\r\n' % base64.b64encode(to_bytes(credentials, errors='surrogate_or_strict')).strip())
s.sendall(b'\r\n')
connect_result = b""
while connect_result.find(b"\r\n\r\n") <= 0:
connect_result += s.recv(4096)
# 128 kilobytes of headers should be enough for everyone.
if len(connect_result) > 131072:
raise ProxyError('Proxy sent too verbose headers. Only 128KiB allowed.')
self.validate_proxy_response(connect_result)
if context:
ssl_s = context.wrap_socket(s, server_hostname=self.hostname)
elif HAS_URLLIB3_SSL_WRAP_SOCKET:
ssl_s = ssl_wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL, server_hostname=self.hostname)
else:
ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL)
match_hostname(ssl_s.getpeercert(), self.hostname)
else:
raise ProxyError('Unsupported proxy scheme: %s. Currently ansible only supports HTTP proxies.' % proxy_parts.get('scheme'))
else:
s = socket.create_connection((self.hostname, self.port))
if context:
ssl_s = context.wrap_socket(s, server_hostname=self.hostname)
elif HAS_URLLIB3_SSL_WRAP_SOCKET:
ssl_s = ssl_wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL, server_hostname=self.hostname)
else:
ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL)
match_hostname(ssl_s.getpeercert(), self.hostname)
# close the ssl connection
# ssl_s.unwrap()
s.close()
except (ssl.SSLError, CertificateError) as e:
build_ssl_validation_error(self.hostname, self.port, paths_checked, e)
except socket.error as e:
raise ConnectionError('Failed to connect to %s at port %s: %s' % (self.hostname, self.port, to_native(e)))
try:
# cleanup the temp file created, don't worry
# if it fails for some reason
os.remove(tmp_ca_cert_path)
except:
pass
try:
# cleanup the temp file created, don't worry
# if it fails for some reason
if to_add_ca_cert_path:
os.remove(to_add_ca_cert_path)
except:
pass
return req
https_request = http_request
def maybe_add_ssl_handler(url, validate_certs):
# FIXME: change the following to use the generic_urlparse function
# to remove the indexed references for 'parsed'
parsed = urlparse(url)
if parsed[0] == 'https' and validate_certs:
if not HAS_SSL:
raise NoSSLError('SSL validation is not available in your version of python. You can use validate_certs=False,'
' however this is unsafe and not recommended')
# do the cert validation
netloc = parsed[1]
if '@' in netloc:
netloc = netloc.split('@', 1)[1]
if ':' in netloc:
hostname, port = netloc.split(':', 1)
port = int(port)
else:
hostname = netloc
port = 443
# create the SSL validation handler and
# add it to the list of handlers
return SSLValidationHandler(hostname, port)
def open_url(url, data=None, headers=None, method=None, use_proxy=True,
force=False, last_mod_time=None, timeout=10, validate_certs=True,
url_username=None, url_password=None, http_agent=None,
force_basic_auth=False, follow_redirects='urllib2',
client_cert=None, client_key=None, cookies=None):
'''
Sends a request via HTTP(S) or FTP using urllib2 (Python2) or urllib (Python3)
Does not require the module environment
'''
handlers = []
ssl_handler = maybe_add_ssl_handler(url, validate_certs)
if ssl_handler:
handlers.append(ssl_handler)
# FIXME: change the following to use the generic_urlparse function
# to remove the indexed references for 'parsed'
parsed = urlparse(url)
if parsed[0] != 'ftp':
username = url_username
if headers is None:
headers = {}
if username:
password = url_password
netloc = parsed[1]
elif '@' in parsed[1]:
credentials, netloc = parsed[1].split('@', 1)
if ':' in credentials:
username, password = credentials.split(':', 1)
else:
username = credentials
password = ''
parsed = list(parsed)
parsed[1] = netloc
# reconstruct url without credentials
url = urlunparse(parsed)
if username and not force_basic_auth:
passman = urllib_request.HTTPPasswordMgrWithDefaultRealm()
# this creates a password manager
passman.add_password(None, netloc, username, password)
# because we have put None at the start it will always
# use this username/password combination for urls
# for which `theurl` is a super-url
authhandler = urllib_request.HTTPBasicAuthHandler(passman)
digest_authhandler = urllib_request.HTTPDigestAuthHandler(passman)
# create the AuthHandler
handlers.append(authhandler)
handlers.append(digest_authhandler)
elif username and force_basic_auth:
headers["Authorization"] = basic_auth_header(username, password)
else:
try:
rc = netrc.netrc(os.environ.get('NETRC'))
login = rc.authenticators(parsed[1])
except IOError:
login = None
if login:
username, _, password = login
if username and password:
headers["Authorization"] = basic_auth_header(username, password)
if not use_proxy:
proxyhandler = urllib_request.ProxyHandler({})
handlers.append(proxyhandler)
if HAS_SSLCONTEXT and not validate_certs:
# In 2.7.9, the default context validates certificates
context = SSLContext(ssl.PROTOCOL_SSLv23)
context.options |= ssl.OP_NO_SSLv2
context.options |= ssl.OP_NO_SSLv3
context.verify_mode = ssl.CERT_NONE
context.check_hostname = False
handlers.append(HTTPSClientAuthHandler(client_cert=client_cert,
client_key=client_key,
context=context))
elif client_cert:
handlers.append(HTTPSClientAuthHandler(client_cert=client_cert,
client_key=client_key))
# pre-2.6 versions of python cannot use the custom https
# handler, since the socket class is lacking create_connection.
# Some python builds lack HTTPS support.
if hasattr(socket, 'create_connection') and CustomHTTPSHandler:
handlers.append(CustomHTTPSHandler)
handlers.append(RedirectHandlerFactory(follow_redirects, validate_certs))
# add some nicer cookie handling
if cookies is not None:
handlers.append(urllib_request.HTTPCookieProcessor(cookies))
opener = urllib_request.build_opener(*handlers)
urllib_request.install_opener(opener)
data = to_bytes(data, nonstring='passthru')
if method:
if method.upper() not in ('OPTIONS', 'GET', 'HEAD', 'POST', 'PUT', 'DELETE', 'TRACE', 'CONNECT', 'PATCH'):
raise ConnectionError('invalid HTTP request method; %s' % method.upper())
request = RequestWithMethod(url, method.upper(), data)
else:
request = urllib_request.Request(url, data)
# add the custom agent header, to help prevent issues
# with sites that block the default urllib agent string
if http_agent:
request.add_header('User-agent', http_agent)
# Cache control
# Either we directly force a cache refresh
if force:
request.add_header('cache-control', 'no-cache')
# or we do it if the original is more recent than our copy
elif last_mod_time:
tstamp = last_mod_time.strftime('%a, %d %b %Y %H:%M:%S +0000')
request.add_header('If-Modified-Since', tstamp)
# user defined headers now, which may override things we've set above
if headers:
if not isinstance(headers, dict):
raise ValueError("headers provided to fetch_url() must be a dict")
for header in headers:
request.add_header(header, headers[header])
urlopen_args = [request, None]
if sys.version_info >= (2, 6, 0):
# urlopen in python prior to 2.6.0 did not
# have a timeout parameter
urlopen_args.append(timeout)
r = urllib_request.urlopen(*urlopen_args)
return r
#
# Module-related functions
#
def basic_auth_header(username, password):
"""Takes a username and password and returns a byte string suitable for
using as value of an Authorization header to do basic auth.
"""
return b"Basic %s" % base64.b64encode(to_bytes("%s:%s" % (username, password), errors='surrogate_or_strict'))
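# Example value (computed by hand, shown only for illustration):
#     basic_auth_header('user', 'pass')  ->  b'Basic dXNlcjpwYXNz'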
def url_argument_spec():
'''
Creates an argument spec that can be used with any module
that will be requesting content via urllib/urllib2
'''
return dict(
url=dict(),
force=dict(default='no', aliases=['thirsty'], type='bool'),
http_agent=dict(default='ansible-httpget'),
use_proxy=dict(default='yes', type='bool'),
validate_certs=dict(default='yes', type='bool'),
url_username=dict(required=False),
url_password=dict(required=False, no_log=True),
force_basic_auth=dict(required=False, type='bool', default='no'),
client_cert=dict(required=False, type='path', default=None),
client_key=dict(required=False, type='path', default=None),
)
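# Typical (assumed) pattern in a module using this spec: extend it with
# module-specific options before constructing the AnsibleModule, e.g.
#
#     argument_spec = url_argument_spec()
#     argument_spec.update(dict(dest=dict(required=True, type='path')))
#     module = AnsibleModule(argument_spec=argument_spec)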
def fetch_url(module, url, data=None, headers=None, method=None,
use_proxy=True, force=False, last_mod_time=None, timeout=10):
"""Sends a request via HTTP(S) or FTP (needs the module as parameter)
    :arg module: The AnsibleModule (used to get username, password, etc.; see below).
:arg url: The url to use.
:kwarg data: The data to be sent (in case of POST/PUT).
:kwarg headers: A dict with the request headers.
:kwarg method: "POST", "PUT", etc.
:kwarg boolean use_proxy: Default: True
:kwarg boolean force: If True: Do not get a cached copy (Default: False)
:kwarg last_mod_time: Default: None
:kwarg int timeout: Default: 10
    :returns: A tuple of (**response**, **info**). Use ``response.read()`` to read the data.
        The **info** contains the 'status' and other meta data. When an HttpError (status >= 400)
        occurred then ``info['body']`` contains the error response data::
    Example::
        data = {...}
        resp, info = fetch_url(module,
                               "http://example.com",
                               data=module.jsonify(data),
                               headers={'Content-type': 'application/json'},
                               method="POST")
status_code = info["status"]
body = resp.read()
if status_code >= 400 :
body = info['body']
"""
if not HAS_URLPARSE:
module.fail_json(msg='urlparse is not installed')
# Get validate_certs from the module params
validate_certs = module.params.get('validate_certs', True)
username = module.params.get('url_username', '')
password = module.params.get('url_password', '')
http_agent = module.params.get('http_agent', 'ansible-httpget')
force_basic_auth = module.params.get('force_basic_auth', '')
follow_redirects = module.params.get('follow_redirects', 'urllib2')
client_cert = module.params.get('client_cert')
client_key = module.params.get('client_key')
cookies = cookiejar.LWPCookieJar()
r = None
info = dict(url=url)
try:
r = open_url(url, data=data, headers=headers, method=method,
use_proxy=use_proxy, force=force, last_mod_time=last_mod_time, timeout=timeout,
validate_certs=validate_certs, url_username=username,
url_password=password, http_agent=http_agent, force_basic_auth=force_basic_auth,
follow_redirects=follow_redirects, client_cert=client_cert,
client_key=client_key, cookies=cookies)
info.update(r.info())
# parse the cookies into a nice dictionary
cookie_dict = dict()
for cookie in cookies:
cookie_dict[cookie.name] = cookie.value
info['cookies'] = cookie_dict
# finally update the result with a message about the fetch
info.update(dict(msg="OK (%s bytes)" % r.headers.get('Content-Length', 'unknown'), url=r.geturl(), status=r.code))
except NoSSLError as e:
distribution = get_distribution()
if distribution is not None and distribution.lower() == 'redhat':
module.fail_json(msg='%s. You can also install python-ssl from EPEL' % to_native(e))
else:
module.fail_json(msg='%s' % to_native(e))
except (ConnectionError, ValueError) as e:
module.fail_json(msg=to_native(e))
except urllib_error.HTTPError as e:
try:
body = e.read()
except AttributeError:
body = ''
# Try to add exception info to the output but don't fail if we can't
try:
info.update(dict(**e.info()))
except:
pass
info.update({'msg': to_native(e), 'body': body, 'status': e.code})
except urllib_error.URLError as e:
code = int(getattr(e, 'code', -1))
info.update(dict(msg="Request failed: %s" % to_native(e), status=code))
except socket.error as e:
info.update(dict(msg="Connection failure: %s" % to_native(e), status=-1))
except Exception as e:
info.update(dict(msg="An unknown error occurred: %s" % to_native(e), status=-1),
exception=traceback.format_exc())
return r, info
| gpl-3.0 |
janezhango/BigDataMachineLearning | py/testdir_single_jvm/test_GLM2_many_cols_real.py | 2 | 3919 | import unittest, random, sys, time
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_hosts, h2o_browse as h2b, h2o_import as h2i, h2o_glm
def write_syn_dataset(csvPathname, rowCount, colCount, SEED):
    # one random generator, seeded per dataset file, shared by all columns
r1 = random.Random(SEED)
dsf = open(csvPathname, "w+")
for i in range(rowCount):
rowData = []
rowTotal = 0
# having reals makes it less likely to fail to converge?
for j in range(colCount):
ri1 = int(r1.gauss(1,.1))
rowTotal += ri1
rowData.append(ri1 + 0.1) # odd bias shift
        # sum the row, and make the output 1 if the total > (0.5 * colCount)
if (rowTotal > (0.5 * colCount)):
result = 1
else:
result = 0
rowData.append(result)
### print colCount, rowTotal, result
rowDataCsv = ",".join(map(str,rowData))
dsf.write(rowDataCsv + "\n")
dsf.close()
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED, localhost, tryHeap
tryHeap = 14
SEED = h2o.setup_random_seed()
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(1, enable_benchmark_log=True, java_heap_GB=tryHeap)
else:
h2o_hosts.build_cloud_with_hosts(enable_benchmark_log=True)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_GLM2_many_cols_real(self):
h2o.beta_features = True
SYNDATASETS_DIR = h2o.make_syn_dir()
tryList = [
(100, 1000, 'cA', 100),
(100, 3000, 'cB', 300),
]
### h2b.browseTheCloud()
for (rowCount, colCount, hex_key, timeoutSecs) in tryList:
SEEDPERFILE = random.randint(0, sys.maxint)
csvFilename = 'syn_' + str(SEEDPERFILE) + "_" + str(rowCount) + 'x' + str(colCount) + '.csv'
csvPathname = SYNDATASETS_DIR + '/' + csvFilename
print "Creating random", csvPathname
write_syn_dataset(csvPathname, rowCount, colCount, SEEDPERFILE)
start = time.time()
parseResult = h2i.import_parse(path=csvPathname, schema='put', hex_key=hex_key, timeoutSecs=60)
elapsed = time.time() - start
print "Parse result['destination_key']:", parseResult['destination_key']
algo = "Parse"
l = '{:d} jvms, {:d}GB heap, {:s} {:s} {:6.2f} secs'.format(
len(h2o.nodes), tryHeap, algo, csvFilename, elapsed)
print l
h2o.cloudPerfH2O.message(l)
# We should be able to see the parse result?
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'])
print "\n" + csvFilename
y = colCount
# just limit to 2 iterations..assume it scales with more iterations
kwargs = {
'response': y,
'max_iter': 2,
'family': 'binomial',
'lambda': 1.e-4,
'alpha': 0.6,
'n_folds': 1,
'beta_epsilon': 1.e-4,
}
start = time.time()
glm = h2o_cmd.runGLM(parseResult=parseResult, timeoutSecs=timeoutSecs, **kwargs)
elapsed = time.time() - start
h2o.check_sandbox_for_errors()
print "glm end on ", csvPathname, 'took', elapsed, 'seconds', \
"%d pct. of timeout" % ((elapsed*100)/timeoutSecs)
h2o_glm.simpleCheckGLM(self, glm, None, **kwargs)
algo = "GLM "
l = '{:d} jvms, {:d}GB heap, {:s} {:s} {:6.2f} secs'.format(
len(h2o.nodes), tryHeap, algo, csvFilename, elapsed)
print l
h2o.cloudPerfH2O.message(l)
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
denisff/python-for-android | python3-alpha/python3-src/Lib/test/test_ntpath.py | 57 | 12697 | import ntpath
import os
import sys
from test.support import TestFailed
from test import support, test_genericpath
from tempfile import TemporaryFile
import unittest
def tester(fn, wantResult):
fn = fn.replace("\\", "\\\\")
gotResult = eval(fn)
if wantResult != gotResult:
raise TestFailed("%s should return: %s but returned: %s" \
%(str(fn), str(wantResult), str(gotResult)))
# then with bytes
fn = fn.replace("('", "(b'")
fn = fn.replace('("', '(b"')
fn = fn.replace("['", "[b'")
fn = fn.replace('["', '[b"')
fn = fn.replace(", '", ", b'")
fn = fn.replace(', "', ', b"')
gotResult = eval(fn)
if isinstance(wantResult, str):
wantResult = wantResult.encode('ascii')
elif isinstance(wantResult, tuple):
wantResult = tuple(r.encode('ascii') for r in wantResult)
gotResult = eval(fn)
if wantResult != gotResult:
raise TestFailed("%s should return: %s but returned: %s" \
%(str(fn), str(wantResult), repr(gotResult)))
class TestNtpath(unittest.TestCase):
def test_splitext(self):
tester('ntpath.splitext("foo.ext")', ('foo', '.ext'))
tester('ntpath.splitext("/foo/foo.ext")', ('/foo/foo', '.ext'))
tester('ntpath.splitext(".ext")', ('.ext', ''))
tester('ntpath.splitext("\\foo.ext\\foo")', ('\\foo.ext\\foo', ''))
tester('ntpath.splitext("foo.ext\\")', ('foo.ext\\', ''))
tester('ntpath.splitext("")', ('', ''))
tester('ntpath.splitext("foo.bar.ext")', ('foo.bar', '.ext'))
tester('ntpath.splitext("xx/foo.bar.ext")', ('xx/foo.bar', '.ext'))
tester('ntpath.splitext("xx\\foo.bar.ext")', ('xx\\foo.bar', '.ext'))
tester('ntpath.splitext("c:a/b\\c.d")', ('c:a/b\\c', '.d'))
def test_splitdrive(self):
tester('ntpath.splitdrive("c:\\foo\\bar")',
('c:', '\\foo\\bar'))
tester('ntpath.splitdrive("c:/foo/bar")',
('c:', '/foo/bar'))
tester('ntpath.splitdrive("\\\\conky\\mountpoint\\foo\\bar")',
('\\\\conky\\mountpoint', '\\foo\\bar'))
tester('ntpath.splitdrive("//conky/mountpoint/foo/bar")',
('//conky/mountpoint', '/foo/bar'))
tester('ntpath.splitdrive("\\\\\\conky\\mountpoint\\foo\\bar")',
('', '\\\\\\conky\\mountpoint\\foo\\bar'))
tester('ntpath.splitdrive("///conky/mountpoint/foo/bar")',
('', '///conky/mountpoint/foo/bar'))
tester('ntpath.splitdrive("\\\\conky\\\\mountpoint\\foo\\bar")',
('', '\\\\conky\\\\mountpoint\\foo\\bar'))
tester('ntpath.splitdrive("//conky//mountpoint/foo/bar")',
('', '//conky//mountpoint/foo/bar'))
def test_split(self):
tester('ntpath.split("c:\\foo\\bar")', ('c:\\foo', 'bar'))
tester('ntpath.split("\\\\conky\\mountpoint\\foo\\bar")',
('\\\\conky\\mountpoint\\foo', 'bar'))
tester('ntpath.split("c:\\")', ('c:\\', ''))
tester('ntpath.split("\\\\conky\\mountpoint\\")',
('\\\\conky\\mountpoint\\', ''))
tester('ntpath.split("c:/")', ('c:/', ''))
tester('ntpath.split("//conky/mountpoint/")', ('//conky/mountpoint/', ''))
def test_isabs(self):
tester('ntpath.isabs("c:\\")', 1)
tester('ntpath.isabs("\\\\conky\\mountpoint\\")', 1)
tester('ntpath.isabs("\\foo")', 1)
tester('ntpath.isabs("\\foo\\bar")', 1)
def test_commonprefix(self):
tester('ntpath.commonprefix(["/home/swenson/spam", "/home/swen/spam"])',
"/home/swen")
tester('ntpath.commonprefix(["\\home\\swen\\spam", "\\home\\swen\\eggs"])',
"\\home\\swen\\")
tester('ntpath.commonprefix(["/home/swen/spam", "/home/swen/spam"])',
"/home/swen/spam")
def test_join(self):
tester('ntpath.join("")', '')
tester('ntpath.join("", "", "")', '')
tester('ntpath.join("a")', 'a')
tester('ntpath.join("/a")', '/a')
tester('ntpath.join("\\a")', '\\a')
tester('ntpath.join("a:")', 'a:')
tester('ntpath.join("a:", "b")', 'a:b')
tester('ntpath.join("a:", "/b")', 'a:/b')
tester('ntpath.join("a:", "\\b")', 'a:\\b')
tester('ntpath.join("a", "/b")', '/b')
tester('ntpath.join("a", "\\b")', '\\b')
tester('ntpath.join("a", "b", "c")', 'a\\b\\c')
tester('ntpath.join("a\\", "b", "c")', 'a\\b\\c')
tester('ntpath.join("a", "b\\", "c")', 'a\\b\\c')
tester('ntpath.join("a", "b", "\\c")', '\\c')
tester('ntpath.join("d:\\", "\\pleep")', 'd:\\pleep')
tester('ntpath.join("d:\\", "a", "b")', 'd:\\a\\b')
tester("ntpath.join('c:', '/a')", 'c:/a')
tester("ntpath.join('c:/', '/a')", 'c:/a')
tester("ntpath.join('c:/a', '/b')", '/b')
tester("ntpath.join('c:', 'd:/')", 'd:/')
tester("ntpath.join('c:/', 'd:/')", 'd:/')
tester("ntpath.join('c:/', 'd:/a/b')", 'd:/a/b')
tester("ntpath.join('')", '')
tester("ntpath.join('', '', '', '', '')", '')
tester("ntpath.join('a')", 'a')
tester("ntpath.join('', 'a')", 'a')
tester("ntpath.join('', '', '', '', 'a')", 'a')
tester("ntpath.join('a', '')", 'a\\')
tester("ntpath.join('a', '', '', '', '')", 'a\\')
tester("ntpath.join('a\\', '')", 'a\\')
tester("ntpath.join('a\\', '', '', '', '')", 'a\\')
# from comment in ntpath.join
tester("ntpath.join('c:', '/a')", 'c:/a')
tester("ntpath.join('//computer/share', '/a')", '//computer/share/a')
tester("ntpath.join('c:/', '/a')", 'c:/a')
tester("ntpath.join('//computer/share/', '/a')", '//computer/share/a')
tester("ntpath.join('c:/a', '/b')", '/b')
tester("ntpath.join('//computer/share/a', '/b')", '/b')
tester("ntpath.join('c:', 'd:/')", 'd:/')
tester("ntpath.join('c:', '//computer/share/')", '//computer/share/')
tester("ntpath.join('//computer/share', 'd:/')", 'd:/')
tester("ntpath.join('//computer/share', '//computer/share/')", '//computer/share/')
tester("ntpath.join('c:/', 'd:/')", 'd:/')
tester("ntpath.join('c:/', '//computer/share/')", '//computer/share/')
tester("ntpath.join('//computer/share/', 'd:/')", 'd:/')
tester("ntpath.join('//computer/share/', '//computer/share/')", '//computer/share/')
tester("ntpath.join('c:', '//computer/share/')", '//computer/share/')
tester("ntpath.join('c:/', '//computer/share/')", '//computer/share/')
tester("ntpath.join('c:/', '//computer/share/a/b')", '//computer/share/a/b')
tester("ntpath.join('\\\\computer\\share\\', 'a', 'b')", '\\\\computer\\share\\a\\b')
tester("ntpath.join('\\\\computer\\share', 'a', 'b')", '\\\\computer\\share\\a\\b')
tester("ntpath.join('\\\\computer\\share', 'a\\b')", '\\\\computer\\share\\a\\b')
tester("ntpath.join('//computer/share/', 'a', 'b')", '//computer/share/a\\b')
tester("ntpath.join('//computer/share', 'a', 'b')", '//computer/share\\a\\b')
tester("ntpath.join('//computer/share', 'a/b')", '//computer/share\\a/b')
def test_normpath(self):
tester("ntpath.normpath('A//////././//.//B')", r'A\B')
tester("ntpath.normpath('A/./B')", r'A\B')
tester("ntpath.normpath('A/foo/../B')", r'A\B')
tester("ntpath.normpath('C:A//B')", r'C:A\B')
tester("ntpath.normpath('D:A/./B')", r'D:A\B')
tester("ntpath.normpath('e:A/foo/../B')", r'e:A\B')
tester("ntpath.normpath('C:///A//B')", r'C:\A\B')
tester("ntpath.normpath('D:///A/./B')", r'D:\A\B')
tester("ntpath.normpath('e:///A/foo/../B')", r'e:\A\B')
tester("ntpath.normpath('..')", r'..')
tester("ntpath.normpath('.')", r'.')
tester("ntpath.normpath('')", r'.')
tester("ntpath.normpath('/')", '\\')
tester("ntpath.normpath('c:/')", 'c:\\')
tester("ntpath.normpath('/../.././..')", '\\')
tester("ntpath.normpath('c:/../../..')", 'c:\\')
tester("ntpath.normpath('../.././..')", r'..\..\..')
tester("ntpath.normpath('K:../.././..')", r'K:..\..\..')
tester("ntpath.normpath('C:////a/b')", r'C:\a\b')
tester("ntpath.normpath('//machine/share//a/b')", r'\\machine\share\a\b')
tester("ntpath.normpath('\\\\.\\NUL')", r'\\.\NUL')
tester("ntpath.normpath('\\\\?\\D:/XY\\Z')", r'\\?\D:/XY\Z')
def test_expandvars(self):
with support.EnvironmentVarGuard() as env:
env.clear()
env["foo"] = "bar"
env["{foo"] = "baz1"
env["{foo}"] = "baz2"
tester('ntpath.expandvars("foo")', "foo")
tester('ntpath.expandvars("$foo bar")', "bar bar")
tester('ntpath.expandvars("${foo}bar")', "barbar")
tester('ntpath.expandvars("$[foo]bar")', "$[foo]bar")
tester('ntpath.expandvars("$bar bar")', "$bar bar")
tester('ntpath.expandvars("$?bar")', "$?bar")
tester('ntpath.expandvars("${foo}bar")', "barbar")
tester('ntpath.expandvars("$foo}bar")', "bar}bar")
tester('ntpath.expandvars("${foo")', "${foo")
tester('ntpath.expandvars("${{foo}}")', "baz1}")
tester('ntpath.expandvars("$foo$foo")', "barbar")
tester('ntpath.expandvars("$bar$bar")', "$bar$bar")
tester('ntpath.expandvars("%foo% bar")', "bar bar")
tester('ntpath.expandvars("%foo%bar")', "barbar")
tester('ntpath.expandvars("%foo%%foo%")', "barbar")
tester('ntpath.expandvars("%%foo%%foo%foo%")', "%foo%foobar")
tester('ntpath.expandvars("%?bar%")', "%?bar%")
tester('ntpath.expandvars("%foo%%bar")', "bar%bar")
tester('ntpath.expandvars("\'%foo%\'%bar")', "\'%foo%\'%bar")
def test_abspath(self):
# ntpath.abspath() can only be used on a system with the "nt" module
# (reasonably), so we protect this test with "import nt". This allows
# the rest of the tests for the ntpath module to be run to completion
# on any platform, since most of the module is intended to be usable
# from any platform.
try:
import nt
tester('ntpath.abspath("C:\\")', "C:\\")
except ImportError:
pass
def test_relpath(self):
currentdir = os.path.split(os.getcwd())[-1]
tester('ntpath.relpath("a")', 'a')
tester('ntpath.relpath(os.path.abspath("a"))', 'a')
tester('ntpath.relpath("a/b")', 'a\\b')
tester('ntpath.relpath("../a/b")', '..\\a\\b')
tester('ntpath.relpath("a", "../b")', '..\\'+currentdir+'\\a')
tester('ntpath.relpath("a/b", "../c")', '..\\'+currentdir+'\\a\\b')
tester('ntpath.relpath("a", "b/c")', '..\\..\\a')
tester('ntpath.relpath("c:/foo/bar/bat", "c:/x/y")', '..\\..\\foo\\bar\\bat')
tester('ntpath.relpath("//conky/mountpoint/a", "//conky/mountpoint/b/c")', '..\\..\\a')
tester('ntpath.relpath("a", "a")', '.')
tester('ntpath.relpath("/foo/bar/bat", "/x/y/z")', '..\\..\\..\\foo\\bar\\bat')
tester('ntpath.relpath("/foo/bar/bat", "/foo/bar")', 'bat')
tester('ntpath.relpath("/foo/bar/bat", "/")', 'foo\\bar\\bat')
tester('ntpath.relpath("/", "/foo/bar/bat")', '..\\..\\..')
tester('ntpath.relpath("/foo/bar/bat", "/x")', '..\\foo\\bar\\bat')
tester('ntpath.relpath("/x", "/foo/bar/bat")', '..\\..\\..\\x')
tester('ntpath.relpath("/", "/")', '.')
tester('ntpath.relpath("/a", "/a")', '.')
tester('ntpath.relpath("/a/b", "/a/b")', '.')
tester('ntpath.relpath("c:/foo", "C:/FOO")', '.')
def test_sameopenfile(self):
with TemporaryFile() as tf1, TemporaryFile() as tf2:
# Make sure the same file is really the same
self.assertTrue(ntpath.sameopenfile(tf1.fileno(), tf1.fileno()))
# Make sure different files are really different
self.assertFalse(ntpath.sameopenfile(tf1.fileno(), tf2.fileno()))
# Make sure invalid values don't cause issues on win32
if sys.platform == "win32":
with self.assertRaises(OSError):
# Invalid file descriptors shouldn't display assert
# dialogs (#4804)
ntpath.sameopenfile(-1, -1)
class NtCommonTest(test_genericpath.CommonTest):
pathmodule = ntpath
attributes = ['relpath', 'splitunc']
def test_main():
support.run_unittest(TestNtpath, NtCommonTest)
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
vmthunder/nova | nova/tests/scheduler/test_client.py | 17 | 4355 | # Copyright (c) 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova.conductor import api as conductor_api
from nova import context
from nova import exception
from nova.scheduler import client as scheduler_client
from nova.scheduler.client import query as scheduler_query_client
from nova.scheduler.client import report as scheduler_report_client
from nova.scheduler import rpcapi as scheduler_rpcapi
from nova import test
"""Tests for Scheduler Client."""
class SchedulerReportClientTestCase(test.TestCase):
def setUp(self):
super(SchedulerReportClientTestCase, self).setUp()
self.context = context.get_admin_context()
self.flags(use_local=True, group='conductor')
self.client = scheduler_report_client.SchedulerReportClient()
def test_constructor(self):
self.assertIsNotNone(self.client.conductor_api)
@mock.patch.object(conductor_api.LocalAPI, 'compute_node_update')
def test_update_compute_node_works(self, mock_cn_update):
stats = {"id": 1, "foo": "bar"}
self.client.update_resource_stats(self.context,
('fakehost', 'fakenode'),
stats)
mock_cn_update.assert_called_once_with(self.context,
{"id": 1},
{"foo": "bar"})
def test_update_compute_node_raises(self):
stats = {"foo": "bar"}
self.assertRaises(exception.ComputeHostNotCreated,
self.client.update_resource_stats,
self.context, ('fakehost', 'fakenode'), stats)
class SchedulerQueryClientTestCase(test.TestCase):
def setUp(self):
super(SchedulerQueryClientTestCase, self).setUp()
self.context = context.get_admin_context()
self.client = scheduler_query_client.SchedulerQueryClient()
def test_constructor(self):
self.assertIsNotNone(self.client.scheduler_rpcapi)
@mock.patch.object(scheduler_rpcapi.SchedulerAPI, 'select_destinations')
def test_select_destinations(self, mock_select_destinations):
self.client.select_destinations(
context=self.context,
request_spec='fake_request_spec',
filter_properties='fake_prop'
)
mock_select_destinations.assert_called_once_with(
self.context,
'fake_request_spec',
'fake_prop')
class SchedulerClientTestCase(test.TestCase):
def setUp(self):
super(SchedulerClientTestCase, self).setUp()
self.client = scheduler_client.SchedulerClient()
def test_constructor(self):
self.assertIsNotNone(self.client.queryclient)
self.assertIsNotNone(self.client.reportclient)
@mock.patch.object(scheduler_query_client.SchedulerQueryClient,
'select_destinations')
def test_select_destinations(self, mock_select_destinations):
self.assertIsNone(self.client.queryclient.instance)
self.client.select_destinations('ctxt', 'fake_spec', 'fake_prop')
self.assertIsNotNone(self.client.queryclient.instance)
mock_select_destinations.assert_called_once_with(
'ctxt', 'fake_spec', 'fake_prop')
@mock.patch.object(scheduler_report_client.SchedulerReportClient,
'update_resource_stats')
def test_update_resource_stats(self, mock_update_resource_stats):
self.assertIsNone(self.client.reportclient.instance)
self.client.update_resource_stats('ctxt', 'fake_name', 'fake_stats')
self.assertIsNotNone(self.client.reportclient.instance)
mock_update_resource_stats.assert_called_once_with(
'ctxt', 'fake_name', 'fake_stats')
| apache-2.0 |
spcui/autotest | setup.py | 1 | 4226 | import os
# High level way of installing each autotest component
import client.setup
import frontend.setup
import cli.setup
import server.setup
import scheduler.setup
import database_legacy.setup
import tko.setup
import utils.setup
import mirror.setup
import installation_support.setup
from distutils.core import setup
try:
import autotest.common as common
except ImportError:
import common
from autotest.client.shared import version
def _combine_dicts(list_dicts):
result_dict = {}
for d in list_dicts:
for k in d:
result_dict[k] = d[k]
return result_dict
def _fix_data_paths(package_data_dict):
'''
Corrects package data paths
When the package name is compound, and the package contents, that
is, file paths, contain the same path name found in the package
name, setuptools thinks there's an extra directory. This checks
that condition and adjusts (strips) the 1st directory name.
'''
result = {}
for package_name, package_content in package_data_dict.items():
package_structure = package_name.split('.')
package_structure_1st_level = package_structure[1]
result[package_name] = []
for p in package_content:
path_structure = p.split(os.path.sep)
path_structure_1st_level = path_structure[0]
if package_structure_1st_level == path_structure_1st_level:
path = os.path.join(*path_structure[1:])
else:
path = p
result[package_name].append(path)
return result
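# Illustration (made-up entries): for package 'autotest.client', a data path
# such as 'client/profilers/README' is stripped to 'profilers/README', while
# 'shared/settings.ini' is left unchanged because its first directory differs.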
def get_package_dir():
return _combine_dicts([client.setup.get_package_dir(),
frontend.setup.get_package_dir(),
cli.setup.get_package_dir(),
server.setup.get_package_dir(),
scheduler.setup.get_package_dir(),
database_legacy.setup.get_package_dir(),
tko.setup.get_package_dir(),
utils.setup.get_package_dir(),
mirror.setup.get_package_dir()])
def get_packages():
return (client.setup.get_packages() +
frontend.setup.get_packages() +
cli.setup.get_packages() +
server.setup.get_packages() +
scheduler.setup.get_packages() +
database_legacy.setup.get_packages() +
tko.setup.get_packages() +
utils.setup.get_packages() +
mirror.setup.get_packages() +
installation_support.setup.get_packages())
def get_data_files():
return (client.setup.get_data_files() +
tko.setup.get_data_files() +
utils.setup.get_data_files() +
mirror.setup.get_data_files())
def get_package_data():
return _combine_dicts([
_fix_data_paths(client.setup.get_package_data()),
_fix_data_paths(frontend.setup.get_package_data()),
_fix_data_paths(cli.setup.get_package_data()),
_fix_data_paths(server.setup.get_package_data()),
_fix_data_paths(scheduler.setup.get_package_data()),
_fix_data_paths(database_legacy.setup.get_package_data()),
_fix_data_paths(tko.setup.get_package_data()),
_fix_data_paths(utils.setup.get_package_data())
])
def get_scripts():
return (client.setup.get_scripts() +
frontend.setup.get_scripts() +
cli.setup.get_scripts() +
server.setup.get_scripts() +
scheduler.setup.get_scripts() +
database_legacy.setup.get_scripts() +
tko.setup.get_scripts() +
installation_support.setup.get_scripts())
def run():
setup(name='autotest',
description='Autotest test framework',
maintainer='Lucas Meneghel Rodrigues',
maintainer_email='[email protected]',
version=version.get_version(),
url='http://autotest.github.com',
package_dir=get_package_dir(),
package_data=get_package_data(),
          packages=get_packages(),
scripts=get_scripts(),
data_files=get_data_files())
if __name__ == '__main__':
run()
| gpl-2.0 |
rherault-insa/numpy | numpy/distutils/fcompiler/gnu.py | 28 | 14667 | from __future__ import division, absolute_import, print_function
import re
import os
import sys
import warnings
import platform
import tempfile
from subprocess import Popen, PIPE, STDOUT
from numpy.distutils.fcompiler import FCompiler
from numpy.distutils.exec_command import exec_command
from numpy.distutils.misc_util import msvc_runtime_library
from numpy.distutils.compat import get_exception
compilers = ['GnuFCompiler', 'Gnu95FCompiler']
TARGET_R = re.compile("Target: ([a-zA-Z0-9_\-]*)")
# XXX: handle cross compilation
def is_win64():
return sys.platform == "win32" and platform.architecture()[0] == "64bit"
if is_win64():
#_EXTRAFLAGS = ["-fno-leading-underscore"]
_EXTRAFLAGS = []
else:
_EXTRAFLAGS = []
class GnuFCompiler(FCompiler):
compiler_type = 'gnu'
compiler_aliases = ('g77',)
description = 'GNU Fortran 77 compiler'
def gnu_version_match(self, version_string):
"""Handle the different versions of GNU fortran compilers"""
# Strip warning(s) that may be emitted by gfortran
while version_string.startswith('gfortran: warning'):
version_string = version_string[version_string.find('\n')+1:]
# Gfortran versions from after 2010 will output a simple string
# (usually "x.y", "x.y.z" or "x.y.z-q") for ``-dumpversion``; older
# gfortrans may still return long version strings (``-dumpversion`` was
# an alias for ``--version``)
if len(version_string) <= 20:
# Try to find a valid version string
m = re.search(r'([0-9.]+)', version_string)
if m:
# g77 provides a longer version string that starts with GNU
# Fortran
if version_string.startswith('GNU Fortran'):
return ('g77', m.group(1))
# gfortran only outputs a version string such as #.#.#, so check
# if the match is at the start of the string
elif m.start() == 0:
return ('gfortran', m.group(1))
else:
# Output probably from --version, try harder:
m = re.search(r'GNU Fortran\s+95.*?([0-9-.]+)', version_string)
if m:
return ('gfortran', m.group(1))
m = re.search(r'GNU Fortran.*?\-?([0-9-.]+)', version_string)
if m:
v = m.group(1)
if v.startswith('0') or v.startswith('2') or v.startswith('3'):
# the '0' is for early g77's
return ('g77', v)
else:
# at some point in the 4.x series, the ' 95' was dropped
# from the version string
return ('gfortran', v)
# If still nothing, raise an error to make the problem easy to find.
err = 'A valid Fortran version was not found in this string:\n'
raise ValueError(err + version_string)
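    # Hedged examples of the mapping above (version strings are illustrative):
    # a short '-dumpversion' output such as '4.8.2' yields ('gfortran', '4.8.2'),
    # while an old long string like 'GNU Fortran (GCC) 3.3.3 ...' would yield
    # ('g77', '3.3.3').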
def version_match(self, version_string):
v = self.gnu_version_match(version_string)
if not v or v[0] != 'g77':
return None
return v[1]
possible_executables = ['g77', 'f77']
executables = {
'version_cmd' : [None, "-dumpversion"],
'compiler_f77' : [None, "-g", "-Wall", "-fno-second-underscore"],
'compiler_f90' : None, # Use --fcompiler=gnu95 for f90 codes
'compiler_fix' : None,
'linker_so' : [None, "-g", "-Wall"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"],
'linker_exe' : [None, "-g", "-Wall"]
}
module_dir_switch = None
module_include_switch = None
# Cygwin: f771: warning: -fPIC ignored for target (all code is
# position independent)
if os.name != 'nt' and sys.platform != 'cygwin':
pic_flags = ['-fPIC']
# use -mno-cygwin for g77 when Python is not Cygwin-Python
if sys.platform == 'win32':
for key in ['version_cmd', 'compiler_f77', 'linker_so', 'linker_exe']:
executables[key].append('-mno-cygwin')
g2c = 'g2c'
suggested_f90_compiler = 'gnu95'
def get_flags_linker_so(self):
opt = self.linker_so[1:]
if sys.platform == 'darwin':
target = os.environ.get('MACOSX_DEPLOYMENT_TARGET', None)
# If MACOSX_DEPLOYMENT_TARGET is set, we simply trust the value
# and leave it alone. But, distutils will complain if the
# environment's value is different from the one in the Python
            # Makefile used to build Python. We let distutils handle this
# error checking.
if not target:
# If MACOSX_DEPLOYMENT_TARGET is not set in the environment,
# we try to get it first from the Python Makefile and then we
# fall back to setting it to 10.3 to maximize the set of
# versions we can work with. This is a reasonable default
# even when using the official Python dist and those derived
# from it.
import distutils.sysconfig as sc
g = {}
filename = sc.get_makefile_filename()
sc.parse_makefile(filename, g)
target = g.get('MACOSX_DEPLOYMENT_TARGET', '10.3')
os.environ['MACOSX_DEPLOYMENT_TARGET'] = target
if target == '10.3':
s = 'Env. variable MACOSX_DEPLOYMENT_TARGET set to 10.3'
warnings.warn(s)
opt.extend(['-undefined', 'dynamic_lookup', '-bundle'])
else:
opt.append("-shared")
if sys.platform.startswith('sunos'):
# SunOS often has dynamically loaded symbols defined in the
            # static library libg2c.a. The linker doesn't like this. To
# ignore the problem, use the -mimpure-text flag. It isn't
# the safest thing, but seems to work. 'man gcc' says:
# ".. Instead of using -mimpure-text, you should compile all
# source code with -fpic or -fPIC."
opt.append('-mimpure-text')
return opt
def get_libgcc_dir(self):
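        # Ask the Fortran compiler where libgcc lives and return that directory,
        # or None if the query fails.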
status, output = exec_command(self.compiler_f77 +
['-print-libgcc-file-name'],
use_tee=0)
if not status:
return os.path.dirname(output)
return None
def get_library_dirs(self):
opt = []
if sys.platform[:5] != 'linux':
d = self.get_libgcc_dir()
if d:
# if windows and not cygwin, libg2c lies in a different folder
if sys.platform == 'win32' and not d.startswith('/usr/lib'):
d = os.path.normpath(d)
path = os.path.join(d, "lib%s.a" % self.g2c)
if not os.path.exists(path):
root = os.path.join(d, *((os.pardir,)*4))
d2 = os.path.abspath(os.path.join(root, 'lib'))
path = os.path.join(d2, "lib%s.a" % self.g2c)
if os.path.exists(path):
opt.append(d2)
opt.append(d)
return opt
def get_libraries(self):
opt = []
d = self.get_libgcc_dir()
if d is not None:
g2c = self.g2c + '-pic'
f = self.static_lib_format % (g2c, self.static_lib_extension)
if not os.path.isfile(os.path.join(d, f)):
g2c = self.g2c
else:
g2c = self.g2c
if g2c is not None:
opt.append(g2c)
c_compiler = self.c_compiler
if sys.platform == 'win32' and c_compiler and \
c_compiler.compiler_type == 'msvc':
            # The following code is only needed when linking F77-compiled code
            # with MSVC; it is not needed (and actually breaks) when using MinGW.
opt.append('gcc')
runtime_lib = msvc_runtime_library()
if runtime_lib:
opt.append(runtime_lib)
if sys.platform == 'darwin':
opt.append('cc_dynamic')
return opt
def get_flags_debug(self):
return ['-g']
def get_flags_opt(self):
v = self.get_version()
if v and v <= '3.3.3':
# With this compiler version building Fortran BLAS/LAPACK
# with -O3 caused failures in lib.lapack heevr,syevr tests.
opt = ['-O2']
else:
opt = ['-O3']
opt.append('-funroll-loops')
return opt
def _c_arch_flags(self):
""" Return detected arch flags from CFLAGS """
from distutils import sysconfig
try:
cflags = sysconfig.get_config_vars()['CFLAGS']
except KeyError:
return []
arch_re = re.compile(r"-arch\s+(\w+)")
arch_flags = []
for arch in arch_re.findall(cflags):
arch_flags += ['-arch', arch]
return arch_flags
def get_flags_arch(self):
return []
def runtime_library_dir_option(self, dir):
return '-Wl,-rpath="%s"' % dir
class Gnu95FCompiler(GnuFCompiler):
compiler_type = 'gnu95'
compiler_aliases = ('gfortran',)
description = 'GNU Fortran 95 compiler'
def version_match(self, version_string):
v = self.gnu_version_match(version_string)
if not v or v[0] != 'gfortran':
return None
v = v[1]
if v >= '4.':
# gcc-4 series releases do not support -mno-cygwin option
pass
else:
# use -mno-cygwin flag for gfortran when Python is not
# Cygwin-Python
if sys.platform == 'win32':
for key in ['version_cmd', 'compiler_f77', 'compiler_f90',
'compiler_fix', 'linker_so', 'linker_exe']:
self.executables[key].append('-mno-cygwin')
return v
possible_executables = ['gfortran', 'f95']
executables = {
'version_cmd' : ["<F90>", "-dumpversion"],
'compiler_f77' : [None, "-Wall", "-g", "-ffixed-form",
"-fno-second-underscore"] + _EXTRAFLAGS,
'compiler_f90' : [None, "-Wall", "-g",
"-fno-second-underscore"] + _EXTRAFLAGS,
'compiler_fix' : [None, "-Wall", "-g","-ffixed-form",
"-fno-second-underscore"] + _EXTRAFLAGS,
'linker_so' : ["<F90>", "-Wall", "-g"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"],
'linker_exe' : [None, "-Wall"]
}
module_dir_switch = '-J'
module_include_switch = '-I'
g2c = 'gfortran'
def _universal_flags(self, cmd):
"""Return a list of -arch flags for every supported architecture."""
if not sys.platform == 'darwin':
return []
arch_flags = []
# get arches the C compiler gets.
c_archs = self._c_arch_flags()
if "i386" in c_archs:
c_archs[c_archs.index("i386")] = "i686"
# check the arches the Fortran compiler supports, and compare with
# arch flags from C compiler
for arch in ["ppc", "i686", "x86_64", "ppc64"]:
if _can_target(cmd, arch) and arch in c_archs:
arch_flags.extend(["-arch", arch])
return arch_flags
def get_flags(self):
flags = GnuFCompiler.get_flags(self)
arch_flags = self._universal_flags(self.compiler_f90)
if arch_flags:
flags[:0] = arch_flags
return flags
def get_flags_linker_so(self):
flags = GnuFCompiler.get_flags_linker_so(self)
arch_flags = self._universal_flags(self.linker_so)
if arch_flags:
flags[:0] = arch_flags
return flags
def get_library_dirs(self):
opt = GnuFCompiler.get_library_dirs(self)
if sys.platform == 'win32':
c_compiler = self.c_compiler
if c_compiler and c_compiler.compiler_type == "msvc":
target = self.get_target()
if target:
d = os.path.normpath(self.get_libgcc_dir())
root = os.path.join(d, *((os.pardir,)*4))
path = os.path.join(root, "lib")
mingwdir = os.path.normpath(path)
if os.path.exists(os.path.join(mingwdir, "libmingwex.a")):
opt.append(mingwdir)
return opt
def get_libraries(self):
opt = GnuFCompiler.get_libraries(self)
if sys.platform == 'darwin':
opt.remove('cc_dynamic')
if sys.platform == 'win32':
c_compiler = self.c_compiler
if c_compiler and c_compiler.compiler_type == "msvc":
if "gcc" in opt:
i = opt.index("gcc")
opt.insert(i+1, "mingwex")
opt.insert(i+1, "mingw32")
# XXX: fix this mess, does not work for mingw
if is_win64():
c_compiler = self.c_compiler
if c_compiler and c_compiler.compiler_type == "msvc":
return []
else:
pass
return opt
def get_target(self):
status, output = exec_command(self.compiler_f77 +
['-v'],
use_tee=0)
if not status:
m = TARGET_R.search(output)
if m:
return m.group(1)
return ""
def get_flags_opt(self):
if is_win64():
return ['-O0']
else:
return GnuFCompiler.get_flags_opt(self)
def _can_target(cmd, arch):
"""Return true if the architecture supports the -arch flag"""
newcmd = cmd[:]
fid, filename = tempfile.mkstemp(suffix=".f")
try:
d = os.path.dirname(filename)
output = os.path.splitext(filename)[0] + ".o"
try:
newcmd.extend(["-arch", arch, "-c", filename])
p = Popen(newcmd, stderr=STDOUT, stdout=PIPE, cwd=d)
p.communicate()
return p.returncode == 0
finally:
if os.path.exists(output):
os.remove(output)
finally:
os.remove(filename)
return False
if __name__ == '__main__':
from distutils import log
log.set_verbosity(2)
compiler = GnuFCompiler()
compiler.customize()
print(compiler.get_version())
try:
compiler = Gnu95FCompiler()
compiler.customize()
print(compiler.get_version())
except Exception:
msg = get_exception()
print(msg)
| bsd-3-clause |
tlksio/tlksio | env/lib/python3.4/site-packages/requests/packages/chardet/escprober.py | 2936 | 3187 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
from .escsm import (HZSMModel, ISO2022CNSMModel, ISO2022JPSMModel,
ISO2022KRSMModel)
from .charsetprober import CharSetProber
from .codingstatemachine import CodingStateMachine
from .compat import wrap_ord
class EscCharSetProber(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mCodingSM = [
CodingStateMachine(HZSMModel),
CodingStateMachine(ISO2022CNSMModel),
CodingStateMachine(ISO2022JPSMModel),
CodingStateMachine(ISO2022KRSMModel)
]
self.reset()
def reset(self):
CharSetProber.reset(self)
for codingSM in self._mCodingSM:
if not codingSM:
continue
codingSM.active = True
codingSM.reset()
self._mActiveSM = len(self._mCodingSM)
self._mDetectedCharset = None
def get_charset_name(self):
return self._mDetectedCharset
def get_confidence(self):
if self._mDetectedCharset:
return 0.99
else:
return 0.00
def feed(self, aBuf):
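        # Run every escape-sequence state machine over the buffer: a machine that
        # reaches the error state is deactivated, and the first machine to reach
        # "its me" determines the detected charset.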
for c in aBuf:
# PY3K: aBuf is a byte array, so c is an int, not a byte
for codingSM in self._mCodingSM:
if not codingSM:
continue
if not codingSM.active:
continue
codingState = codingSM.next_state(wrap_ord(c))
if codingState == constants.eError:
codingSM.active = False
self._mActiveSM -= 1
if self._mActiveSM <= 0:
self._mState = constants.eNotMe
return self.get_state()
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
self._mDetectedCharset = codingSM.get_coding_state_machine() # nopep8
return self.get_state()
return self.get_state()
| mit |
asajeffrey/servo | tests/wpt/web-platform-tests/tools/third_party/pytest/testing/test_collection.py | 30 | 31894 | from __future__ import absolute_import, division, print_function
import pprint
import sys
import pytest
import _pytest._code
from _pytest.main import Session, EXIT_NOTESTSCOLLECTED, _in_venv
class TestCollector(object):
def test_collect_versus_item(self):
from pytest import Collector, Item
assert not issubclass(Collector, Item)
assert not issubclass(Item, Collector)
def test_compat_attributes(self, testdir, recwarn):
modcol = testdir.getmodulecol(
"""
def test_pass(): pass
def test_fail(): assert 0
"""
)
recwarn.clear()
assert modcol.Module == pytest.Module
assert modcol.Class == pytest.Class
assert modcol.Item == pytest.Item
assert modcol.File == pytest.File
assert modcol.Function == pytest.Function
def test_check_equality(self, testdir):
modcol = testdir.getmodulecol(
"""
def test_pass(): pass
def test_fail(): assert 0
"""
)
fn1 = testdir.collect_by_name(modcol, "test_pass")
assert isinstance(fn1, pytest.Function)
fn2 = testdir.collect_by_name(modcol, "test_pass")
assert isinstance(fn2, pytest.Function)
assert fn1 == fn2
assert fn1 != modcol
if sys.version_info < (3, 0):
assert cmp(fn1, fn2) == 0 # NOQA
assert hash(fn1) == hash(fn2)
fn3 = testdir.collect_by_name(modcol, "test_fail")
assert isinstance(fn3, pytest.Function)
assert not (fn1 == fn3)
assert fn1 != fn3
for fn in fn1, fn2, fn3:
assert fn != 3
assert fn != modcol
assert fn != [1, 2, 3]
assert [1, 2, 3] != fn
assert modcol != fn
def test_getparent(self, testdir):
modcol = testdir.getmodulecol(
"""
class TestClass(object):
def test_foo():
pass
"""
)
cls = testdir.collect_by_name(modcol, "TestClass")
fn = testdir.collect_by_name(testdir.collect_by_name(cls, "()"), "test_foo")
parent = fn.getparent(pytest.Module)
assert parent is modcol
parent = fn.getparent(pytest.Function)
assert parent is fn
parent = fn.getparent(pytest.Class)
assert parent is cls
def test_getcustomfile_roundtrip(self, testdir):
hello = testdir.makefile(".xxx", hello="world")
testdir.makepyfile(
conftest="""
import pytest
class CustomFile(pytest.File):
pass
def pytest_collect_file(path, parent):
if path.ext == ".xxx":
return CustomFile(path, parent=parent)
"""
)
node = testdir.getpathnode(hello)
assert isinstance(node, pytest.File)
assert node.name == "hello.xxx"
nodes = node.session.perform_collect([node.nodeid], genitems=False)
assert len(nodes) == 1
assert isinstance(nodes[0], pytest.File)
def test_can_skip_class_with_test_attr(self, testdir):
"""Assure test class is skipped when using `__test__=False` (See #2007)."""
testdir.makepyfile(
"""
class TestFoo(object):
__test__ = False
def __init__(self):
pass
def test_foo():
assert True
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["collected 0 items", "*no tests ran in*"])
class TestCollectFS(object):
def test_ignored_certain_directories(self, testdir):
tmpdir = testdir.tmpdir
tmpdir.ensure("build", "test_notfound.py")
tmpdir.ensure("dist", "test_notfound.py")
tmpdir.ensure("_darcs", "test_notfound.py")
tmpdir.ensure("CVS", "test_notfound.py")
tmpdir.ensure("{arch}", "test_notfound.py")
tmpdir.ensure(".whatever", "test_notfound.py")
tmpdir.ensure(".bzr", "test_notfound.py")
tmpdir.ensure("normal", "test_found.py")
for x in tmpdir.visit("test_*.py"):
x.write("def test_hello(): pass")
result = testdir.runpytest("--collect-only")
s = result.stdout.str()
assert "test_notfound" not in s
assert "test_found" in s
@pytest.mark.parametrize(
"fname",
(
"activate",
"activate.csh",
"activate.fish",
"Activate",
"Activate.bat",
"Activate.ps1",
),
)
def test_ignored_virtualenvs(self, testdir, fname):
bindir = "Scripts" if sys.platform.startswith("win") else "bin"
testdir.tmpdir.ensure("virtual", bindir, fname)
testfile = testdir.tmpdir.ensure("virtual", "test_invenv.py")
testfile.write("def test_hello(): pass")
# by default, ignore tests inside a virtualenv
result = testdir.runpytest()
assert "test_invenv" not in result.stdout.str()
# allow test collection if user insists
result = testdir.runpytest("--collect-in-virtualenv")
assert "test_invenv" in result.stdout.str()
# allow test collection if user directly passes in the directory
result = testdir.runpytest("virtual")
assert "test_invenv" in result.stdout.str()
@pytest.mark.parametrize(
"fname",
(
"activate",
"activate.csh",
"activate.fish",
"Activate",
"Activate.bat",
"Activate.ps1",
),
)
def test_ignored_virtualenvs_norecursedirs_precedence(self, testdir, fname):
bindir = "Scripts" if sys.platform.startswith("win") else "bin"
# norecursedirs takes priority
testdir.tmpdir.ensure(".virtual", bindir, fname)
testfile = testdir.tmpdir.ensure(".virtual", "test_invenv.py")
testfile.write("def test_hello(): pass")
result = testdir.runpytest("--collect-in-virtualenv")
assert "test_invenv" not in result.stdout.str()
# ...unless the virtualenv is explicitly given on the CLI
result = testdir.runpytest("--collect-in-virtualenv", ".virtual")
assert "test_invenv" in result.stdout.str()
@pytest.mark.parametrize(
"fname",
(
"activate",
"activate.csh",
"activate.fish",
"Activate",
"Activate.bat",
"Activate.ps1",
),
)
def test__in_venv(self, testdir, fname):
"""Directly test the virtual env detection function"""
bindir = "Scripts" if sys.platform.startswith("win") else "bin"
# no bin/activate, not a virtualenv
base_path = testdir.tmpdir.mkdir("venv")
assert _in_venv(base_path) is False
# with bin/activate, totally a virtualenv
base_path.ensure(bindir, fname)
assert _in_venv(base_path) is True
def test_custom_norecursedirs(self, testdir):
testdir.makeini(
"""
[pytest]
norecursedirs = mydir xyz*
"""
)
tmpdir = testdir.tmpdir
tmpdir.ensure("mydir", "test_hello.py").write("def test_1(): pass")
tmpdir.ensure("xyz123", "test_2.py").write("def test_2(): 0/0")
tmpdir.ensure("xy", "test_ok.py").write("def test_3(): pass")
rec = testdir.inline_run()
rec.assertoutcome(passed=1)
rec = testdir.inline_run("xyz123/test_2.py")
rec.assertoutcome(failed=1)
def test_testpaths_ini(self, testdir, monkeypatch):
testdir.makeini(
"""
[pytest]
testpaths = gui uts
"""
)
tmpdir = testdir.tmpdir
tmpdir.ensure("env", "test_1.py").write("def test_env(): pass")
tmpdir.ensure("gui", "test_2.py").write("def test_gui(): pass")
tmpdir.ensure("uts", "test_3.py").write("def test_uts(): pass")
        # when executing from the rootdir, only tests from the `testpaths`
        # directories are collected
items, reprec = testdir.inline_genitems("-v")
assert [x.name for x in items] == ["test_gui", "test_uts"]
# check that explicitly passing directories in the command-line
# collects the tests
for dirname in ("env", "gui", "uts"):
items, reprec = testdir.inline_genitems(tmpdir.join(dirname))
assert [x.name for x in items] == ["test_%s" % dirname]
# changing cwd to each subdirectory and running pytest without
# arguments collects the tests in that directory normally
for dirname in ("env", "gui", "uts"):
monkeypatch.chdir(testdir.tmpdir.join(dirname))
items, reprec = testdir.inline_genitems()
assert [x.name for x in items] == ["test_%s" % dirname]
class TestCollectPluginHookRelay(object):
def test_pytest_collect_file(self, testdir):
wascalled = []
class Plugin(object):
def pytest_collect_file(self, path, parent):
if not path.basename.startswith("."):
# Ignore hidden files, e.g. .testmondata.
wascalled.append(path)
testdir.makefile(".abc", "xyz")
pytest.main([testdir.tmpdir], plugins=[Plugin()])
assert len(wascalled) == 1
assert wascalled[0].ext == ".abc"
def test_pytest_collect_directory(self, testdir):
wascalled = []
class Plugin(object):
def pytest_collect_directory(self, path, parent):
wascalled.append(path.basename)
testdir.mkdir("hello")
testdir.mkdir("world")
pytest.main(testdir.tmpdir, plugins=[Plugin()])
assert "hello" in wascalled
assert "world" in wascalled
class TestPrunetraceback(object):
def test_custom_repr_failure(self, testdir):
p = testdir.makepyfile(
"""
import not_exists
"""
)
testdir.makeconftest(
"""
import pytest
def pytest_collect_file(path, parent):
return MyFile(path, parent)
class MyError(Exception):
pass
class MyFile(pytest.File):
def collect(self):
raise MyError()
def repr_failure(self, excinfo):
if excinfo.errisinstance(MyError):
return "hello world"
return pytest.File.repr_failure(self, excinfo)
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(["*ERROR collecting*", "*hello world*"])
@pytest.mark.xfail(reason="other mechanism for adding to reporting needed")
def test_collect_report_postprocessing(self, testdir):
p = testdir.makepyfile(
"""
import not_exists
"""
)
testdir.makeconftest(
"""
import pytest
@pytest.hookimpl(hookwrapper=True)
def pytest_make_collect_report():
outcome = yield
rep = outcome.get_result()
rep.headerlines += ["header1"]
outcome.force_result(rep)
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(["*ERROR collecting*", "*header1*"])
class TestCustomConftests(object):
def test_ignore_collect_path(self, testdir):
testdir.makeconftest(
"""
def pytest_ignore_collect(path, config):
return path.basename.startswith("x") or \
path.basename == "test_one.py"
"""
)
sub = testdir.mkdir("xy123")
sub.ensure("test_hello.py").write("syntax error")
sub.join("conftest.py").write("syntax error")
testdir.makepyfile("def test_hello(): pass")
testdir.makepyfile(test_one="syntax error")
result = testdir.runpytest("--fulltrace")
assert result.ret == 0
result.stdout.fnmatch_lines(["*1 passed*"])
def test_ignore_collect_not_called_on_argument(self, testdir):
testdir.makeconftest(
"""
def pytest_ignore_collect(path, config):
return True
"""
)
p = testdir.makepyfile("def test_hello(): pass")
result = testdir.runpytest(p)
assert result.ret == 0
result.stdout.fnmatch_lines("*1 passed*")
result = testdir.runpytest()
assert result.ret == EXIT_NOTESTSCOLLECTED
result.stdout.fnmatch_lines("*collected 0 items*")
def test_collectignore_exclude_on_option(self, testdir):
testdir.makeconftest(
"""
collect_ignore = ['hello', 'test_world.py']
def pytest_addoption(parser):
parser.addoption("--XX", action="store_true", default=False)
def pytest_configure(config):
if config.getvalue("XX"):
collect_ignore[:] = []
"""
)
testdir.mkdir("hello")
testdir.makepyfile(test_world="def test_hello(): pass")
result = testdir.runpytest()
assert result.ret == EXIT_NOTESTSCOLLECTED
assert "passed" not in result.stdout.str()
result = testdir.runpytest("--XX")
assert result.ret == 0
assert "passed" in result.stdout.str()
def test_pytest_fs_collect_hooks_are_seen(self, testdir):
testdir.makeconftest(
"""
import pytest
class MyModule(pytest.Module):
pass
def pytest_collect_file(path, parent):
if path.ext == ".py":
return MyModule(path, parent)
"""
)
testdir.mkdir("sub")
testdir.makepyfile("def test_x(): pass")
result = testdir.runpytest("--collect-only")
result.stdout.fnmatch_lines(["*MyModule*", "*test_x*"])
def test_pytest_collect_file_from_sister_dir(self, testdir):
sub1 = testdir.mkpydir("sub1")
sub2 = testdir.mkpydir("sub2")
conf1 = testdir.makeconftest(
"""
import pytest
class MyModule1(pytest.Module):
pass
def pytest_collect_file(path, parent):
if path.ext == ".py":
return MyModule1(path, parent)
"""
)
conf1.move(sub1.join(conf1.basename))
conf2 = testdir.makeconftest(
"""
import pytest
class MyModule2(pytest.Module):
pass
def pytest_collect_file(path, parent):
if path.ext == ".py":
return MyModule2(path, parent)
"""
)
conf2.move(sub2.join(conf2.basename))
p = testdir.makepyfile("def test_x(): pass")
p.copy(sub1.join(p.basename))
p.copy(sub2.join(p.basename))
result = testdir.runpytest("--collect-only")
result.stdout.fnmatch_lines(["*MyModule1*", "*MyModule2*", "*test_x*"])
class TestSession(object):
def test_parsearg(self, testdir):
p = testdir.makepyfile("def test_func(): pass")
subdir = testdir.mkdir("sub")
subdir.ensure("__init__.py")
target = subdir.join(p.basename)
p.move(target)
subdir.chdir()
config = testdir.parseconfig(p.basename)
rcol = Session(config=config)
assert rcol.fspath == subdir
parts = rcol._parsearg(p.basename)
assert parts[0] == target
assert len(parts) == 1
parts = rcol._parsearg(p.basename + "::test_func")
assert parts[0] == target
assert parts[1] == "test_func"
assert len(parts) == 2
def test_collect_topdir(self, testdir):
p = testdir.makepyfile("def test_func(): pass")
id = "::".join([p.basename, "test_func"])
# XXX migrate to collectonly? (see below)
config = testdir.parseconfig(id)
topdir = testdir.tmpdir
rcol = Session(config)
assert topdir == rcol.fspath
# rootid = rcol.nodeid
# root2 = rcol.perform_collect([rcol.nodeid], genitems=False)[0]
# assert root2 == rcol, rootid
colitems = rcol.perform_collect([rcol.nodeid], genitems=False)
assert len(colitems) == 1
assert colitems[0].fspath == p
def get_reported_items(self, hookrec):
"""Return pytest.Item instances reported by the pytest_collectreport hook"""
calls = hookrec.getcalls("pytest_collectreport")
return [
x
for call in calls
for x in call.report.result
if isinstance(x, pytest.Item)
]
def test_collect_protocol_single_function(self, testdir):
p = testdir.makepyfile("def test_func(): pass")
id = "::".join([p.basename, "test_func"])
items, hookrec = testdir.inline_genitems(id)
item, = items
assert item.name == "test_func"
newid = item.nodeid
assert newid == id
pprint.pprint(hookrec.calls)
topdir = testdir.tmpdir # noqa
hookrec.assert_contains(
[
("pytest_collectstart", "collector.fspath == topdir"),
("pytest_make_collect_report", "collector.fspath == topdir"),
("pytest_collectstart", "collector.fspath == p"),
("pytest_make_collect_report", "collector.fspath == p"),
("pytest_pycollect_makeitem", "name == 'test_func'"),
("pytest_collectreport", "report.result[0].name == 'test_func'"),
]
)
# ensure we are reporting the collection of the single test item (#2464)
assert [x.name for x in self.get_reported_items(hookrec)] == ["test_func"]
def test_collect_protocol_method(self, testdir):
p = testdir.makepyfile(
"""
class TestClass(object):
def test_method(self):
pass
"""
)
normid = p.basename + "::TestClass::()::test_method"
for id in [
p.basename,
p.basename + "::TestClass",
p.basename + "::TestClass::()",
normid,
]:
items, hookrec = testdir.inline_genitems(id)
assert len(items) == 1
assert items[0].name == "test_method"
newid = items[0].nodeid
assert newid == normid
# ensure we are reporting the collection of the single test item (#2464)
assert [x.name for x in self.get_reported_items(hookrec)] == ["test_method"]
def test_collect_custom_nodes_multi_id(self, testdir):
p = testdir.makepyfile("def test_func(): pass")
testdir.makeconftest(
"""
import pytest
class SpecialItem(pytest.Item):
def runtest(self):
return # ok
class SpecialFile(pytest.File):
def collect(self):
return [SpecialItem(name="check", parent=self)]
def pytest_collect_file(path, parent):
if path.basename == %r:
return SpecialFile(fspath=path, parent=parent)
"""
% p.basename
)
id = p.basename
items, hookrec = testdir.inline_genitems(id)
pprint.pprint(hookrec.calls)
assert len(items) == 2
hookrec.assert_contains(
[
("pytest_collectstart", "collector.fspath == collector.session.fspath"),
(
"pytest_collectstart",
"collector.__class__.__name__ == 'SpecialFile'",
),
("pytest_collectstart", "collector.__class__.__name__ == 'Module'"),
("pytest_pycollect_makeitem", "name == 'test_func'"),
("pytest_collectreport", "report.nodeid.startswith(p.basename)"),
]
)
assert len(self.get_reported_items(hookrec)) == 2
def test_collect_subdir_event_ordering(self, testdir):
p = testdir.makepyfile("def test_func(): pass")
aaa = testdir.mkpydir("aaa")
test_aaa = aaa.join("test_aaa.py")
p.move(test_aaa)
items, hookrec = testdir.inline_genitems()
assert len(items) == 1
pprint.pprint(hookrec.calls)
hookrec.assert_contains(
[
("pytest_collectstart", "collector.fspath == test_aaa"),
("pytest_pycollect_makeitem", "name == 'test_func'"),
("pytest_collectreport", "report.nodeid.startswith('aaa/test_aaa.py')"),
]
)
def test_collect_two_commandline_args(self, testdir):
p = testdir.makepyfile("def test_func(): pass")
aaa = testdir.mkpydir("aaa")
bbb = testdir.mkpydir("bbb")
test_aaa = aaa.join("test_aaa.py")
p.copy(test_aaa)
test_bbb = bbb.join("test_bbb.py")
p.move(test_bbb)
id = "."
items, hookrec = testdir.inline_genitems(id)
assert len(items) == 2
pprint.pprint(hookrec.calls)
hookrec.assert_contains(
[
("pytest_collectstart", "collector.fspath == test_aaa"),
("pytest_pycollect_makeitem", "name == 'test_func'"),
("pytest_collectreport", "report.nodeid == 'aaa/test_aaa.py'"),
("pytest_collectstart", "collector.fspath == test_bbb"),
("pytest_pycollect_makeitem", "name == 'test_func'"),
("pytest_collectreport", "report.nodeid == 'bbb/test_bbb.py'"),
]
)
def test_serialization_byid(self, testdir):
testdir.makepyfile("def test_func(): pass")
items, hookrec = testdir.inline_genitems()
assert len(items) == 1
item, = items
items2, hookrec = testdir.inline_genitems(item.nodeid)
item2, = items2
assert item2.name == item.name
assert item2.fspath == item.fspath
def test_find_byid_without_instance_parents(self, testdir):
p = testdir.makepyfile(
"""
class TestClass(object):
def test_method(self):
pass
"""
)
arg = p.basename + "::TestClass::test_method"
items, hookrec = testdir.inline_genitems(arg)
assert len(items) == 1
item, = items
assert item.nodeid.endswith("TestClass::()::test_method")
# ensure we are reporting the collection of the single test item (#2464)
assert [x.name for x in self.get_reported_items(hookrec)] == ["test_method"]
class Test_getinitialnodes(object):
def test_global_file(self, testdir, tmpdir):
x = tmpdir.ensure("x.py")
with tmpdir.as_cwd():
config = testdir.parseconfigure(x)
col = testdir.getnode(config, x)
assert isinstance(col, pytest.Module)
assert col.name == "x.py"
assert col.parent.parent is None
for col in col.listchain():
assert col.config is config
def test_pkgfile(self, testdir):
tmpdir = testdir.tmpdir
subdir = tmpdir.join("subdir")
x = subdir.ensure("x.py")
subdir.ensure("__init__.py")
with subdir.as_cwd():
config = testdir.parseconfigure(x)
col = testdir.getnode(config, x)
assert isinstance(col, pytest.Module)
assert col.name == "x.py"
assert col.parent.parent is None
for col in col.listchain():
assert col.config is config
class Test_genitems(object):
def test_check_collect_hashes(self, testdir):
p = testdir.makepyfile(
"""
def test_1():
pass
def test_2():
pass
"""
)
p.copy(p.dirpath(p.purebasename + "2" + ".py"))
items, reprec = testdir.inline_genitems(p.dirpath())
assert len(items) == 4
for numi, i in enumerate(items):
for numj, j in enumerate(items):
if numj != numi:
assert hash(i) != hash(j)
assert i != j
def test_example_items1(self, testdir):
p = testdir.makepyfile(
"""
def testone():
pass
class TestX(object):
def testmethod_one(self):
pass
class TestY(TestX):
pass
"""
)
items, reprec = testdir.inline_genitems(p)
assert len(items) == 3
assert items[0].name == "testone"
assert items[1].name == "testmethod_one"
assert items[2].name == "testmethod_one"
# let's also test getmodpath here
assert items[0].getmodpath() == "testone"
assert items[1].getmodpath() == "TestX.testmethod_one"
assert items[2].getmodpath() == "TestY.testmethod_one"
s = items[0].getmodpath(stopatmodule=False)
assert s.endswith("test_example_items1.testone")
print(s)
def test_class_and_functions_discovery_using_glob(self, testdir):
"""
tests that python_classes and python_functions config options work
as prefixes and glob-like patterns (issue #600).
"""
testdir.makeini(
"""
[pytest]
python_classes = *Suite Test
python_functions = *_test test
"""
)
p = testdir.makepyfile(
"""
class MyTestSuite(object):
def x_test(self):
pass
class TestCase(object):
def test_y(self):
pass
"""
)
items, reprec = testdir.inline_genitems(p)
ids = [x.getmodpath() for x in items]
assert ids == ["MyTestSuite.x_test", "TestCase.test_y"]
def test_matchnodes_two_collections_same_file(testdir):
testdir.makeconftest(
"""
import pytest
def pytest_configure(config):
config.pluginmanager.register(Plugin2())
class Plugin2(object):
def pytest_collect_file(self, path, parent):
if path.ext == ".abc":
return MyFile2(path, parent)
def pytest_collect_file(path, parent):
if path.ext == ".abc":
return MyFile1(path, parent)
class MyFile1(pytest.Item, pytest.File):
def runtest(self):
pass
class MyFile2(pytest.File):
def collect(self):
return [Item2("hello", parent=self)]
class Item2(pytest.Item):
def runtest(self):
pass
"""
)
p = testdir.makefile(".abc", "")
result = testdir.runpytest()
assert result.ret == 0
result.stdout.fnmatch_lines(["*2 passed*"])
res = testdir.runpytest("%s::hello" % p.basename)
res.stdout.fnmatch_lines(["*1 passed*"])
class TestNodekeywords(object):
def test_no_under(self, testdir):
modcol = testdir.getmodulecol(
"""
def test_pass(): pass
def test_fail(): assert 0
"""
)
values = list(modcol.keywords)
assert modcol.name in values
for x in values:
assert not x.startswith("_")
assert modcol.name in repr(modcol.keywords)
def test_issue345(self, testdir):
testdir.makepyfile(
"""
def test_should_not_be_selected():
assert False, 'I should not have been selected to run'
def test___repr__():
pass
"""
)
reprec = testdir.inline_run("-k repr")
reprec.assertoutcome(passed=1, failed=0)
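# Modules shared by the collection-error tests below: one failing test, two
# modules with import errors, and one passing test.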
COLLECTION_ERROR_PY_FILES = dict(
test_01_failure="""
def test_1():
assert False
""",
test_02_import_error="""
import asdfasdfasdf
def test_2():
assert True
""",
test_03_import_error="""
import asdfasdfasdf
def test_3():
assert True
""",
test_04_success="""
def test_4():
assert True
""",
)
def test_exit_on_collection_error(testdir):
"""Verify that all collection errors are collected and no tests executed"""
testdir.makepyfile(**COLLECTION_ERROR_PY_FILES)
res = testdir.runpytest()
assert res.ret == 2
res.stdout.fnmatch_lines(
[
"collected 2 items / 2 errors",
"*ERROR collecting test_02_import_error.py*",
"*No module named *asdfa*",
"*ERROR collecting test_03_import_error.py*",
"*No module named *asdfa*",
]
)
def test_exit_on_collection_with_maxfail_smaller_than_n_errors(testdir):
"""
Verify collection is aborted once maxfail errors are encountered ignoring
further modules which would cause more collection errors.
"""
testdir.makepyfile(**COLLECTION_ERROR_PY_FILES)
res = testdir.runpytest("--maxfail=1")
assert res.ret == 1
res.stdout.fnmatch_lines(
["*ERROR collecting test_02_import_error.py*", "*No module named *asdfa*"]
)
assert "test_03" not in res.stdout.str()
def test_exit_on_collection_with_maxfail_bigger_than_n_errors(testdir):
"""
Verify the test run aborts due to collection errors even if maxfail count of
errors was not reached.
"""
testdir.makepyfile(**COLLECTION_ERROR_PY_FILES)
res = testdir.runpytest("--maxfail=4")
assert res.ret == 2
res.stdout.fnmatch_lines(
[
"collected 2 items / 2 errors",
"*ERROR collecting test_02_import_error.py*",
"*No module named *asdfa*",
"*ERROR collecting test_03_import_error.py*",
"*No module named *asdfa*",
]
)
def test_continue_on_collection_errors(testdir):
"""
Verify tests are executed even when collection errors occur when the
--continue-on-collection-errors flag is set
"""
testdir.makepyfile(**COLLECTION_ERROR_PY_FILES)
res = testdir.runpytest("--continue-on-collection-errors")
assert res.ret == 1
res.stdout.fnmatch_lines(
["collected 2 items / 2 errors", "*1 failed, 1 passed, 2 error*"]
)
def test_continue_on_collection_errors_maxfail(testdir):
"""
Verify tests are executed even when collection errors occur and that maxfail
is honoured (including the collection error count).
4 tests: 2 collection errors + 1 failure + 1 success
test_4 is never executed because the test run is with --maxfail=3 which
means it is interrupted after the 2 collection errors + 1 failure.
"""
testdir.makepyfile(**COLLECTION_ERROR_PY_FILES)
res = testdir.runpytest("--continue-on-collection-errors", "--maxfail=3")
assert res.ret == 1
res.stdout.fnmatch_lines(["collected 2 items / 2 errors", "*1 failed, 2 error*"])
def test_fixture_scope_sibling_conftests(testdir):
"""Regression test case for https://github.com/pytest-dev/pytest/issues/2836"""
foo_path = testdir.mkpydir("foo")
foo_path.join("conftest.py").write(
_pytest._code.Source(
"""
import pytest
@pytest.fixture
def fix():
return 1
"""
)
)
foo_path.join("test_foo.py").write("def test_foo(fix): assert fix == 1")
# Tests in `food/` should not see the conftest fixture from `foo/`
food_path = testdir.mkpydir("food")
food_path.join("test_food.py").write("def test_food(fix): assert fix == 1")
res = testdir.runpytest()
assert res.ret == 1
res.stdout.fnmatch_lines(
[
"*ERROR at setup of test_food*",
"E*fixture 'fix' not found",
"*1 passed, 1 error*",
]
)
| mpl-2.0 |
jonboiser/kolibri | .buildkite/upload_artifacts.py | 4 | 7706 | """
# Requirements:
* Generate access token in your Github account, then create environment variable GITHUB_ACCESS_TOKEN.
- e.g export GITHUB_ACCESS_TOKEN=1ns3rt-my-t0k3n-h3re.
* Generate a service account key for your Google API credentials, then create environment variable GOOGLE_APPLICATION_CREDENTIALS.
- e.g export GOOGLE_APPLICATION_CREDENTIALS=/path/to/credentials.json.
# Environment Variable/s:
* IS_KOLIBRI_RELEASE = Upload artifacts to the Google Cloud as a release candidate.
* GITHUB_ACCESS_TOKEN = Personal access token used to authenticate in your Github account via API.
* BUILDKITE_BUILD_NUMBER = Build identifier for each directory created.
* BUILDKITE_PULL_REQUEST = Pull request issue or the value is false.
* BUILDKITE_TAG = Tag identifier if this build was built from a tag.
* BUILDKITE_COMMIT = Git commit hash that the build was made from.
* GOOGLE_APPLICATION_CREDENTIALS = Your service account key.
"""
import logging
import os
import sys
from os import listdir
import requests
from gcloud import storage
from github3 import login
logging.getLogger().setLevel(logging.INFO)
ACCESS_TOKEN = os.getenv("GITHUB_ACCESS_TOKEN")
REPO_OWNER = "learningequality"
REPO_NAME = "kolibri"
ISSUE_ID = os.getenv("BUILDKITE_PULL_REQUEST")
BUILD_ID = os.getenv("BUILDKITE_BUILD_NUMBER")
TAG = os.getenv("BUILDKITE_TAG")
COMMIT = os.getenv("BUILDKITE_COMMIT")
RELEASE_DIR = 'release'
PROJECT_PATH = os.path.join(os.getcwd())
# Python packages artifact location
DIST_DIR = os.path.join(PROJECT_PATH, "dist")
# Installer artifact location
INSTALLER_DIR = os.path.join(PROJECT_PATH, "installer")
headers = {'Authorization': 'token %s' % ACCESS_TOKEN}
INSTALLER_CAT = 'Installers'
PYTHON_PKG_CAT = 'Python packages'
# Manifest of files, keyed by extension
file_manifest = {
'exe': {
'extension': 'exe',
'description': 'Windows Installer',
'category': INSTALLER_CAT,
'content_type': 'application/x-ms-dos-executable',
},
'pex': {
'extension': 'pex',
'description': 'Pex file',
'category': PYTHON_PKG_CAT,
'content_type': 'application/octet-stream',
},
'whl': {
'extension': 'whl',
'description': 'Whl file',
'category': PYTHON_PKG_CAT,
'content_type': 'application/zip',
},
'gz': {
'extension': 'gz',
'description': 'Tar file',
'category': PYTHON_PKG_CAT,
'content_type': 'application/gzip',
},
# 'apk': {
# 'extension': 'apk',
# 'description': 'Android Installer',
# 'category': INSTALLER_CAT,
# 'content_type': 'application/vnd.android.package-archive',
# },
}
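# Order in which artifact types are listed in the status report and uploaded as
# release assets.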
file_order = [
'exe',
# 'apk',
'pex',
'whl',
'gz',
]
gh = login(token=ACCESS_TOKEN)
repository = gh.repository(REPO_OWNER, REPO_NAME)
def create_status_report_html(artifacts):
"""
Create html page to list build artifacts for linking from github status.
"""
html = "<html>\n<body>\n<h1>Build Artifacts</h1>\n"
current_heading = None
for ext in file_order:
artifact = artifacts[ext]
if artifact['category'] != current_heading:
current_heading = artifact['category']
html += "<h2>{heading}</h2>\n".format(heading=current_heading)
html += "<p>{description}: <a href='{media_url}'>{name}</a></p>\n".format(
**artifact
)
html += "</body>\n</html>"
return html
def create_github_status(report_url):
"""
Create a github status with a link to the report URL,
only do this once buildkite has been successful, so only report success here.
"""
status = repository.create_status(
COMMIT,
"success",
target_url=report_url,
description="Kolibri Buildkite assets",
context="buildkite/kolibri/assets"
)
if status:
logging.info('Successfully created Github status for commit %s.' % COMMIT)
else:
        logging.info('Error encountered. Now exiting!')
sys.exit(1)
def collect_local_artifacts():
"""
Create a dict of the artifact name and the location.
"""
artifacts_dict = {}
def create_artifact_data(artifact_dir):
for artifact in listdir(artifact_dir):
filename, file_extension = os.path.splitext(artifact)
# Remove leading '.'
file_extension = file_extension[1:]
if file_extension in file_manifest:
data = {"name": artifact,
"file_location": "%s/%s" % (artifact_dir, artifact)}
data.update(file_manifest[file_extension])
logging.info("Collect file data: (%s)" % data)
artifacts_dict[file_extension] = data
create_artifact_data(DIST_DIR)
create_artifact_data(INSTALLER_DIR)
return artifacts_dict
def upload_artifacts():
"""
    Upload the artifacts to Google Cloud Storage.
    Create a GitHub status on the commit that links to the generated artifact report.
"""
client = storage.Client()
bucket = client.bucket("le-downloads")
artifacts = collect_local_artifacts()
is_release = os.getenv("IS_KOLIBRI_RELEASE")
for file_data in artifacts.values():
logging.info("Uploading file (%s)" % (file_data.get("name")))
if is_release:
blob = bucket.blob('kolibri-%s-%s-%s' % (RELEASE_DIR, BUILD_ID, file_data.get("name")))
else:
blob = bucket.blob('kolibri-buildkite-build-%s-%s-%s' % (ISSUE_ID, BUILD_ID, file_data.get("name")))
blob.upload_from_filename(filename=file_data.get("file_location"))
blob.make_public()
file_data.update({'media_url': blob.media_link})
html = create_status_report_html(artifacts)
blob = bucket.blob('kolibri-%s-%s-report.html' % (RELEASE_DIR, BUILD_ID))
blob.upload_from_string(html, content_type='text/html')
blob.make_public()
create_github_status(blob.public_url)
if TAG:
# Building from a tag, this is probably a release!
# Have to do this with requests because github3 does not support this interface yet
get_release_asset_url = requests.get("https://api.github.com/repos/{owner}/{repo}/releases/tags/{tag}".format(
owner=REPO_OWNER,
repo=REPO_NAME,
tag=TAG,
))
if get_release_asset_url.status_code == 200:
# Definitely a release!
release_id = get_release_asset_url.json()['id']
release_name = get_release_asset_url.json()['name']
release = repository.release(id=release_id)
logging.info("Uploading built assets to Github Release: %s" % release_name)
for file_extension in file_order:
artifact = artifacts[file_extension]
logging.info("Uploading release asset: %s" % (artifact.get("name")))
# For some reason github3 does not let us set a label at initial upload
asset = release.upload_asset(
content_type=artifact['content_type'],
name=artifact['name'],
asset=open(artifact['file_location'], 'rb')
)
if asset:
# So do it after the initial upload instead
asset.edit(artifact['name'], label=artifact['description'])
logging.info("Successfully uploaded release asset: %s" % (artifact.get('name')))
else:
logging.error("Error uploading release asset: %s" % (artifact.get('name')))
def main():
upload_artifacts()
if __name__ == "__main__":
main()
| mit |
hon-po/Marlin | Marlin/scripts/createSpeedLookupTable.py | 333 | 1382 | #!/usr/bin/env python
""" Generate the stepper delay lookup table for Marlin firmware. """
import argparse
__author__ = "Ben Gamari <[email protected]>"
__copyright__ = "Copyright 2012, Ben Gamari"
__license__ = "GPL"
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-f', '--cpu-freq', type=int, default=16, help='CPU clockrate in MHz (default=16)')
parser.add_argument('-d', '--divider', type=int, default=8, help='Timer/counter pre-scale divider (default=8)')
args = parser.parse_args()
cpu_freq = args.cpu_freq * 1000000
timer_freq = cpu_freq / args.divider
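# Each row of the generated tables is a (timer count, difference to the next row)
# pair; the difference column (b[i] = a[i] - a[i+1] below) is what lets the firmware
# interpolate between adjacent entries.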
print "#ifndef SPEED_LOOKUPTABLE_H"
print "#define SPEED_LOOKUPTABLE_H"
print
print '#include "Marlin.h"'
print
print "const uint16_t speed_lookuptable_fast[256][2] PROGMEM = {"
a = [ timer_freq / ((i*256)+(args.cpu_freq*2)) for i in range(256) ]
b = [ a[i] - a[i+1] for i in range(255) ]
b.append(b[-1])
for i in range(32):
print " ",
for j in range(8):
print "{%d, %d}," % (a[8*i+j], b[8*i+j]),
print
print "};"
print
print "const uint16_t speed_lookuptable_slow[256][2] PROGMEM = {"
a = [ timer_freq / ((i*8)+(args.cpu_freq*2)) for i in range(256) ]
b = [ a[i] - a[i+1] for i in range(255) ]
b.append(b[-1])
for i in range(32):
print " ",
for j in range(8):
print "{%d, %d}," % (a[8*i+j], b[8*i+j]),
print
print "};"
print
print "#endif"
| gpl-3.0 |
jreback/pandas | pandas/core/computation/align.py | 2 | 5999 | """
Core eval alignment algorithms.
"""
from __future__ import annotations
from functools import partial, wraps
from typing import TYPE_CHECKING, Dict, Optional, Sequence, Tuple, Type, Union
import warnings
import numpy as np
from pandas._typing import FrameOrSeries
from pandas.errors import PerformanceWarning
from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries
from pandas.core.base import PandasObject
import pandas.core.common as com
from pandas.core.computation.common import result_type_many
if TYPE_CHECKING:
from pandas.core.indexes.api import Index
def _align_core_single_unary_op(
term,
) -> Tuple[Union[partial, Type[FrameOrSeries]], Optional[Dict[str, Index]]]:
typ: Union[partial, Type[FrameOrSeries]]
axes: Optional[Dict[str, Index]] = None
if isinstance(term.value, np.ndarray):
typ = partial(np.asanyarray, dtype=term.value.dtype)
else:
typ = type(term.value)
if hasattr(term.value, "axes"):
axes = _zip_axes_from_type(typ, term.value.axes)
return typ, axes
def _zip_axes_from_type(
typ: Type[FrameOrSeries], new_axes: Sequence[Index]
) -> Dict[str, Index]:
return {name: new_axes[i] for i, name in enumerate(typ._AXIS_ORDERS)}
def _any_pandas_objects(terms) -> bool:
"""
Check a sequence of terms for instances of PandasObject.
"""
return any(isinstance(term.value, PandasObject) for term in terms)
def _filter_special_cases(f):
@wraps(f)
def wrapper(terms):
# single unary operand
if len(terms) == 1:
return _align_core_single_unary_op(terms[0])
term_values = (term.value for term in terms)
# we don't have any pandas objects
if not _any_pandas_objects(terms):
return result_type_many(*term_values), None
return f(terms)
return wrapper
@_filter_special_cases
def _align_core(terms):
term_index = [i for i, term in enumerate(terms) if hasattr(term.value, "axes")]
term_dims = [terms[i].value.ndim for i in term_index]
from pandas import Series
ndims = Series(dict(zip(term_index, term_dims)))
# initial axes are the axes of the largest-axis'd term
biggest = terms[ndims.idxmax()].value
typ = biggest._constructor
axes = biggest.axes
naxes = len(axes)
gt_than_one_axis = naxes > 1
for value in (terms[i].value for i in term_index):
is_series = isinstance(value, ABCSeries)
is_series_and_gt_one_axis = is_series and gt_than_one_axis
for axis, items in enumerate(value.axes):
if is_series_and_gt_one_axis:
ax, itm = naxes - 1, value.index
else:
ax, itm = axis, items
if not axes[ax].is_(itm):
axes[ax] = axes[ax].join(itm, how="outer")
for i, ndim in ndims.items():
for axis, items in zip(range(ndim), axes):
ti = terms[i].value
if hasattr(ti, "reindex"):
transpose = isinstance(ti, ABCSeries) and naxes > 1
reindexer = axes[naxes - 1] if transpose else items
term_axis_size = len(ti.axes[axis])
reindexer_size = len(reindexer)
ordm = np.log10(max(1, abs(reindexer_size - term_axis_size)))
if ordm >= 1 and reindexer_size >= 10000:
w = (
f"Alignment difference on axis {axis} is larger "
f"than an order of magnitude on term {repr(terms[i].name)}, "
f"by more than {ordm:.4g}; performance may suffer"
)
warnings.warn(w, category=PerformanceWarning, stacklevel=6)
f = partial(ti.reindex, reindexer, axis=axis, copy=False)
terms[i].update(f())
terms[i].update(terms[i].value.values)
return typ, _zip_axes_from_type(typ, axes)
def align_terms(terms):
"""
Align a set of terms.
"""
try:
# flatten the parse tree (a nested list, really)
terms = list(com.flatten(terms))
except TypeError:
# can't iterate so it must just be a constant or single variable
if isinstance(terms.value, (ABCSeries, ABCDataFrame)):
typ = type(terms.value)
return typ, _zip_axes_from_type(typ, terms.value.axes)
return np.result_type(terms.type), None
# if all resolved variables are numeric scalars
if all(term.is_scalar for term in terms):
return result_type_many(*(term.value for term in terms)).type, None
# perform the main alignment
typ, axes = _align_core(terms)
return typ, axes
def reconstruct_object(typ, obj, axes, dtype):
"""
Reconstruct an object given its type, raw value, and possibly empty
(None) axes.
Parameters
----------
typ : object
A type
obj : object
The value to use in the type constructor
axes : dict
The axes to use to construct the resulting pandas object
Returns
-------
ret : typ
An object of type ``typ`` with the value `obj` and possible axes
`axes`.
"""
try:
typ = typ.type
except AttributeError:
pass
res_t = np.result_type(obj.dtype, dtype)
if not isinstance(typ, partial) and issubclass(typ, PandasObject):
return typ(obj, dtype=res_t, **axes)
# special case for pathological things like ~True/~False
if hasattr(res_t, "type") and typ == np.bool_ and res_t != np.bool_:
ret_value = res_t.type(obj)
else:
ret_value = typ(obj).astype(res_t)
# The condition is to distinguish 0-dim array (returned in case of
# scalar) and 1 element array
# e.g. np.array(0) and np.array([0])
if (
len(obj.shape) == 1
and len(obj) == 1
and not isinstance(ret_value, np.ndarray)
):
ret_value = np.array([ret_value]).astype(res_t)
return ret_value
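# Rough usage sketch (illustrative only; the real call sites live in the eval engine
# code): an engine typically calls ``align_terms(expr.terms)`` to obtain the result
# type and aligned axes, evaluates the expression, and then calls
# ``reconstruct_object(typ, result, axes, dtype)`` to rebuild a pandas object.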
| bsd-3-clause |
artemh/asuswrt-merlin | release/src/router/samba36/source3/stf/info3cache.py | 98 | 1673 | #!/usr/bin/python
#
# Upon a winbindd authentication, test that an info3 record is cached in
# netsamlogon_cache.tdb and cache records are removed from winbindd_cache.tdb
#
import comfychair, stf
from samba import tdb, winbind
#
# We want to implement the following test on a win2k native mode domain.
#
# 1. trash netsamlogon_cache.tdb
# 2. wbinfo -r DOMAIN\Administrator [FAIL]
# 3. wbinfo --auth-crap DOMAIN\Administrator%password [PASS]
# 4. wbinfo -r DOMAIN\Administrator [PASS]
#
# Also for step 3 we want to try 'wbinfo --auth-smbd' and
# 'wbinfo --auth-plaintext'
#
#
# TODO: To implement this test we need to be able to
#
# - pass username%password combination for an individual winbindd request
# (so we can get the administrator SID so we can clear the info3 cache)
#
# - start/restart winbindd (to trash the winbind cache)
#
# - from samba import dynconfig (to find location of info3 cache)
#
# - be able to modify the winbindd cache (to set/reset individual winbind
# cache entries)
#
# - have --auth-crap present in HEAD
#
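#
# A rough, comment-only sketch of how the auth-crap test might eventually look once
# the items above are in place.  ``run_wbinfo`` and ``trash_netsamlogon_cache`` are
# hypothetical helpers, and the domain/password values are placeholders taken from
# the step list earlier in this file:
#
#   def runtest(self):
#       trash_netsamlogon_cache()                                             # step 1
#       assert run_wbinfo('-r DOMAIN\\Administrator') != 0                    # step 2 [FAIL]
#       assert run_wbinfo('--auth-crap DOMAIN\\Administrator%password') == 0  # step 3 [PASS]
#       assert run_wbinfo('-r DOMAIN\\Administrator') == 0                    # step 4 [PASS]
#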
class WinbindAuthCrap(comfychair.TestCase):
def runtest(self):
raise comfychair.NotRunError, "not implemented"
class WinbindAuthSmbd(comfychair.TestCase):
def runtest(self):
# Grr - winbindd in HEAD doesn't contain the auth_smbd function
raise comfychair.NotRunError, "no auth_smbd in HEAD"
class WinbindAuthPlaintext(comfychair.TestCase):
def runtest(self):
raise comfychair.NotRunError, "not implemented"
tests = [WinbindAuthCrap, WinbindAuthSmbd, WinbindAuthPlaintext]
if __name__ == "__main__":
comfychair.main(tests)
| gpl-2.0 |
cwisecarver/osf.io | addons/s3/routes.py | 32 | 1947 | from framework.routing import Rule, json_renderer
from addons.s3 import views
api_routes = {
'rules': [
Rule(
[
'/settings/s3/accounts/',
],
'post',
views.s3_add_user_account,
json_renderer,
),
Rule(
[
'/settings/s3/accounts/',
],
'get',
views.s3_account_list,
json_renderer,
),
Rule(
[
'/project/<pid>/s3/settings/',
'/project/<pid>/node/<nid>/s3/settings/',
],
'put',
views.s3_set_config,
json_renderer,
),
Rule(
[
'/project/<pid>/s3/settings/',
'/project/<pid>/node/<nid>/s3/settings/',
],
'get',
views.s3_get_config,
json_renderer,
),
Rule(
[
'/project/<pid>/s3/user-auth/',
'/project/<pid>/node/<nid>/s3/user-auth/',
],
'put',
views.s3_import_auth,
json_renderer,
),
Rule(
[
'/project/<pid>/s3/user-auth/',
'/project/<pid>/node/<nid>/s3/user-auth/',
],
'delete',
views.s3_deauthorize_node,
json_renderer,
),
Rule(
[
'/project/<pid>/s3/buckets/',
'/project/<pid>/node/<nid>/s3/buckets/',
],
'get',
views.s3_folder_list,
json_renderer,
),
Rule(
[
'/project/<pid>/s3/newbucket/',
'/project/<pid>/node/<nid>/s3/newbucket/',
],
'post',
views.create_bucket,
json_renderer
),
],
'prefix': '/api/v1',
}
| apache-2.0 |
ckohl/illumos-kvm-cmd | scripts/qemu-gdb.py | 286 | 2813 | #!/usr/bin/python
# GDB debugging support
#
# Copyright 2012 Red Hat, Inc. and/or its affiliates
#
# Authors:
# Avi Kivity <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2. See
# the COPYING file in the top-level directory.
#
# Contributions after 2012-01-13 are licensed under the terms of the
# GNU GPL, version 2 or (at your option) any later version.
import gdb
def isnull(ptr):
return ptr == gdb.Value(0).cast(ptr.type)
def int128(p):
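    # Reassemble a 128-bit value from its 64-bit 'lo' and 'hi' halves.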
return long(p['lo']) + (long(p['hi']) << 64)
class QemuCommand(gdb.Command):
'''Prefix for QEMU debug support commands'''
def __init__(self):
gdb.Command.__init__(self, 'qemu', gdb.COMMAND_DATA,
gdb.COMPLETE_NONE, True)
class MtreeCommand(gdb.Command):
'''Display the memory tree hierarchy'''
def __init__(self):
gdb.Command.__init__(self, 'qemu mtree', gdb.COMMAND_DATA,
gdb.COMPLETE_NONE)
self.queue = []
def invoke(self, arg, from_tty):
self.seen = set()
self.queue_root('address_space_memory')
self.queue_root('address_space_io')
self.process_queue()
def queue_root(self, varname):
ptr = gdb.parse_and_eval(varname)['root']
self.queue.append(ptr)
def process_queue(self):
while self.queue:
ptr = self.queue.pop(0)
if long(ptr) in self.seen:
continue
self.print_item(ptr)
def print_item(self, ptr, offset = gdb.Value(0), level = 0):
self.seen.add(long(ptr))
addr = ptr['addr']
addr += offset
size = int128(ptr['size'])
alias = ptr['alias']
klass = ''
if not isnull(alias):
klass = ' (alias)'
elif not isnull(ptr['ops']):
klass = ' (I/O)'
elif bool(ptr['ram']):
klass = ' (RAM)'
gdb.write('%s%016x-%016x %s%s (@ %s)\n'
% (' ' * level,
long(addr),
long(addr + (size - 1)),
ptr['name'].string(),
klass,
ptr,
),
gdb.STDOUT)
if not isnull(alias):
gdb.write('%s alias: %s@%016x (@ %s)\n' %
(' ' * level,
alias['name'].string(),
ptr['alias_offset'],
alias,
),
gdb.STDOUT)
self.queue.append(alias)
subregion = ptr['subregions']['tqh_first']
level += 1
while not isnull(subregion):
self.print_item(subregion, addr, level)
subregion = subregion['subregions_link']['tqe_next']
QemuCommand()
MtreeCommand()
| gpl-2.0 |
plucury/hyper | hyper/compat.py | 41 | 1775 | # -*- coding: utf-8 -*-
# flake8: noqa
"""
hyper/compat
~~~~~~~~~~~~
Normalizes the Python 2/3 API for internal use.
"""
from contextlib import contextmanager
import sys
import zlib
try:
from . import ssl_compat
except ImportError:
# TODO log?
ssl_compat = None
_ver = sys.version_info
is_py2 = _ver[0] == 2
is_py2_7_9_or_later = _ver[0] >= 2 and _ver[1] >= 7 and _ver[2] >= 9
is_py3 = _ver[0] == 3
is_py3_3 = is_py3 and _ver[1] == 3
@contextmanager
def ignore_missing():
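    # Silently swallow AttributeError and NotImplementedError raised inside the with-block.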
try:
yield
except (AttributeError, NotImplementedError): # pragma: no cover
pass
if is_py2:
if is_py2_7_9_or_later:
import ssl
else:
ssl = ssl_compat
from urllib import urlencode
from urlparse import urlparse, urlsplit
from itertools import imap
def to_byte(char):
return ord(char)
def decode_hex(b):
return b.decode('hex')
def write_to_stdout(data):
sys.stdout.write(data + '\n')
sys.stdout.flush()
# The standard zlib.compressobj() accepts only positional arguments.
def zlib_compressobj(level=6, method=zlib.DEFLATED, wbits=15, memlevel=8,
strategy=zlib.Z_DEFAULT_STRATEGY):
return zlib.compressobj(level, method, wbits, memlevel, strategy)
unicode = unicode
bytes = str
elif is_py3:
from urllib.parse import urlencode, urlparse, urlsplit
imap = map
def to_byte(char):
return char
def decode_hex(b):
return bytes.fromhex(b)
def write_to_stdout(data):
sys.stdout.buffer.write(data + b'\n')
sys.stdout.buffer.flush()
zlib_compressobj = zlib.compressobj
if is_py3_3:
ssl = ssl_compat
else:
import ssl
unicode = str
bytes = bytes
| mit |
rebranch/payanyway-python | payanyway/django/forms.py | 1 | 2502 | # -*- coding:utf-8 -*-
import hashlib
import urllib
from django import forms
from payanyway.api import Api
class MonetaForm(forms.Form, Api):
def _set_param(self, key, value):
        if key not in self.fields:
self.fields[key] = forms.CharField(widget=forms.HiddenInput)
self.fields[key].initial = value
    # response code for the payment notification
def _get_response_code(self):
return 200 if self.is_valid() else 100
    # signature for the response to the payment notification
def _generate_response_signature(self):
"""
MNT_SIGNATURE = MD5(
            RESPONSE_CODE + MNT_ID + MNT_TRANSACTION_ID + DATA INTEGRITY CHECK CODE
)
"""
response_code = self._get_response_code()
params = [response_code, self._account_id, self._transaction_id, self._integrity_check_code]
string_to_encode = u''.join(map(unicode, params))
signature = hashlib.md5(string_to_encode).hexdigest()
return signature
def _get_param(self, key):
if key in self.fields.keys():
return self.fields[key].initial
else:
return None
def get_payment_url(self):
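        # Build the gateway redirect URL from the form's hidden-field initial values.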
data = {}
for field in self.fields:
data[field] = self.fields[field].initial
url = u'{}?{}'.format(
self.action_url,
urllib.urlencode(data)
)
return url
def __init__(self, account_id=None, transaction_id=None, amount=0, integrity_check_code=None,
use_signature=False, currency_code=u'RUB', test_mode=False, payment_system=None, test_server=True,
custom1=None, custom2=None, custom3=None,
*args, **kwargs):
super(MonetaForm, self).__init__(*args, **kwargs)
self._account_id = account_id
self._transaction_id = transaction_id
self._currency_code = currency_code
self._test_mode = test_mode
self._amount = amount
self._integrity_check_code = integrity_check_code
self._use_test_server = test_server
if custom1:
self._custom1 = custom1
if custom2:
self._custom2 = custom2
if custom3:
self._custom3 = custom3
if payment_system:
self._payment_system = payment_system
if use_signature:
self._request_signature = self._generate_request_signature() | mit |
nuuuboo/odoo | addons/l10n_cr/__init__.py | 438 | 2045 | # -*- encoding: utf-8 -*-
##############################################################################
#
# __init__.py
# l10n_cr_account
# First author: Carlos Vásquez <[email protected]> (ClearCorp S.A.)
# Copyright (c) 2010-TODAY ClearCorp S.A. (http://clearcorp.co.cr). All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are
# permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of
# conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list
# of conditions and the following disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY <COPYRIGHT HOLDER> ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are those of the
# authors and should not be interpreted as representing official policies, either expressed
# or implied, of ClearCorp S.A..
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
realsaiko/odoo | addons/gamification_sale_crm/__openerp__.py | 320 | 1426 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'CRM Gamification',
'version': '1.0',
'author': 'OpenERP SA',
'category': 'hidden',
'depends': ['gamification','sale_crm'],
'website' : 'https://www.odoo.com/page/gamification',
'description': """Example of goal definitions and challenges that can be used related to the usage of the CRM Sale module.""",
'data': ['sale_crm_goals.xml'],
'demo': ['sale_crm_goals_demo.xml'],
'auto_install': True,
}
| agpl-3.0 |
biodrone/plex-desk | desk/flask/lib/python3.4/site-packages/pip/_vendor/requests/packages/urllib3/connectionpool.py | 155 | 29526 | import errno
import logging
import sys
import warnings
from socket import error as SocketError, timeout as SocketTimeout
import socket
try: # Python 3
from queue import LifoQueue, Empty, Full
except ImportError:
from Queue import LifoQueue, Empty, Full
import Queue as _ # Platform-specific: Windows
from .exceptions import (
ClosedPoolError,
ProtocolError,
EmptyPoolError,
HostChangedError,
LocationValueError,
MaxRetryError,
ProxyError,
ReadTimeoutError,
SSLError,
TimeoutError,
InsecureRequestWarning,
)
from .packages.ssl_match_hostname import CertificateError
from .packages import six
from .connection import (
port_by_scheme,
DummyConnection,
HTTPConnection, HTTPSConnection, VerifiedHTTPSConnection,
HTTPException, BaseSSLError, ConnectionError
)
from .request import RequestMethods
from .response import HTTPResponse
from .util.connection import is_connection_dropped
from .util.retry import Retry
from .util.timeout import Timeout
from .util.url import get_host
xrange = six.moves.xrange
log = logging.getLogger(__name__)
_Default = object()
## Pool objects
class ConnectionPool(object):
"""
Base class for all connection pools, such as
:class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`.
"""
scheme = None
QueueCls = LifoQueue
def __init__(self, host, port=None):
if not host:
raise LocationValueError("No host specified.")
# httplib doesn't like it when we include brackets in ipv6 addresses
self.host = host.strip('[]')
self.port = port
def __str__(self):
return '%s(host=%r, port=%r)' % (type(self).__name__,
self.host, self.port)
# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252
_blocking_errnos = set([errno.EAGAIN, errno.EWOULDBLOCK])
class HTTPConnectionPool(ConnectionPool, RequestMethods):
"""
Thread-safe connection pool for one host.
:param host:
Host used for this HTTP Connection (e.g. "localhost"), passed into
:class:`httplib.HTTPConnection`.
:param port:
Port used for this HTTP Connection (None is equivalent to 80), passed
into :class:`httplib.HTTPConnection`.
:param strict:
Causes BadStatusLine to be raised if the status line can't be parsed
as a valid HTTP/1.0 or 1.1 status line, passed into
:class:`httplib.HTTPConnection`.
.. note::
Only works in Python 2. This parameter is ignored in Python 3.
:param timeout:
Socket timeout in seconds for each individual connection. This can
be a float or integer, which sets the timeout for the HTTP request,
or an instance of :class:`urllib3.util.Timeout` which gives you more
fine-grained control over request timeouts. After the constructor has
been parsed, this is always a `urllib3.util.Timeout` object.
:param maxsize:
Number of connections to save that can be reused. More than 1 is useful
in multithreaded situations. If ``block`` is set to false, more
connections will be created but they will not be saved once they've
been used.
:param block:
If set to True, no more than ``maxsize`` connections will be used at
a time. When no free connections are available, the call will block
until a connection has been released. This is a useful side effect for
particular multithreaded situations where one does not want to use more
than maxsize connections per host to prevent flooding.
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
:param retries:
Retry configuration to use by default with requests in this pool.
:param _proxy:
Parsed proxy URL, should not be used directly, instead, see
        :class:`urllib3.connectionpool.ProxyManager`
:param _proxy_headers:
A dictionary with proxy headers, should not be used directly,
        instead, see :class:`urllib3.connectionpool.ProxyManager`
:param \**conn_kw:
Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`,
:class:`urllib3.connection.HTTPSConnection` instances.
"""
scheme = 'http'
ConnectionCls = HTTPConnection
def __init__(self, host, port=None, strict=False,
timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1, block=False,
headers=None, retries=None,
_proxy=None, _proxy_headers=None,
**conn_kw):
ConnectionPool.__init__(self, host, port)
RequestMethods.__init__(self, headers)
self.strict = strict
if not isinstance(timeout, Timeout):
timeout = Timeout.from_float(timeout)
if retries is None:
retries = Retry.DEFAULT
self.timeout = timeout
self.retries = retries
self.pool = self.QueueCls(maxsize)
self.block = block
self.proxy = _proxy
self.proxy_headers = _proxy_headers or {}
# Fill the queue up so that doing get() on it will block properly
for _ in xrange(maxsize):
self.pool.put(None)
# These are mostly for testing and debugging purposes.
self.num_connections = 0
self.num_requests = 0
self.conn_kw = conn_kw
if self.proxy:
# Enable Nagle's algorithm for proxies, to avoid packet fragmentation.
# We cannot know if the user has added default socket options, so we cannot replace the
# list.
self.conn_kw.setdefault('socket_options', [])
def _new_conn(self):
"""
Return a fresh :class:`HTTPConnection`.
"""
self.num_connections += 1
log.info("Starting new HTTP connection (%d): %s" %
(self.num_connections, self.host))
conn = self.ConnectionCls(host=self.host, port=self.port,
timeout=self.timeout.connect_timeout,
strict=self.strict, **self.conn_kw)
return conn
def _get_conn(self, timeout=None):
"""
Get a connection. Will return a pooled connection if one is available.
If no connections are available and :prop:`.block` is ``False``, then a
fresh connection is returned.
:param timeout:
Seconds to wait before giving up and raising
:class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and
:prop:`.block` is ``True``.
"""
conn = None
try:
conn = self.pool.get(block=self.block, timeout=timeout)
except AttributeError: # self.pool is None
raise ClosedPoolError(self, "Pool is closed.")
except Empty:
if self.block:
raise EmptyPoolError(self,
"Pool reached maximum size and no more "
"connections are allowed.")
pass # Oh well, we'll create a new connection then
# If this is a persistent connection, check if it got disconnected
if conn and is_connection_dropped(conn):
log.info("Resetting dropped connection: %s" % self.host)
conn.close()
if getattr(conn, 'auto_open', 1) == 0:
# This is a proxied connection that has been mutated by
# httplib._tunnel() and cannot be reused (since it would
# attempt to bypass the proxy)
conn = None
return conn or self._new_conn()
def _put_conn(self, conn):
"""
Put a connection back into the pool.
:param conn:
Connection object for the current host and port as returned by
:meth:`._new_conn` or :meth:`._get_conn`.
If the pool is already full, the connection is closed and discarded
because we exceeded maxsize. If connections are discarded frequently,
then maxsize should be increased.
If the pool is closed, then the connection will be closed and discarded.
"""
try:
self.pool.put(conn, block=False)
return # Everything is dandy, done.
except AttributeError:
# self.pool is None.
pass
except Full:
# This should never happen if self.block == True
log.warning(
"Connection pool is full, discarding connection: %s" %
self.host)
# Connection never got put back into the pool, close it.
if conn:
conn.close()
def _validate_conn(self, conn):
"""
Called right before a request is made, after the socket is created.
"""
pass
def _get_timeout(self, timeout):
""" Helper that always returns a :class:`urllib3.util.Timeout` """
if timeout is _Default:
return self.timeout.clone()
if isinstance(timeout, Timeout):
return timeout.clone()
else:
# User passed us an int/float. This is for backwards compatibility,
# can be removed later
return Timeout.from_float(timeout)
def _raise_timeout(self, err, url, timeout_value):
"""Is the error actually a timeout? Will raise a ReadTimeout or pass"""
if isinstance(err, SocketTimeout):
raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
# See the above comment about EAGAIN in Python 3. In Python 2 we have
# to specifically catch it and throw the timeout error
if hasattr(err, 'errno') and err.errno in _blocking_errnos:
raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
# Catch possible read timeouts thrown as SSL errors. If not the
# case, rethrow the original. We need to do this because of:
# http://bugs.python.org/issue10272
if 'timed out' in str(err) or 'did not complete (read)' in str(err): # Python 2.6
raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
def _make_request(self, conn, method, url, timeout=_Default,
**httplib_request_kw):
"""
Perform a request on a given urllib connection object taken from our
pool.
:param conn:
a connection from one of our connection pools
:param timeout:
Socket timeout in seconds for the request. This can be a
float or integer, which will set the same timeout value for
the socket connect and the socket read, or an instance of
:class:`urllib3.util.Timeout`, which gives you more fine-grained
control over your timeouts.
"""
self.num_requests += 1
timeout_obj = self._get_timeout(timeout)
timeout_obj.start_connect()
conn.timeout = timeout_obj.connect_timeout
# Trigger any extra validation we need to do.
try:
self._validate_conn(conn)
except (SocketTimeout, BaseSSLError) as e:
# Py2 raises this as a BaseSSLError, Py3 raises it as socket timeout.
self._raise_timeout(err=e, url=url, timeout_value=conn.timeout)
raise
# conn.request() calls httplib.*.request, not the method in
# urllib3.request. It also calls makefile (recv) on the socket.
conn.request(method, url, **httplib_request_kw)
# Reset the timeout for the recv() on the socket
read_timeout = timeout_obj.read_timeout
# App Engine doesn't have a sock attr
if getattr(conn, 'sock', None):
# In Python 3 socket.py will catch EAGAIN and return None when you
# try and read into the file pointer created by http.client, which
# instead raises a BadStatusLine exception. Instead of catching
# the exception and assuming all BadStatusLine exceptions are read
# timeouts, check for a zero timeout before making the request.
if read_timeout == 0:
raise ReadTimeoutError(
self, url, "Read timed out. (read timeout=%s)" % read_timeout)
if read_timeout is Timeout.DEFAULT_TIMEOUT:
conn.sock.settimeout(socket.getdefaulttimeout())
else: # None or a value
conn.sock.settimeout(read_timeout)
# Receive the response from the server
try:
try: # Python 2.7+, use buffering of HTTP responses
httplib_response = conn.getresponse(buffering=True)
except TypeError: # Python 2.6 and older
httplib_response = conn.getresponse()
except (SocketTimeout, BaseSSLError, SocketError) as e:
self._raise_timeout(err=e, url=url, timeout_value=read_timeout)
raise
# AppEngine doesn't have a version attr.
http_version = getattr(conn, '_http_vsn_str', 'HTTP/?')
log.debug("\"%s %s %s\" %s %s" % (method, url, http_version,
httplib_response.status,
httplib_response.length))
return httplib_response
def close(self):
"""
Close all pooled connections and disable the pool.
"""
# Disable access to the pool
old_pool, self.pool = self.pool, None
try:
while True:
conn = old_pool.get(block=False)
if conn:
conn.close()
except Empty:
pass # Done.
def is_same_host(self, url):
"""
Check if the given ``url`` is a member of the same host as this
connection pool.
"""
if url.startswith('/'):
return True
# TODO: Add optional support for socket.gethostbyname checking.
scheme, host, port = get_host(url)
# Use explicit default port for comparison when none is given
if self.port and not port:
port = port_by_scheme.get(scheme)
elif not self.port and port == port_by_scheme.get(scheme):
port = None
return (scheme, host, port) == (self.scheme, self.host, self.port)
def urlopen(self, method, url, body=None, headers=None, retries=None,
redirect=True, assert_same_host=True, timeout=_Default,
pool_timeout=None, release_conn=None, **response_kw):
"""
Get a connection from the pool and perform an HTTP request. This is the
lowest level call for making a request, so you'll need to specify all
the raw details.
.. note::
More commonly, it's appropriate to use a convenience method provided
by :class:`.RequestMethods`, such as :meth:`request`.
.. note::
`release_conn` will only behave as expected if
`preload_content=False` because we want to make
`preload_content=False` the default behaviour someday soon without
breaking backwards compatibility.
:param method:
HTTP request method (such as GET, POST, PUT, etc.)
:param body:
Data to send in the request body (useful for creating
POST requests, see HTTPConnectionPool.post_url for
more convenience).
:param headers:
Dictionary of custom headers to send, such as User-Agent,
If-None-Match, etc. If None, pool headers are used. If provided,
these headers completely replace any pool-specific headers.
:param retries:
Configure the number of retries to allow before raising a
:class:`~urllib3.exceptions.MaxRetryError` exception.
Pass ``None`` to retry until you receive a response. Pass a
:class:`~urllib3.util.retry.Retry` object for fine-grained control
over different types of retries.
Pass an integer number to retry connection errors that many times,
but no other types of errors. Pass zero to never retry.
If ``False``, then retries are disabled and any exception is raised
immediately. Also, instead of raising a MaxRetryError on redirects,
the redirect response will be returned.
:type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.
:param redirect:
If True, automatically handle redirects (status codes 301, 302,
303, 307, 308). Each redirect counts as a retry. Disabling retries
will disable redirect, too.
:param assert_same_host:
If ``True``, will make sure that the host of the pool requests is
consistent else will raise HostChangedError. When False, you can
use the pool on an HTTP proxy and request foreign hosts.
:param timeout:
If specified, overrides the default timeout for this one
request. It may be a float (in seconds) or an instance of
:class:`urllib3.util.Timeout`.
:param pool_timeout:
If set and the pool is set to block=True, then this method will
block for ``pool_timeout`` seconds and raise EmptyPoolError if no
connection is available within the time period.
:param release_conn:
If False, then the urlopen call will not release the connection
back into the pool once a response is received (but will release if
you read the entire contents of the response such as when
`preload_content=True`). This is useful if you're not preloading
the response's content immediately. You will need to call
``r.release_conn()`` on the response ``r`` to return the connection
back into the pool. If None, it takes the value of
``response_kw.get('preload_content', True)``.
:param \**response_kw:
Additional parameters are passed to
:meth:`urllib3.response.HTTPResponse.from_httplib`
"""
if headers is None:
headers = self.headers
if not isinstance(retries, Retry):
retries = Retry.from_int(retries, redirect=redirect, default=self.retries)
if release_conn is None:
release_conn = response_kw.get('preload_content', True)
# Check host
if assert_same_host and not self.is_same_host(url):
raise HostChangedError(self, url, retries)
conn = None
# Merge the proxy headers. Only do this in HTTP. We have to copy the
# headers dict so we can safely change it without those changes being
# reflected in anyone else's copy.
if self.scheme == 'http':
headers = headers.copy()
headers.update(self.proxy_headers)
# Must keep the exception bound to a separate variable or else Python 3
# complains about UnboundLocalError.
err = None
try:
# Request a connection from the queue.
conn = self._get_conn(timeout=pool_timeout)
# Make the request on the httplib connection object.
httplib_response = self._make_request(conn, method, url,
timeout=timeout,
body=body, headers=headers)
# If we're going to release the connection in ``finally:``, then
# the request doesn't need to know about the connection. Otherwise
# it will also try to release it and we'll have a double-release
# mess.
response_conn = not release_conn and conn
# Import httplib's response into our own wrapper object
response = HTTPResponse.from_httplib(httplib_response,
pool=self,
connection=response_conn,
**response_kw)
# else:
# The connection will be put back into the pool when
# ``response.release_conn()`` is called (implicitly by
# ``response.read()``)
except Empty:
# Timed out by queue.
raise EmptyPoolError(self, "No pool connections are available.")
except (BaseSSLError, CertificateError) as e:
# Close the connection. If a connection is reused on which there
# was a Certificate error, the next request will certainly raise
# another Certificate error.
if conn:
conn.close()
conn = None
raise SSLError(e)
except (TimeoutError, HTTPException, SocketError, ConnectionError) as e:
if conn:
# Discard the connection for these exceptions. It will be
                # replaced during the next _get_conn() call.
conn.close()
conn = None
stacktrace = sys.exc_info()[2]
if isinstance(e, SocketError) and self.proxy:
e = ProxyError('Cannot connect to proxy.', e)
elif isinstance(e, (SocketError, HTTPException)):
e = ProtocolError('Connection aborted.', e)
retries = retries.increment(method, url, error=e,
_pool=self, _stacktrace=stacktrace)
retries.sleep()
# Keep track of the error for the retry warning.
err = e
finally:
if release_conn:
# Put the connection back to be reused. If the connection is
# expired then it will be None, which will get replaced with a
# fresh connection during _get_conn.
self._put_conn(conn)
if not conn:
# Try again
log.warning("Retrying (%r) after connection "
"broken by '%r': %s" % (retries, err, url))
return self.urlopen(method, url, body, headers, retries,
redirect, assert_same_host,
timeout=timeout, pool_timeout=pool_timeout,
release_conn=release_conn, **response_kw)
# Handle redirect?
redirect_location = redirect and response.get_redirect_location()
if redirect_location:
if response.status == 303:
method = 'GET'
try:
retries = retries.increment(method, url, response=response, _pool=self)
except MaxRetryError:
if retries.raise_on_redirect:
raise
return response
log.info("Redirecting %s -> %s" % (url, redirect_location))
return self.urlopen(method, redirect_location, body, headers,
retries=retries, redirect=redirect,
assert_same_host=assert_same_host,
timeout=timeout, pool_timeout=pool_timeout,
release_conn=release_conn, **response_kw)
# Check if we should retry the HTTP response.
if retries.is_forced_retry(method, status_code=response.status):
retries = retries.increment(method, url, response=response, _pool=self)
retries.sleep()
log.info("Forced retry: %s" % url)
return self.urlopen(method, url, body, headers,
retries=retries, redirect=redirect,
assert_same_host=assert_same_host,
timeout=timeout, pool_timeout=pool_timeout,
release_conn=release_conn, **response_kw)
return response
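# Illustrative usage sketch, not part of the original module; the host and
# path below are assumptions chosen only to show the call shape of urlopen():
#
#     >>> pool = HTTPConnectionPool('example.com', maxsize=2, retries=3)
#     >>> response = pool.urlopen('GET', '/', preload_content=False)
#     >>> body = response.read()       # reading to the end releases the connection
#     >>> response.release_conn()      # explicit release is also safe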
class HTTPSConnectionPool(HTTPConnectionPool):
"""
Same as :class:`.HTTPConnectionPool`, but HTTPS.
When Python is compiled with the :mod:`ssl` module, then
:class:`.VerifiedHTTPSConnection` is used, which *can* verify certificates,
instead of :class:`.HTTPSConnection`.
:class:`.VerifiedHTTPSConnection` uses one of ``assert_fingerprint``,
``assert_hostname`` and ``host`` in this order to verify connections.
If ``assert_hostname`` is False, no verification is done.
The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs`` and
``ssl_version`` are only used if :mod:`ssl` is available and are fed into
:meth:`urllib3.util.ssl_wrap_socket` to upgrade the connection socket
into an SSL socket.
"""
scheme = 'https'
ConnectionCls = HTTPSConnection
def __init__(self, host, port=None,
strict=False, timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1,
block=False, headers=None, retries=None,
_proxy=None, _proxy_headers=None,
key_file=None, cert_file=None, cert_reqs=None,
ca_certs=None, ssl_version=None,
assert_hostname=None, assert_fingerprint=None,
**conn_kw):
HTTPConnectionPool.__init__(self, host, port, strict, timeout, maxsize,
block, headers, retries, _proxy, _proxy_headers,
**conn_kw)
self.key_file = key_file
self.cert_file = cert_file
self.cert_reqs = cert_reqs
self.ca_certs = ca_certs
self.ssl_version = ssl_version
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
def _prepare_conn(self, conn):
"""
Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket`
and establish the tunnel if proxy is used.
"""
if isinstance(conn, VerifiedHTTPSConnection):
conn.set_cert(key_file=self.key_file,
cert_file=self.cert_file,
cert_reqs=self.cert_reqs,
ca_certs=self.ca_certs,
assert_hostname=self.assert_hostname,
assert_fingerprint=self.assert_fingerprint)
conn.ssl_version = self.ssl_version
if self.proxy is not None:
# Python 2.7+
try:
set_tunnel = conn.set_tunnel
except AttributeError: # Platform-specific: Python 2.6
set_tunnel = conn._set_tunnel
if sys.version_info <= (2, 6, 4) and not self.proxy_headers: # Python 2.6.4 and older
set_tunnel(self.host, self.port)
else:
set_tunnel(self.host, self.port, self.proxy_headers)
# Establish tunnel connection early, because otherwise httplib
# would improperly set Host: header to proxy's IP:port.
conn.connect()
return conn
def _new_conn(self):
"""
Return a fresh :class:`httplib.HTTPSConnection`.
"""
self.num_connections += 1
log.info("Starting new HTTPS connection (%d): %s"
% (self.num_connections, self.host))
if not self.ConnectionCls or self.ConnectionCls is DummyConnection:
# Platform-specific: Python without ssl
raise SSLError("Can't connect to HTTPS URL because the SSL "
"module is not available.")
actual_host = self.host
actual_port = self.port
if self.proxy is not None:
actual_host = self.proxy.host
actual_port = self.proxy.port
conn = self.ConnectionCls(host=actual_host, port=actual_port,
timeout=self.timeout.connect_timeout,
strict=self.strict, **self.conn_kw)
return self._prepare_conn(conn)
def _validate_conn(self, conn):
"""
Called right before a request is made, after the socket is created.
"""
super(HTTPSConnectionPool, self)._validate_conn(conn)
# Force connect early to allow us to validate the connection.
if not getattr(conn, 'sock', None): # AppEngine might not have `.sock`
conn.connect()
if not conn.is_verified:
warnings.warn((
'Unverified HTTPS request is being made. '
'Adding certificate verification is strongly advised. See: '
'https://urllib3.readthedocs.org/en/latest/security.html'),
InsecureRequestWarning)
def connection_from_url(url, **kw):
"""
Given a url, return an :class:`.ConnectionPool` instance of its host.
This is a shortcut for not having to parse out the scheme, host, and port
of the url before creating an :class:`.ConnectionPool` instance.
:param url:
Absolute URL string that must include the scheme. Port is optional.
:param \**kw:
Passes additional parameters to the constructor of the appropriate
:class:`.ConnectionPool`. Useful for specifying things like
timeout, maxsize, headers, etc.
Example::
>>> conn = connection_from_url('http://google.com/')
>>> r = conn.request('GET', '/')
"""
scheme, host, port = get_host(url)
if scheme == 'https':
return HTTPSConnectionPool(host, port=port, **kw)
else:
return HTTPConnectionPool(host, port=port, **kw)
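# Illustrative sketch, not part of the original module: the fine-grained
# Timeout and Retry helpers documented above can be passed straight through
# connection_from_url(); the URL is an assumption used only for demonstration.
#
#     >>> pool = connection_from_url('http://example.com/',
#     ...                            timeout=Timeout(connect=2.0, read=7.0),
#     ...                            retries=Retry(total=3, backoff_factor=0.5))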
| mit |
mihailignatenko/erp | addons/marketing_campaign/__init__.py | 380 | 1087 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import marketing_campaign
import report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
andnovar/networkx | networkx/algorithms/approximation/dominating_set.py | 45 | 4404 | # -*- coding: utf-8 -*-
# Copyright (C) 2011-2012 by
# Nicholas Mancuso <[email protected]>
# All rights reserved.
# BSD license.
"""Functions for finding node and edge dominating sets.
A *dominating set* [1]_ for an undirected graph *G* with vertex set *V*
and edge set *E* is a subset *D* of *V* such that every vertex not in
*D* is adjacent to at least one member of *D*. An *edge dominating
set* [2]_ is a subset *F* of *E* such that every edge not in *F* is
incident to an endpoint of at least one edge in *F*.
.. [1] dominating set: https://en.wikipedia.org/wiki/Dominating_set
.. [2] edge dominating set: https://en.wikipedia.org/wiki/Edge_dominating_set
"""
from __future__ import division
from ..matching import maximal_matching
from ...utils import not_implemented_for
__all__ = ["min_weighted_dominating_set",
"min_edge_dominating_set"]
__author__ = """Nicholas Mancuso ([email protected])"""
# TODO Why doesn't this algorithm work for directed graphs?
@not_implemented_for('directed')
def min_weighted_dominating_set(G, weight=None):
"""Returns a dominating set that approximates the minimum weight node
dominating set.
Parameters
----------
G : NetworkX graph
Undirected graph.
weight : string
The node attribute storing the weight of an edge. If provided,
the node attribute with this key must be a number for each
node. If not provided, each node is assumed to have weight one.
Returns
-------
min_weight_dominating_set : set
A set of nodes, the sum of whose weights is no more than `(\log
w(V)) w(V^*)`, where `w(V)` denotes the sum of the weights of
each node in the graph and `w(V^*)` denotes the sum of the
weights of each node in the minimum weight dominating set.
Notes
-----
This algorithm computes an approximate minimum weighted dominating
set for the graph ``G``. The returned solution has weight `(\log
w(V)) w(V^*)`, where `w(V)` denotes the sum of the weights of each
node in the graph and `w(V^*)` denotes the sum of the weights of
each node in the minimum weight dominating set for the graph.
This implementation of the algorithm runs in `O(m)` time, where `m`
is the number of edges in the graph.
References
----------
.. [1] Vazirani, Vijay V.
*Approximation Algorithms*.
Springer Science & Business Media, 2001.
"""
# The unique dominating set for the null graph is the empty set.
if len(G) == 0:
return set()
# This is the dominating set that will eventually be returned.
dom_set = set()
def _cost(node_and_neighborhood):
"""Returns the cost-effectiveness of greedily choosing the given
node.
`node_and_neighborhood` is a two-tuple comprising a node and its
closed neighborhood.
"""
v, neighborhood = node_and_neighborhood
return G.node[v].get(weight, 1) / len(neighborhood - dom_set)
# This is a set of all vertices not already covered by the
# dominating set.
vertices = set(G)
# This is a dictionary mapping each node to the closed neighborhood
# of that node.
neighborhoods = {v: {v} | set(G[v]) for v in G}
# Continue until all vertices are adjacent to some node in the
# dominating set.
while vertices:
# Find the most cost-effective node to add, along with its
# closed neighborhood.
dom_node, min_set = min(neighborhoods.items(), key=_cost)
# Add the node to the dominating set and reduce the remaining
# set of nodes to cover.
dom_set.add(dom_node)
del neighborhoods[dom_node]
vertices -= min_set
return dom_set
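# Illustrative usage sketch, not part of the original module (the exact nodes
# returned may vary because dictionary iteration order is not guaranteed):
#
#     >>> import networkx as nx
#     >>> G = nx.path_graph(5)
#     >>> D = min_weighted_dominating_set(G)
#     >>> all(v in D or any(u in D for u in G[v]) for v in G)
#     True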
def min_edge_dominating_set(G):
r"""Return minimum cardinality edge dominating set.
Parameters
----------
G : NetworkX graph
Undirected graph
Returns
-------
min_edge_dominating_set : set
Returns a set of dominating edges whose size is no more than 2 * OPT.
Notes
-----
The algorithm computes an approximate solution to the edge dominating set
problem. The result is no more than 2 * OPT in terms of size of the set.
Runtime of the algorithm is `O(|E|)`.
"""
if not G:
raise ValueError("Expected non-empty NetworkX graph!")
return maximal_matching(G)
| bsd-3-clause |
Tithen-Firion/youtube-dl | youtube_dl/extractor/radiofrance.py | 91 | 2089 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class RadioFranceIE(InfoExtractor):
_VALID_URL = r'^https?://maison\.radiofrance\.fr/radiovisions/(?P<id>[^?#]+)'
IE_NAME = 'radiofrance'
_TEST = {
'url': 'http://maison.radiofrance.fr/radiovisions/one-one',
'md5': 'bdbb28ace95ed0e04faab32ba3160daf',
'info_dict': {
'id': 'one-one',
'ext': 'ogg',
'title': 'One to one',
'description': "Plutôt que d'imaginer la radio de demain comme technologie ou comme création de contenu, je veux montrer que quelles que soient ses évolutions, j'ai l'intime conviction que la radio continuera d'être un grand média de proximité pour les auditeurs.",
'uploader': 'Thomas Hercouët',
},
}
def _real_extract(self, url):
m = re.match(self._VALID_URL, url)
video_id = m.group('id')
webpage = self._download_webpage(url, video_id)
title = self._html_search_regex(r'<h1>(.*?)</h1>', webpage, 'title')
description = self._html_search_regex(
r'<div class="bloc_page_wrapper"><div class="text">(.*?)</div>',
webpage, 'description', fatal=False)
uploader = self._html_search_regex(
r'<div class="credit"> © (.*?)</div>',
webpage, 'uploader', fatal=False)
formats_str = self._html_search_regex(
r'class="jp-jplayer[^"]*" data-source="([^"]+)">',
webpage, 'audio URLs')
formats = [
{
'format_id': fm[0],
'url': fm[1],
'vcodec': 'none',
'preference': i,
}
for i, fm in
enumerate(re.findall(r"([a-z0-9]+)\s*:\s*'([^']+)'", formats_str))
]
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'formats': formats,
'description': description,
'uploader': uploader,
}
| unlicense |
IKholopov/HackUPC2017 | hackupc/env/lib/python3.5/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.py | 384 | 6309 | from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
from lxml import etree
from ..treebuilders.etree import tag_regexp
from . import base
from .. import _ihatexml
def ensure_str(s):
if s is None:
return None
elif isinstance(s, text_type):
return s
else:
return s.decode("ascii", "strict")
class Root(object):
def __init__(self, et):
self.elementtree = et
self.children = []
try:
if et.docinfo.internalDTD:
self.children.append(Doctype(self,
ensure_str(et.docinfo.root_name),
ensure_str(et.docinfo.public_id),
ensure_str(et.docinfo.system_url)))
except AttributeError:
pass
try:
node = et.getroot()
except AttributeError:
node = et
while node.getprevious() is not None:
node = node.getprevious()
while node is not None:
self.children.append(node)
node = node.getnext()
self.text = None
self.tail = None
def __getitem__(self, key):
return self.children[key]
def getnext(self):
return None
def __len__(self):
return 1
class Doctype(object):
def __init__(self, root_node, name, public_id, system_id):
self.root_node = root_node
self.name = name
self.public_id = public_id
self.system_id = system_id
self.text = None
self.tail = None
def getnext(self):
return self.root_node.children[1]
class FragmentRoot(Root):
def __init__(self, children):
self.children = [FragmentWrapper(self, child) for child in children]
self.text = self.tail = None
def getnext(self):
return None
class FragmentWrapper(object):
def __init__(self, fragment_root, obj):
self.root_node = fragment_root
self.obj = obj
if hasattr(self.obj, 'text'):
self.text = ensure_str(self.obj.text)
else:
self.text = None
if hasattr(self.obj, 'tail'):
self.tail = ensure_str(self.obj.tail)
else:
self.tail = None
def __getattr__(self, name):
return getattr(self.obj, name)
def getnext(self):
siblings = self.root_node.children
idx = siblings.index(self)
if idx < len(siblings) - 1:
return siblings[idx + 1]
else:
return None
def __getitem__(self, key):
return self.obj[key]
def __bool__(self):
return bool(self.obj)
def getparent(self):
return None
def __str__(self):
return str(self.obj)
def __unicode__(self):
return str(self.obj)
def __len__(self):
return len(self.obj)
class TreeWalker(base.NonRecursiveTreeWalker):
def __init__(self, tree):
# pylint:disable=redefined-variable-type
if isinstance(tree, list):
self.fragmentChildren = set(tree)
tree = FragmentRoot(tree)
else:
self.fragmentChildren = set()
tree = Root(tree)
base.NonRecursiveTreeWalker.__init__(self, tree)
self.filter = _ihatexml.InfosetFilter()
def getNodeDetails(self, node):
if isinstance(node, tuple): # Text node
node, key = node
assert key in ("text", "tail"), "Text nodes are text or tail, found %s" % key
return base.TEXT, ensure_str(getattr(node, key))
elif isinstance(node, Root):
return (base.DOCUMENT,)
elif isinstance(node, Doctype):
return base.DOCTYPE, node.name, node.public_id, node.system_id
elif isinstance(node, FragmentWrapper) and not hasattr(node, "tag"):
return base.TEXT, ensure_str(node.obj)
elif node.tag == etree.Comment:
return base.COMMENT, ensure_str(node.text)
elif node.tag == etree.Entity:
return base.ENTITY, ensure_str(node.text)[1:-1] # strip &;
else:
# This is assumed to be an ordinary element
match = tag_regexp.match(ensure_str(node.tag))
if match:
namespace, tag = match.groups()
else:
namespace = None
tag = ensure_str(node.tag)
attrs = {}
for name, value in list(node.attrib.items()):
name = ensure_str(name)
value = ensure_str(value)
match = tag_regexp.match(name)
if match:
attrs[(match.group(1), match.group(2))] = value
else:
attrs[(None, name)] = value
return (base.ELEMENT, namespace, self.filter.fromXmlName(tag),
attrs, len(node) > 0 or node.text)
def getFirstChild(self, node):
assert not isinstance(node, tuple), "Text nodes have no children"
assert len(node) or node.text, "Node has no children"
if node.text:
return (node, "text")
else:
return node[0]
def getNextSibling(self, node):
if isinstance(node, tuple): # Text node
node, key = node
assert key in ("text", "tail"), "Text nodes are text or tail, found %s" % key
if key == "text":
# XXX: we cannot use a "bool(node) and node[0] or None" construct here
# because node[0] might evaluate to False if it has no child element
if len(node):
return node[0]
else:
return None
else: # tail
return node.getnext()
return (node, "tail") if node.tail else node.getnext()
def getParentNode(self, node):
if isinstance(node, tuple): # Text node
node, key = node
assert key in ("text", "tail"), "Text nodes are text or tail, found %s" % key
if key == "text":
return node
# else: fallback to "normal" processing
elif node in self.fragmentChildren:
return None
return node.getparent()
| apache-2.0 |
lacrazyboy/scrapy | scrapy/utils/log.py | 108 | 6012 | # -*- coding: utf-8 -*-
import sys
import logging
import warnings
from logging.config import dictConfig
from twisted.python.failure import Failure
from twisted.python import log as twisted_log
import scrapy
from scrapy.settings import overridden_settings, Settings
from scrapy.exceptions import ScrapyDeprecationWarning
logger = logging.getLogger(__name__)
def failure_to_exc_info(failure):
"""Extract exc_info from Failure instances"""
if isinstance(failure, Failure):
return (failure.type, failure.value, failure.getTracebackObject())
class TopLevelFormatter(logging.Filter):
"""Keep only top level loggers's name (direct children from root) from
records.
This filter will replace Scrapy loggers' names with 'scrapy'. This mimics
the old Scrapy log behaviour and helps shortening long names.
Since it can't be set for just one logger (it won't propagate for its
children), it's going to be set in the root handler, with a parametrized
`loggers` list where it should act.
"""
def __init__(self, loggers=None):
self.loggers = loggers or []
def filter(self, record):
if any(record.name.startswith(l + '.') for l in self.loggers):
record.name = record.name.split('.', 1)[0]
return True
DEFAULT_LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'loggers': {
'scrapy': {
'level': 'DEBUG',
},
'twisted': {
'level': 'ERROR',
},
}
}
def configure_logging(settings=None, install_root_handler=True):
"""
Initialize logging defaults for Scrapy.
:param settings: settings used to create and configure a handler for the
root logger (default: None).
:type settings: dict, :class:`~scrapy.settings.Settings` object or ``None``
:param install_root_handler: whether to install root logging handler
(default: True)
:type install_root_handler: bool
This function does:
- Route warnings and twisted logging through Python standard logging
- Assign DEBUG and ERROR level to Scrapy and Twisted loggers respectively
- Route stdout to log if LOG_STDOUT setting is True
When ``install_root_handler`` is True (default), this function also
creates a handler for the root logger according to given settings
(see :ref:`topics-logging-settings`). You can override default options
using ``settings`` argument. When ``settings`` is empty or None, defaults
are used.
"""
if not sys.warnoptions:
# Route warnings through python logging
logging.captureWarnings(True)
observer = twisted_log.PythonLoggingObserver('twisted')
observer.start()
dictConfig(DEFAULT_LOGGING)
if isinstance(settings, dict) or settings is None:
settings = Settings(settings)
if settings.getbool('LOG_STDOUT'):
sys.stdout = StreamLogger(logging.getLogger('stdout'))
if install_root_handler:
logging.root.setLevel(logging.NOTSET)
handler = _get_handler(settings)
logging.root.addHandler(handler)
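# Illustrative sketch, not part of the original module: a stand-alone script
# (one not launched through the ``scrapy`` command) would typically call
# configure_logging() before starting a crawler; the settings values below
# are assumptions chosen for the example.
#
#     >>> configure_logging({'LOG_LEVEL': 'INFO',
#     ...                    'LOG_FORMAT': '%(levelname)s: %(message)s'})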
def _get_handler(settings):
""" Return a log handler object according to settings """
filename = settings.get('LOG_FILE')
if filename:
encoding = settings.get('LOG_ENCODING')
handler = logging.FileHandler(filename, encoding=encoding)
elif settings.getbool('LOG_ENABLED'):
handler = logging.StreamHandler()
else:
handler = logging.NullHandler()
formatter = logging.Formatter(
fmt=settings.get('LOG_FORMAT'),
datefmt=settings.get('LOG_DATEFORMAT')
)
handler.setFormatter(formatter)
handler.setLevel(settings.get('LOG_LEVEL'))
handler.addFilter(TopLevelFormatter(['scrapy']))
return handler
def log_scrapy_info(settings):
logger.info("Scrapy %(version)s started (bot: %(bot)s)",
{'version': scrapy.__version__, 'bot': settings['BOT_NAME']})
logger.info("Optional features available: %(features)s",
{'features': ", ".join(scrapy.optional_features)})
d = dict(overridden_settings(settings))
logger.info("Overridden settings: %(settings)r", {'settings': d})
class StreamLogger(object):
"""Fake file-like stream object that redirects writes to a logger instance
Taken from:
http://www.electricmonk.nl/log/2011/08/14/redirect-stdout-and-stderr-to-a-logger-in-python/
"""
def __init__(self, logger, log_level=logging.INFO):
self.logger = logger
self.log_level = log_level
self.linebuf = ''
def write(self, buf):
for line in buf.rstrip().splitlines():
self.logger.log(self.log_level, line.rstrip())
class LogCounterHandler(logging.Handler):
"""Record log levels count into a crawler stats"""
def __init__(self, crawler, *args, **kwargs):
super(LogCounterHandler, self).__init__(*args, **kwargs)
self.crawler = crawler
def emit(self, record):
sname = 'log_count/{}'.format(record.levelname)
self.crawler.stats.inc_value(sname)
def logformatter_adapter(logkws):
"""
Helper that takes the dictionary output from the methods in LogFormatter
and adapts it into a tuple of positional arguments for logger.log calls,
handling backward compatibility as well.
"""
if not {'level', 'msg', 'args'} <= set(logkws):
warnings.warn('Missing keys in LogFormatter method',
ScrapyDeprecationWarning)
if 'format' in logkws:
warnings.warn('`format` key in LogFormatter methods has been '
'deprecated, use `msg` instead',
ScrapyDeprecationWarning)
level = logkws.get('level', logging.INFO)
message = logkws.get('format', logkws.get('msg'))
# NOTE: This also handles 'args' being an empty dict, that case doesn't
# play well in logger.log calls
args = logkws if not logkws.get('args') else logkws['args']
return (level, message, args)
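# Illustrative sketch, not part of the original module: adapting the dictionary
# produced by a LogFormatter method into arguments for logger.log(); the URL in
# the args dict is an assumption.
#
#     >>> logkws = {'level': logging.INFO,
#     ...           'msg': 'Crawled %(url)s',
#     ...           'args': {'url': 'http://example.com'}}
#     >>> level, msg, args = logformatter_adapter(logkws)
#     >>> logger.log(level, msg, args)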
| bsd-3-clause |
cirrusone/phantom2 | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/net/bugzilla/bugzilla_unittest.py | 119 | 23727 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
import datetime
import StringIO
from .bugzilla import Bugzilla, BugzillaQueries, EditUsersParser
from webkitpy.common.config import urls
from webkitpy.common.config.committers import Reviewer, Committer, Contributor, CommitterList
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.net.web_mock import MockBrowser
from webkitpy.thirdparty.mock import Mock
from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup
class BugzillaTest(unittest.TestCase):
_example_attachment = '''
<attachment
isobsolete="1"
ispatch="1"
isprivate="0"
>
<attachid>33721</attachid>
<date>2009-07-29 10:23 PDT</date>
<desc>Fixed whitespace issue</desc>
<filename>patch</filename>
<type>text/plain</type>
<size>9719</size>
<attacher>[email protected]</attacher>
<flag name="review"
id="17931"
status="+"
setter="[email protected]"
/>
<flag name="commit-queue"
id="17932"
status="+"
setter="[email protected]"
/>
</attachment>
'''
_expected_example_attachment_parsing = {
'attach_date': datetime.datetime(2009, 07, 29, 10, 23),
'bug_id' : 100,
'is_obsolete' : True,
'is_patch' : True,
'id' : 33721,
'url' : "https://bugs.webkit.org/attachment.cgi?id=33721",
'name' : "Fixed whitespace issue",
'type' : "text/plain",
'review' : '+',
'reviewer_email' : '[email protected]',
'commit-queue' : '+',
'committer_email' : '[email protected]',
'attacher_email' : '[email protected]',
}
def test_url_creation(self):
# FIXME: These would be all better as doctests
bugs = Bugzilla()
self.assertIsNone(bugs.bug_url_for_bug_id(None))
self.assertIsNone(bugs.short_bug_url_for_bug_id(None))
self.assertIsNone(bugs.attachment_url_for_id(None))
def test_parse_bug_id(self):
# Test that we can parse the urls we produce.
bugs = Bugzilla()
self.assertEqual(12345, urls.parse_bug_id(bugs.short_bug_url_for_bug_id(12345)))
self.assertEqual(12345, urls.parse_bug_id(bugs.bug_url_for_bug_id(12345)))
self.assertEqual(12345, urls.parse_bug_id(bugs.bug_url_for_bug_id(12345, xml=True)))
_bug_xml = """
<bug>
<bug_id>32585</bug_id>
<creation_ts>2009-12-15 15:17 PST</creation_ts>
<short_desc>bug to test webkit-patch's and commit-queue's failures</short_desc>
<delta_ts>2009-12-27 21:04:50 PST</delta_ts>
<reporter_accessible>1</reporter_accessible>
<cclist_accessible>1</cclist_accessible>
<classification_id>1</classification_id>
<classification>Unclassified</classification>
<product>WebKit</product>
<component>Tools / Tests</component>
<version>528+ (Nightly build)</version>
<rep_platform>PC</rep_platform>
<op_sys>Mac OS X 10.5</op_sys>
<bug_status>NEW</bug_status>
<priority>P2</priority>
<bug_severity>Normal</bug_severity>
<target_milestone>---</target_milestone>
<everconfirmed>1</everconfirmed>
<reporter name="Eric Seidel">[email protected]</reporter>
<assigned_to name="Nobody">[email protected]</assigned_to>
<cc>[email protected]</cc>
<cc>[email protected]</cc>
<long_desc isprivate="0">
<who name="Eric Seidel">[email protected]</who>
<bug_when>2009-12-15 15:17:28 PST</bug_when>
<thetext>bug to test webkit-patch and commit-queue failures
Ignore this bug. Just for testing failure modes of webkit-patch and the commit-queue.</thetext>
</long_desc>
<attachment
isobsolete="0"
ispatch="1"
isprivate="0"
>
<attachid>45548</attachid>
<date>2009-12-27 23:51 PST</date>
<desc>Patch</desc>
<filename>bug-32585-20091228005112.patch</filename>
<type>text/plain</type>
<size>10882</size>
<attacher>[email protected]</attacher>
<token>1261988248-dc51409e9c421a4358f365fa8bec8357</token>
<data encoding="base64">SW5kZXg6IFdlYktpdC9tYWMvQ2hhbmdlTG9nCj09PT09PT09PT09PT09PT09PT09PT09PT09PT09
removed-because-it-was-really-long
ZEZpbmlzaExvYWRXaXRoUmVhc29uOnJlYXNvbl07Cit9CisKIEBlbmQKIAogI2VuZGlmCg==
</data>
<flag name="review"
id="27602"
status="?"
setter="[email protected]"
/>
</attachment>
</bug>
"""
_single_bug_xml = """
<?xml version="1.0" encoding="UTF-8" standalone="yes" ?>
<!DOCTYPE bugzilla SYSTEM "https://bugs.webkit.org/bugzilla.dtd">
<bugzilla version="3.2.3"
urlbase="https://bugs.webkit.org/"
maintainer="[email protected]"
exporter="[email protected]"
>
%s
</bugzilla>
""" % _bug_xml
_expected_example_bug_parsing = {
"id" : 32585,
"title" : u"bug to test webkit-patch's and commit-queue's failures",
"cc_emails" : ["[email protected]", "[email protected]"],
"reporter_email" : "[email protected]",
"assigned_to_email" : "[email protected]",
"bug_status": "NEW",
"attachments" : [{
"attach_date": datetime.datetime(2009, 12, 27, 23, 51),
'name': u'Patch',
'url' : "https://bugs.webkit.org/attachment.cgi?id=45548",
'is_obsolete': False,
'review': '?',
'is_patch': True,
'attacher_email': '[email protected]',
'bug_id': 32585,
'type': 'text/plain',
'id': 45548
}],
"comments" : [{
'comment_date': datetime.datetime(2009, 12, 15, 15, 17, 28),
'comment_email': '[email protected]',
'text': """bug to test webkit-patch and commit-queue failures
Ignore this bug. Just for testing failure modes of webkit-patch and the commit-queue.""",
}]
}
# FIXME: This should move to a central location and be shared by more unit tests.
def _assert_dictionaries_equal(self, actual, expected):
# Make sure we aren't parsing more or less than we expect
self.assertItemsEqual(actual.keys(), expected.keys())
for key, expected_value in expected.items():
self.assertEqual(actual[key], expected_value, ("Failure for key: %s: Actual='%s' Expected='%s'" % (key, actual[key], expected_value)))
def test_parse_bug_dictionary_from_xml(self):
bug = Bugzilla()._parse_bug_dictionary_from_xml(self._single_bug_xml)
self._assert_dictionaries_equal(bug, self._expected_example_bug_parsing)
_sample_multi_bug_xml = """
<bugzilla version="3.2.3" urlbase="https://bugs.webkit.org/" maintainer="[email protected]" exporter="[email protected]">
%s
%s
</bugzilla>
""" % (_bug_xml, _bug_xml)
def test_parse_bugs_from_xml(self):
bugzilla = Bugzilla()
bugs = bugzilla._parse_bugs_from_xml(self._sample_multi_bug_xml)
self.assertEqual(len(bugs), 2)
self.assertEqual(bugs[0].id(), self._expected_example_bug_parsing['id'])
bugs = bugzilla._parse_bugs_from_xml("")
self.assertEqual(len(bugs), 0)
# This could be combined into test_bug_parsing later if desired.
def test_attachment_parsing(self):
bugzilla = Bugzilla()
soup = BeautifulSoup(self._example_attachment)
attachment_element = soup.find("attachment")
attachment = bugzilla._parse_attachment_element(attachment_element, self._expected_example_attachment_parsing['bug_id'])
self.assertTrue(attachment)
self._assert_dictionaries_equal(attachment, self._expected_example_attachment_parsing)
_sample_attachment_detail_page = """
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
"http://www.w3.org/TR/html4/loose.dtd">
<html>
<head>
<title>
Attachment 41073 Details for Bug 27314</title>
<link rel="Top" href="https://bugs.webkit.org/">
<link rel="Up" href="show_bug.cgi?id=27314">
"""
def test_attachment_detail_bug_parsing(self):
bugzilla = Bugzilla()
self.assertEqual(27314, bugzilla._parse_bug_id_from_attachment_page(self._sample_attachment_detail_page))
def test_add_cc_to_bug(self):
bugzilla = Bugzilla()
bugzilla.browser = MockBrowser()
bugzilla.authenticate = lambda: None
expected_logs = "Adding ['[email protected]'] to the CC list for bug 42\n"
OutputCapture().assert_outputs(self, bugzilla.add_cc_to_bug, [42, ["[email protected]"]], expected_logs=expected_logs)
def _mock_control_item(self, name):
mock_item = Mock()
mock_item.name = name
return mock_item
def _mock_find_control(self, item_names=[], selected_index=0):
mock_control = Mock()
mock_control.items = [self._mock_control_item(name) for name in item_names]
mock_control.value = [item_names[selected_index]] if item_names else None
return lambda name, type: mock_control
def _assert_reopen(self, item_names=None, selected_index=None, extra_logs=None):
bugzilla = Bugzilla()
bugzilla.browser = MockBrowser()
bugzilla.authenticate = lambda: None
mock_find_control = self._mock_find_control(item_names, selected_index)
bugzilla.browser.find_control = mock_find_control
expected_logs = "Re-opening bug 42\n['comment']\n"
if extra_logs:
expected_logs += extra_logs
OutputCapture().assert_outputs(self, bugzilla.reopen_bug, [42, ["comment"]], expected_logs=expected_logs)
def test_reopen_bug(self):
self._assert_reopen(item_names=["REOPENED", "RESOLVED", "CLOSED"], selected_index=1)
self._assert_reopen(item_names=["UNCONFIRMED", "RESOLVED", "CLOSED"], selected_index=1)
extra_logs = "Did not reopen bug 42, it appears to already be open with status ['NEW'].\n"
self._assert_reopen(item_names=["NEW", "RESOLVED"], selected_index=0, extra_logs=extra_logs)
def test_file_object_for_upload(self):
bugzilla = Bugzilla()
file_object = StringIO.StringIO()
unicode_tor = u"WebKit \u2661 Tor Arne Vestb\u00F8!"
utf8_tor = unicode_tor.encode("utf-8")
self.assertEqual(bugzilla._file_object_for_upload(file_object), file_object)
self.assertEqual(bugzilla._file_object_for_upload(utf8_tor).read(), utf8_tor)
self.assertEqual(bugzilla._file_object_for_upload(unicode_tor).read(), utf8_tor)
def test_filename_for_upload(self):
bugzilla = Bugzilla()
mock_file = Mock()
mock_file.name = "foo"
self.assertEqual(bugzilla._filename_for_upload(mock_file, 1234), 'foo')
mock_timestamp = lambda: "now"
filename = bugzilla._filename_for_upload(StringIO.StringIO(), 1234, extension="patch", timestamp=mock_timestamp)
self.assertEqual(filename, "bug-1234-now.patch")
def test_commit_queue_flag(self):
bugzilla = Bugzilla()
bugzilla.committers = CommitterList(reviewers=[Reviewer("WebKit Reviewer", "[email protected]")],
committers=[Committer("WebKit Committer", "[email protected]")],
contributors=[Contributor("WebKit Contributor", "[email protected]")])
def assert_commit_queue_flag(mark_for_landing, mark_for_commit_queue, expected, username=None):
bugzilla.username = username
capture = OutputCapture()
capture.capture_output()
try:
self.assertEqual(bugzilla._commit_queue_flag(mark_for_landing=mark_for_landing, mark_for_commit_queue=mark_for_commit_queue), expected)
finally:
capture.restore_output()
assert_commit_queue_flag(mark_for_landing=False, mark_for_commit_queue=False, expected='X', username='[email protected]')
assert_commit_queue_flag(mark_for_landing=False, mark_for_commit_queue=True, expected='?', username='[email protected]')
assert_commit_queue_flag(mark_for_landing=False, mark_for_commit_queue=True, expected='?', username='[email protected]')
assert_commit_queue_flag(mark_for_landing=True, mark_for_commit_queue=True, expected='?', username='[email protected]')
assert_commit_queue_flag(mark_for_landing=False, mark_for_commit_queue=False, expected='X', username='[email protected]')
assert_commit_queue_flag(mark_for_landing=False, mark_for_commit_queue=True, expected='?', username='[email protected]')
assert_commit_queue_flag(mark_for_landing=True, mark_for_commit_queue=False, expected='?', username='[email protected]')
assert_commit_queue_flag(mark_for_landing=True, mark_for_commit_queue=True, expected='?', username='[email protected]')
assert_commit_queue_flag(mark_for_landing=False, mark_for_commit_queue=False, expected='X', username='[email protected]')
assert_commit_queue_flag(mark_for_landing=False, mark_for_commit_queue=True, expected='?', username='[email protected]')
assert_commit_queue_flag(mark_for_landing=True, mark_for_commit_queue=False, expected='+', username='[email protected]')
assert_commit_queue_flag(mark_for_landing=True, mark_for_commit_queue=True, expected='+', username='[email protected]')
assert_commit_queue_flag(mark_for_landing=False, mark_for_commit_queue=False, expected='X', username='[email protected]')
assert_commit_queue_flag(mark_for_landing=False, mark_for_commit_queue=True, expected='?', username='[email protected]')
assert_commit_queue_flag(mark_for_landing=True, mark_for_commit_queue=False, expected='+', username='[email protected]')
assert_commit_queue_flag(mark_for_landing=True, mark_for_commit_queue=True, expected='+', username='[email protected]')
def test__check_create_bug_response(self):
bugzilla = Bugzilla()
title_html_bugzilla_323 = "<title>Bug 101640 Submitted</title>"
self.assertEqual(bugzilla._check_create_bug_response(title_html_bugzilla_323), '101640')
title_html_bugzilla_425 = "<title>Bug 101640 Submitted – Testing webkit-patch again</title>"
self.assertEqual(bugzilla._check_create_bug_response(title_html_bugzilla_425), '101640')
class BugzillaQueriesTest(unittest.TestCase):
_sample_request_page = """
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
"http://www.w3.org/TR/html4/loose.dtd">
<html>
<head>
<title>Request Queue</title>
</head>
<body>
<h3>Flag: review</h3>
<table class="requests" cellspacing="0" cellpadding="4" border="1">
<tr>
<th>Requester</th>
<th>Requestee</th>
<th>Bug</th>
<th>Attachment</th>
<th>Created</th>
</tr>
<tr>
<td>Shinichiro Hamaji <hamaji@chromium.org></td>
<td></td>
<td><a href="show_bug.cgi?id=30015">30015: text-transform:capitalize is failing in CSS2.1 test suite</a></td>
<td><a href="attachment.cgi?id=40511&action=review">
40511: Patch v0</a></td>
<td>2009-10-02 04:58 PST</td>
</tr>
<tr>
<td>Zan Dobersek <zandobersek@gmail.com></td>
<td></td>
<td><a href="show_bug.cgi?id=26304">26304: [GTK] Add controls for playing html5 video.</a></td>
<td><a href="attachment.cgi?id=40722&action=review">
40722: Media controls, the simple approach</a></td>
<td>2009-10-06 09:13 PST</td>
</tr>
<tr>
<td>Zan Dobersek <zandobersek@gmail.com></td>
<td></td>
<td><a href="show_bug.cgi?id=26304">26304: [GTK] Add controls for playing html5 video.</a></td>
<td><a href="attachment.cgi?id=40723&action=review">
40723: Adjust the media slider thumb size</a></td>
<td>2009-10-06 09:15 PST</td>
</tr>
</table>
</body>
</html>
"""
_sample_quip_page = u"""
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
"http://www.w3.org/TR/html4/loose.dtd">
<html>
<head>
<title>Bugzilla Quip System</title>
</head>
<body>
<h2>
Existing quips:
</h2>
<ul>
<li>Everything should be made as simple as possible, but not simpler. - Albert Einstein</li>
<li>Good artists copy. Great artists steal. - Pablo Picasso</li>
<li>\u00e7gua mole em pedra dura, tanto bate at\u008e que fura.</li>
</ul>
</body>
</html>
"""
def _assert_result_count(self, queries, html, count):
self.assertEqual(queries._parse_result_count(html), count)
def test_parse_result_count(self):
queries = BugzillaQueries(None)
# Pages with results, always list the count at least twice.
self._assert_result_count(queries, '<span class="bz_result_count">314 bugs found.</span><span class="bz_result_count">314 bugs found.</span>', 314)
self._assert_result_count(queries, '<span class="bz_result_count">Zarro Boogs found.</span>', 0)
self._assert_result_count(queries, '<span class="bz_result_count">\n \nOne bug found.</span>', 1)
self.assertRaises(Exception, queries._parse_result_count, ['Invalid'])
def test_request_page_parsing(self):
queries = BugzillaQueries(None)
self.assertEqual([40511, 40722, 40723], queries._parse_attachment_ids_request_query(self._sample_request_page))
def test_quip_page_parsing(self):
queries = BugzillaQueries(None)
expected_quips = ["Everything should be made as simple as possible, but not simpler. - Albert Einstein", "Good artists copy. Great artists steal. - Pablo Picasso", u"\u00e7gua mole em pedra dura, tanto bate at\u008e que fura."]
self.assertEqual(expected_quips, queries._parse_quips(self._sample_quip_page))
def test_load_query(self):
queries = BugzillaQueries(Mock())
queries._load_query("request.cgi?action=queue&type=review&group=type")
class EditUsersParserTest(unittest.TestCase):
_example_user_results = """
<div id="bugzilla-body">
<p>1 user found.</p>
<table id="admin_table" border="1" cellpadding="4" cellspacing="0">
<tr bgcolor="#6666FF">
<th align="left">Edit user...
</th>
<th align="left">Real name
</th>
<th align="left">Account History
</th>
</tr>
<tr>
<td >
<a href="editusers.cgi?action=edit&userid=1234&matchvalue=login_name&groupid=&grouprestrict=&matchtype=substr&matchstr=abarth%40webkit.org">
abarth@webkit.org
</a>
</td>
<td >
Adam Barth
</td>
<td >
<a href="editusers.cgi?action=activity&userid=1234&matchvalue=login_name&groupid=&grouprestrict=&matchtype=substr&matchstr=abarth%40webkit.org">
View
</a>
</td>
</tr>
</table>
"""
_example_empty_user_results = """
<div id="bugzilla-body">
<p>0 users found.</p>
<table id="admin_table" border="1" cellpadding="4" cellspacing="0">
<tr bgcolor="#6666FF">
<th align="left">Edit user...
</th>
<th align="left">Real name
</th>
<th align="left">Account History
</th>
</tr>
<tr><td colspan="3" align="center"><i><none></i></td></tr>
</table>
"""
def _assert_login_userid_pairs(self, results_page, expected_logins):
parser = EditUsersParser()
logins = parser.login_userid_pairs_from_edit_user_results(results_page)
self.assertEqual(logins, expected_logins)
def test_logins_from_editusers_results(self):
self._assert_login_userid_pairs(self._example_user_results, [("[email protected]", 1234)])
self._assert_login_userid_pairs(self._example_empty_user_results, [])
_example_user_page = """<table class="main"><tr>
<th><label for="login">Login name:</label></th>
<td>eric@webkit.org
</td>
</tr>
<tr>
<th><label for="name">Real name:</label></th>
<td>Eric Seidel
</td>
</tr>
<tr>
<th>Group access:</th>
<td>
<table class="groups">
<tr>
</tr>
<tr>
<th colspan="2">User is a member of these groups</th>
</tr>
<tr class="direct">
<td class="checkbox"><input type="checkbox"
id="group_7"
name="group_7"
value="1" checked="checked" /></td>
<td class="groupname">
<label for="group_7">
<strong>canconfirm:</strong>
Can confirm a bug.
</label>
</td>
</tr>
<tr class="direct">
<td class="checkbox"><input type="checkbox"
id="group_6"
name="group_6"
value="1" /></td>
<td class="groupname">
<label for="group_6">
<strong>editbugs:</strong>
Can edit all aspects of any bug.
</label>
</td>
</tr>
</table>
</td>
</tr>
<tr>
<th>Product responsibilities:</th>
<td>
<em>none</em>
</td>
</tr>
</table>"""
def test_user_dict_from_edit_user_page(self):
parser = EditUsersParser()
user_dict = parser.user_dict_from_edit_user_page(self._example_user_page)
expected_user_dict = {u'login': u'[email protected]', u'groups': set(['canconfirm']), u'name': u'Eric Seidel'}
self.assertEqual(expected_user_dict, user_dict)
| bsd-3-clause |
ewandor/home-assistant | homeassistant/components/sensor/toon.py | 10 | 6813 | """
Component for the rebranded Quby thermostat as provided by Eneco.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.toon/
"""
import logging
import datetime
from homeassistant.helpers.entity import Entity
import homeassistant.components.toon as toon_main
_LOGGER = logging.getLogger(__name__)
STATE_ATTR_DEVICE_TYPE = 'device_type'
STATE_ATTR_LAST_CONNECTED_CHANGE = 'last_connected_change'
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Toon sensors."""
_toon_main = hass.data[toon_main.TOON_HANDLE]
sensor_items = []
sensor_items.extend([
ToonSensor(hass, 'Power_current', 'power-plug', 'Watt'),
ToonSensor(hass, 'Power_today', 'power-plug', 'kWh'),
])
if _toon_main.gas:
sensor_items.extend([
ToonSensor(hass, 'Gas_current', 'gas-cylinder', 'CM3'),
ToonSensor(hass, 'Gas_today', 'gas-cylinder', 'M3'),
])
for plug in _toon_main.toon.smartplugs:
sensor_items.extend([
FibaroSensor(hass, '{}_current_power'.format(plug.name),
plug.name, 'power-socket-eu', 'Watt'),
FibaroSensor(hass, '{}_today_energy'.format(plug.name),
plug.name, 'power-socket-eu', 'kWh'),
])
if _toon_main.toon.solar.produced or _toon_main.solar:
sensor_items.extend([
SolarSensor(hass, 'Solar_maximum', 'kWh'),
SolarSensor(hass, 'Solar_produced', 'kWh'),
SolarSensor(hass, 'Solar_value', 'Watt'),
SolarSensor(hass, 'Solar_average_produced', 'kWh'),
SolarSensor(hass, 'Solar_meter_reading_low_produced', 'kWh'),
SolarSensor(hass, 'Solar_meter_reading_produced', 'kWh'),
SolarSensor(hass, 'Solar_daily_cost_produced', 'Euro'),
])
for smokedetector in _toon_main.toon.smokedetectors:
sensor_items.append(
FibaroSmokeDetector(
hass, '{}_smoke_detector'.format(smokedetector.name),
smokedetector.device_uuid, 'alarm-bell', '%')
)
add_devices(sensor_items)
class ToonSensor(Entity):
"""Representation of a Toon sensor."""
def __init__(self, hass, name, icon, unit_of_measurement):
"""Initialize the Toon sensor."""
self._name = name
self._state = None
self._icon = 'mdi:{}'.format(icon)
self._unit_of_measurement = unit_of_measurement
self.thermos = hass.data[toon_main.TOON_HANDLE]
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def icon(self):
"""Return the mdi icon of the sensor."""
return self._icon
@property
def state(self):
"""Return the state of the sensor."""
return self.thermos.get_data(self.name.lower())
@property
def unit_of_measurement(self):
"""Return the unit this state is expressed in."""
return self._unit_of_measurement
def update(self):
"""Get the latest data from the sensor."""
self.thermos.update()
class FibaroSensor(Entity):
"""Representation of a Fibaro sensor."""
def __init__(self, hass, name, plug_name, icon, unit_of_measurement):
"""Initialize the Fibaro sensor."""
self._name = name
self._plug_name = plug_name
self._state = None
self._icon = 'mdi:{}'.format(icon)
self._unit_of_measurement = unit_of_measurement
self.toon = hass.data[toon_main.TOON_HANDLE]
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def icon(self):
"""Return the mdi icon of the sensor."""
return self._icon
@property
def state(self):
"""Return the state of the sensor."""
value = '_'.join(self.name.lower().split('_')[1:])
return self.toon.get_data(value, self._plug_name)
@property
def unit_of_measurement(self):
"""Return the unit this state is expressed in."""
return self._unit_of_measurement
def update(self):
"""Get the latest data from the sensor."""
self.toon.update()
class SolarSensor(Entity):
"""Representation of a Solar sensor."""
def __init__(self, hass, name, unit_of_measurement):
"""Initialize the Solar sensor."""
self._name = name
self._state = None
self._icon = 'mdi:weather-sunny'
self._unit_of_measurement = unit_of_measurement
self.toon = hass.data[toon_main.TOON_HANDLE]
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def icon(self):
"""Return the mdi icon of the sensor."""
return self._icon
@property
def state(self):
"""Return the state of the sensor."""
return self.toon.get_data(self.name.lower())
@property
def unit_of_measurement(self):
"""Return the unit this state is expressed in."""
return self._unit_of_measurement
def update(self):
"""Get the latest data from the sensor."""
self.toon.update()
class FibaroSmokeDetector(Entity):
"""Representation of a Fibaro smoke detector."""
def __init__(self, hass, name, uid, icon, unit_of_measurement):
"""Initialize the Fibaro smoke sensor."""
self._name = name
self._uid = uid
self._state = None
self._icon = 'mdi:{}'.format(icon)
self._unit_of_measurement = unit_of_measurement
self.toon = hass.data[toon_main.TOON_HANDLE]
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def icon(self):
"""Return the mdi icon of the sensor."""
return self._icon
@property
def state_attributes(self):
"""Return the state attributes of the smoke detectors."""
value = datetime.datetime.fromtimestamp(
int(self.toon.get_data('last_connected_change', self.name))
).strftime('%Y-%m-%d %H:%M:%S')
return {
STATE_ATTR_DEVICE_TYPE:
self.toon.get_data('device_type', self.name),
STATE_ATTR_LAST_CONNECTED_CHANGE: value,
}
@property
def state(self):
"""Return the state of the sensor."""
value = self.name.lower().split('_', 1)[1]
return self.toon.get_data(value, self.name)
@property
def unit_of_measurement(self):
"""Return the unit this state is expressed in."""
return self._unit_of_measurement
def update(self):
"""Get the latest data from the sensor."""
self.toon.update()
| apache-2.0 |
wxwilcke/pakbon-ld | src/pakbon-ld.py | 1 | 4821 | #!/usr/bin/python3
import sys
import os
import argparse
import re
import data.writer as writer
import translator
supported_version = '3.1.0'
version = '0.2'
def main(parser):
sikb_zip = re.sub('src/pakbon-ld.py', 'if/SIKB0102 versie 3.1.0 XSD en Lookup domeintabellen.zip', os.path.realpath(__file__))
args = parser.parse_args()
if args.version:
print('Pakbon-ld v{0}'.format(version))
sys.exit(0)
if args.ignore_version:
print('WARNING: version discrepancy may result in errors')
if args.endpoint == '':
args.endpoint = args.default_namespace + '/sparql/'
if len(args.align_with) > 0:
args.align_with = args.align_with[0].split()
if args.input_path == '' and args.generate_ontology is False and args.generate_vocabulary is False:
print('Missing required input (flags).\nUse \'pakbon-ld.py -h\' for help.')
sys.exit(1)
if args.output_path == '':
if args.input_path != '' :
args.output_path = os.getcwd() + '/' + re.sub(r'^(?:.*/)?(.*)\..*$', r'\1', args.input_path)
else:
args.output_path = os.getcwd() + '/' + 'SIKB0102'
(graph, ontology, vocabulary) = translator.translate(args.input_path,\
args.align_with,\
args.default_namespace,\
sikb_zip,\
args.generate_ontology,\
args.generate_vocabulary,\
args.ignore_version,\
args.align,\
args.endpoint,\
args.enable_georesolver,\
args.interactive)
if graph is not None:
writer.write(graph, args.output_path + extOf(args.serialization_format), args.serialization_format)
if ontology is not None:
writer.write(ontology, args.output_path + '_Ontology' + extOf(args.serialization_format), args.serialization_format)
if vocabulary is not None:
writer.write(vocabulary, args.output_path + '_Vocabulary' + extOf(args.serialization_format), args.serialization_format)
def extOf(sformat=None):
if sformat == 'n3':
return '.n3'
elif sformat == 'nquads':
return '.nq'
elif sformat == 'ntriples':
return '.nt'
elif sformat == 'pretty-xml':
return '.xml'
elif sformat == 'trig':
return '.trig'
elif sformat == 'trix':
return '.trix'
elif sformat == 'turtle':
return '.ttl'
elif sformat == 'xml':
return '.xml'
else:
return '.rdf'
def translate(tree=None):
if tree is None:
raise ValueError()
return translator.translate(tree)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input_path", help="input path", default='')
parser.add_argument("-o", "--output_path", help="output path", default='')
parser.add_argument("-d", "--default_namespace", help="default namespace of graph",\
default="http://www.example.org")
parser.add_argument("-f", "--serialization_format", help="serialization format of output",\
choices=["n3", "nquads", "ntriples", "pretty-xml", "trig", "trix", "turtle", "xml"], default='turtle')
parser.add_argument("--version", help="version of this tool", action="store_true")
parser.add_argument("--ignore_version", help="force processing unsupported version of pakbon",\
action="store_true")
parser.add_argument("--align", help="align encountered resources with default graph, via endpoint, or both",\
choices=["local", "global", "both"], default="off")
parser.add_argument("--enable_georesolver", help="resolve and link geospatial resources",\
action="store_true")
parser.add_argument("--align_with", help="one or more graphs to align with if align is set to 'local' or 'both'",\
nargs="*", default=[])
parser.add_argument("--endpoint", help="SPARQL endpoint to align with if align is set to 'global' or 'both'", default='')
parser.add_argument("--interactive", help="alignment mode", action="store_true")
parser.add_argument("--generate_ontology", help="generate and write ontology", action="store_true")
parser.add_argument("--generate_vocabulary", help="generate and write vocabulary", action="store_true")
main(parser)
| gpl-2.0 |
beeftornado/sentry | tests/sentry/api/endpoints/team_projects.py | 1 | 1746 | from __future__ import absolute_import
import six
from django.core.urlresolvers import reverse
from sentry.models import Project
from sentry.testutils import APITestCase
from sentry.utils.compat import map
class TeamProjectIndexTest(APITestCase):
def test_simple(self):
self.login_as(user=self.user)
team = self.create_team(members=[self.user])
project_1 = self.create_project(teams=[team], slug="fiz")
project_2 = self.create_project(teams=[team], slug="buzz")
url = reverse(
"sentry-api-0-team-project-index",
kwargs={"organization_slug": team.organization.slug, "team_slug": team.slug},
)
response = self.client.get(url)
assert response.status_code == 200
assert len(response.data) == 2
assert sorted(map(lambda x: x["id"], response.data)) == sorted(
[six.text_type(project_1.id), six.text_type(project_2.id)]
)
class TeamProjectCreateTest(APITestCase):
def test_simple(self):
self.login_as(user=self.user)
team = self.create_team(members=[self.user])
url = reverse(
"sentry-api-0-team-project-index",
kwargs={"organization_slug": team.organization.slug, "team_slug": team.slug},
)
resp = self.client.post(url, data={"name": "hello world", "slug": "foobar"})
assert resp.status_code == 201, resp.content
project = Project.objects.get(id=resp.data["id"])
assert project.name == "hello world"
assert project.slug == "foobar"
assert project.teams.first() == team
resp = self.client.post(url, data={"name": "hello world", "slug": "foobar"})
assert resp.status_code == 409, resp.content
| bsd-3-clause |
MTASZTAKI/ApertusVR | plugins/physics/bulletPhysics/3rdParty/bullet3/examples/pybullet/gym/pybullet_envs/gym_pendulum_envs.py | 4 | 3164 | from .scene_abstract import SingleRobotEmptyScene
from .env_bases import MJCFBaseBulletEnv
from robot_pendula import InvertedPendulum, InvertedPendulumSwingup, InvertedDoublePendulum
import gym, gym.spaces, gym.utils, gym.utils.seeding
import numpy as np
import pybullet
import os, sys
class InvertedPendulumBulletEnv(MJCFBaseBulletEnv):
def __init__(self):
self.robot = InvertedPendulum()
MJCFBaseBulletEnv.__init__(self, self.robot)
self.stateId = -1
def create_single_player_scene(self, bullet_client):
return SingleRobotEmptyScene(bullet_client, gravity=9.8, timestep=0.0165, frame_skip=1)
def reset(self):
if (self.stateId >= 0):
#print("InvertedPendulumBulletEnv reset p.restoreState(",self.stateId,")")
self._p.restoreState(self.stateId)
r = MJCFBaseBulletEnv.reset(self)
if (self.stateId < 0):
self.stateId = self._p.saveState()
#print("InvertedPendulumBulletEnv reset self.stateId=",self.stateId)
return r
def step(self, a):
self.robot.apply_action(a)
self.scene.global_step()
state = self.robot.calc_state() # sets self.pos_x self.pos_y
vel_penalty = 0
if self.robot.swingup:
reward = np.cos(self.robot.theta)
done = False
else:
reward = 1.0
done = np.abs(self.robot.theta) > .2
self.rewards = [float(reward)]
self.HUD(state, a, done)
return state, sum(self.rewards), done, {}
def camera_adjust(self):
self.camera.move_and_look_at(0, 1.2, 1.0, 0, 0, 0.5)
class InvertedPendulumSwingupBulletEnv(InvertedPendulumBulletEnv):
def __init__(self):
self.robot = InvertedPendulumSwingup()
MJCFBaseBulletEnv.__init__(self, self.robot)
self.stateId = -1
class InvertedDoublePendulumBulletEnv(MJCFBaseBulletEnv):
def __init__(self):
self.robot = InvertedDoublePendulum()
MJCFBaseBulletEnv.__init__(self, self.robot)
self.stateId = -1
def create_single_player_scene(self, bullet_client):
return SingleRobotEmptyScene(bullet_client, gravity=9.8, timestep=0.0165, frame_skip=1)
def reset(self):
if (self.stateId >= 0):
self._p.restoreState(self.stateId)
r = MJCFBaseBulletEnv.reset(self)
if (self.stateId < 0):
self.stateId = self._p.saveState()
return r
def step(self, a):
self.robot.apply_action(a)
self.scene.global_step()
state = self.robot.calc_state() # sets self.pos_x self.pos_y
# upright position: 0.6 (one pole) + 0.6 (second pole) * 0.5 (middle of second pole) = 0.9
# using <site> tag in original xml, upright position is 0.6 + 0.6 = 1.2, difference +0.3
dist_penalty = 0.01 * self.robot.pos_x**2 + (self.robot.pos_y + 0.3 - 2)**2
# v1, v2 = self.model.data.qvel[1:3] TODO when this fixed https://github.com/bulletphysics/bullet3/issues/1040
#vel_penalty = 1e-3 * v1**2 + 5e-3 * v2**2
vel_penalty = 0
alive_bonus = 10
done = self.robot.pos_y + 0.3 <= 1
self.rewards = [float(alive_bonus), float(-dist_penalty), float(-vel_penalty)]
self.HUD(state, a, done)
return state, sum(self.rewards), done, {}
def camera_adjust(self):
self.camera.move_and_look_at(0, 1.2, 1.2, 0, 0, 0.5)
| mit |
alfa-addon/addon | plugin.video.alfa/lib/python_libtorrent/linux_mipsel_ucs4/1.0.9/__init__.py | 362 | 1240 | #-*- coding: utf-8 -*-
'''
python-libtorrent for Kodi (script.module.libtorrent)
Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
| gpl-3.0 |
nphilipp/python-slip | slip/gtk/tools.py | 1 | 2129 | # -*- coding: utf-8 -*-
# slip.gtk.tools -- utility functions for gtk
#
# Copyright © 2004, 2007 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Thomas Woerner <[email protected]>
# Florian Festi <[email protected]>
"""This module contains the label_set_autowrap() function which makes labels
re-wrap themselves automatically if their containers change in size."""
from __future__ import absolute_import
# on Python 2, rebind range() to the lazy xrange() so it does not build a full list
if 'xrange' in dir(__builtins__):
range = xrange
import gtk
import pango
__all__ = ["label_set_autowrap"]
def label_set_autowrap(widget):
"""Make labels automatically re-wrap if their containers are resized.
Accepts label or container widgets."""
if isinstance(widget, gtk.Container):
children = widget.get_children()
for i in range(len(children)):
label_set_autowrap(children[i])
elif isinstance(widget, gtk.Label) and widget.get_line_wrap():
widget.connect_after("size-allocate", __label_size_allocate)
def __label_size_allocate(widget, allocation):
"""Callback which re-allocates the size of a label."""
layout = widget.get_layout()
(lw_old, lh_old) = layout.get_size()
# fixed width labels
if lw_old / pango.SCALE == allocation.width:
return
# set wrap width to the pango.Layout of the labels ###
layout.set_width(allocation.width * pango.SCALE)
(lw, lh) = layout.get_size()
if lh_old != lh:
widget.set_size_request(-1, lh / pango.SCALE)
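# --- Hedged usage sketch (added for illustration, not part of the original
# module): wire label_set_autowrap() to a freshly built toplevel window so its
# wrapped labels re-wrap when the window is resized. The window/label setup
# below is an assumed, minimal PyGTK arrangement.
def _example_autowrap_window():
    """Build a window with one wrapping label and enable auto re-wrapping."""
    window = gtk.Window(gtk.WINDOW_TOPLEVEL)
    label = gtk.Label("A long line of text that should re-wrap whenever the "
                      "user resizes the window.")
    label.set_line_wrap(True)  # only labels with line wrap enabled are hooked
    window.add(label)
    label_set_autowrap(window)  # walks the container and connects its labels
    window.show_all()
    return window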
| gpl-2.0 |
BAMitUp/Fantasy-Football-Shuffler | ENV/lib/python2.7/site-packages/pip/commands/install.py | 156 | 14971 | from __future__ import absolute_import
import logging
import operator
import os
import tempfile
import shutil
import warnings
try:
import wheel
except ImportError:
wheel = None
from pip.req import RequirementSet
from pip.basecommand import RequirementCommand
from pip.locations import virtualenv_no_global, distutils_scheme
from pip.exceptions import (
InstallationError, CommandError, PreviousBuildDirError,
)
from pip import cmdoptions
from pip.utils import ensure_dir
from pip.utils.build import BuildDirectory
from pip.utils.deprecation import RemovedInPip10Warning
from pip.utils.filesystem import check_path_owner
from pip.wheel import WheelCache, WheelBuilder
logger = logging.getLogger(__name__)
class InstallCommand(RequirementCommand):
"""
Install packages from:
- PyPI (and other indexes) using requirement specifiers.
- VCS project urls.
- Local project directories.
- Local or remote source archives.
pip also supports installing from "requirements files", which provide
an easy way to specify a whole environment to be installed.
"""
name = 'install'
usage = """
%prog [options] <requirement specifier> [package-index-options] ...
%prog [options] -r <requirements file> [package-index-options] ...
%prog [options] [-e] <vcs project url> ...
%prog [options] [-e] <local project path> ...
%prog [options] <archive url/path> ..."""
summary = 'Install packages.'
def __init__(self, *args, **kw):
super(InstallCommand, self).__init__(*args, **kw)
cmd_opts = self.cmd_opts
cmd_opts.add_option(cmdoptions.constraints())
cmd_opts.add_option(cmdoptions.editable())
cmd_opts.add_option(cmdoptions.requirements())
cmd_opts.add_option(cmdoptions.build_dir())
cmd_opts.add_option(
'-t', '--target',
dest='target_dir',
metavar='dir',
default=None,
help='Install packages into <dir>. '
'By default this will not replace existing files/folders in '
'<dir>. Use --upgrade to replace existing packages in <dir> '
'with new versions.'
)
cmd_opts.add_option(
'-d', '--download', '--download-dir', '--download-directory',
dest='download_dir',
metavar='dir',
default=None,
help=("Download packages into <dir> instead of installing them, "
"regardless of what's already installed."),
)
cmd_opts.add_option(cmdoptions.src())
cmd_opts.add_option(
'-U', '--upgrade',
dest='upgrade',
action='store_true',
help='Upgrade all specified packages to the newest available '
'version. This process is recursive regardless of whether '
'a dependency is already satisfied.'
)
cmd_opts.add_option(
'--force-reinstall',
dest='force_reinstall',
action='store_true',
help='When upgrading, reinstall all packages even if they are '
'already up-to-date.')
cmd_opts.add_option(
'-I', '--ignore-installed',
dest='ignore_installed',
action='store_true',
help='Ignore the installed packages (reinstalling instead).')
cmd_opts.add_option(cmdoptions.no_deps())
cmd_opts.add_option(cmdoptions.install_options())
cmd_opts.add_option(cmdoptions.global_options())
cmd_opts.add_option(
'--user',
dest='use_user_site',
action='store_true',
help="Install to the Python user install directory for your "
"platform. Typically ~/.local/, or %APPDATA%\Python on "
"Windows. (See the Python documentation for site.USER_BASE "
"for full details.)")
cmd_opts.add_option(
'--egg',
dest='as_egg',
action='store_true',
help="Install packages as eggs, not 'flat', like pip normally "
"does. This option is not about installing *from* eggs. "
"(WARNING: Because this option overrides pip's normal install"
" logic, requirements files may not behave as expected.)")
cmd_opts.add_option(
'--root',
dest='root_path',
metavar='dir',
default=None,
help="Install everything relative to this alternate root "
"directory.")
cmd_opts.add_option(
'--prefix',
dest='prefix_path',
metavar='dir',
default=None,
help="Installation prefix where lib, bin and other top-level "
"folders are placed")
cmd_opts.add_option(
"--compile",
action="store_true",
dest="compile",
default=True,
help="Compile py files to pyc",
)
cmd_opts.add_option(
"--no-compile",
action="store_false",
dest="compile",
help="Do not compile py files to pyc",
)
cmd_opts.add_option(cmdoptions.use_wheel())
cmd_opts.add_option(cmdoptions.no_use_wheel())
cmd_opts.add_option(cmdoptions.no_binary())
cmd_opts.add_option(cmdoptions.only_binary())
cmd_opts.add_option(cmdoptions.pre())
cmd_opts.add_option(cmdoptions.no_clean())
cmd_opts.add_option(cmdoptions.require_hashes())
index_opts = cmdoptions.make_option_group(
cmdoptions.index_group,
self.parser,
)
self.parser.insert_option_group(0, index_opts)
self.parser.insert_option_group(0, cmd_opts)
def run(self, options, args):
cmdoptions.resolve_wheel_no_use_binary(options)
cmdoptions.check_install_build_global(options)
if options.allow_external:
warnings.warn(
"--allow-external has been deprecated and will be removed in "
"the future. Due to changes in the repository protocol, it no "
"longer has any effect.",
RemovedInPip10Warning,
)
if options.allow_all_external:
warnings.warn(
"--allow-all-external has been deprecated and will be removed "
"in the future. Due to changes in the repository protocol, it "
"no longer has any effect.",
RemovedInPip10Warning,
)
if options.allow_unverified:
warnings.warn(
"--allow-unverified has been deprecated and will be removed "
"in the future. Due to changes in the repository protocol, it "
"no longer has any effect.",
RemovedInPip10Warning,
)
if options.download_dir:
warnings.warn(
"pip install --download has been deprecated and will be "
"removed in the future. Pip now has a download command that "
"should be used instead.",
RemovedInPip10Warning,
)
options.ignore_installed = True
if options.build_dir:
options.build_dir = os.path.abspath(options.build_dir)
options.src_dir = os.path.abspath(options.src_dir)
install_options = options.install_options or []
if options.use_user_site:
if options.prefix_path:
raise CommandError(
"Can not combine '--user' and '--prefix' as they imply "
"different installation locations"
)
if virtualenv_no_global():
raise InstallationError(
"Can not perform a '--user' install. User site-packages "
"are not visible in this virtualenv."
)
install_options.append('--user')
install_options.append('--prefix=')
temp_target_dir = None
if options.target_dir:
options.ignore_installed = True
temp_target_dir = tempfile.mkdtemp()
options.target_dir = os.path.abspath(options.target_dir)
if (os.path.exists(options.target_dir) and not
os.path.isdir(options.target_dir)):
raise CommandError(
"Target path exists but is not a directory, will not "
"continue."
)
install_options.append('--home=' + temp_target_dir)
global_options = options.global_options or []
with self._build_session(options) as session:
finder = self._build_package_finder(options, session)
build_delete = (not (options.no_clean or options.build_dir))
wheel_cache = WheelCache(options.cache_dir, options.format_control)
if options.cache_dir and not check_path_owner(options.cache_dir):
logger.warning(
"The directory '%s' or its parent directory is not owned "
"by the current user and caching wheels has been "
"disabled. check the permissions and owner of that "
"directory. If executing pip with sudo, you may want "
"sudo's -H flag.",
options.cache_dir,
)
options.cache_dir = None
with BuildDirectory(options.build_dir,
delete=build_delete) as build_dir:
requirement_set = RequirementSet(
build_dir=build_dir,
src_dir=options.src_dir,
download_dir=options.download_dir,
upgrade=options.upgrade,
as_egg=options.as_egg,
ignore_installed=options.ignore_installed,
ignore_dependencies=options.ignore_dependencies,
force_reinstall=options.force_reinstall,
use_user_site=options.use_user_site,
target_dir=temp_target_dir,
session=session,
pycompile=options.compile,
isolated=options.isolated_mode,
wheel_cache=wheel_cache,
require_hashes=options.require_hashes,
)
self.populate_requirement_set(
requirement_set, args, options, finder, session, self.name,
wheel_cache
)
if not requirement_set.has_requirements:
return
try:
if (options.download_dir or not wheel or not
options.cache_dir):
# on -d don't do complex things like building
# wheels, and don't try to build wheels when wheel is
# not installed.
requirement_set.prepare_files(finder)
else:
# build wheels before install.
wb = WheelBuilder(
requirement_set,
finder,
build_options=[],
global_options=[],
)
# Ignore the result: a failed wheel will be
# installed from the sdist/vcs whatever.
wb.build(autobuilding=True)
if not options.download_dir:
requirement_set.install(
install_options,
global_options,
root=options.root_path,
prefix=options.prefix_path,
)
reqs = sorted(
requirement_set.successfully_installed,
key=operator.attrgetter('name'))
items = []
for req in reqs:
item = req.name
try:
if hasattr(req, 'installed_version'):
if req.installed_version:
item += '-' + req.installed_version
except Exception:
pass
items.append(item)
installed = ' '.join(items)
if installed:
logger.info('Successfully installed %s', installed)
else:
downloaded = ' '.join([
req.name
for req in requirement_set.successfully_downloaded
])
if downloaded:
logger.info(
'Successfully downloaded %s', downloaded
)
except PreviousBuildDirError:
options.no_clean = True
raise
finally:
# Clean up
if not options.no_clean:
requirement_set.cleanup_files()
if options.target_dir:
ensure_dir(options.target_dir)
lib_dir = distutils_scheme('', home=temp_target_dir)['purelib']
for item in os.listdir(lib_dir):
target_item_dir = os.path.join(options.target_dir, item)
if os.path.exists(target_item_dir):
if not options.upgrade:
logger.warning(
'Target directory %s already exists. Specify '
'--upgrade to force replacement.',
target_item_dir
)
continue
if os.path.islink(target_item_dir):
logger.warning(
'Target directory %s already exists and is '
'a link. Pip will not automatically replace '
'links, please remove if replacement is '
'desired.',
target_item_dir
)
continue
if os.path.isdir(target_item_dir):
shutil.rmtree(target_item_dir)
else:
os.remove(target_item_dir)
shutil.move(
os.path.join(lib_dir, item),
target_item_dir
)
shutil.rmtree(temp_target_dir)
return requirement_set
| gpl-3.0 |
abcdelf/apm_planner | libs/mavlink/share/pyshared/pymavlink/mavextra.py | 28 | 4562 | #!/usr/bin/env python
'''
useful extra functions for use by mavlink clients
Copyright Andrew Tridgell 2011
Released under GNU GPL version 3 or later
'''
from math import *
def kmh(mps):
'''convert m/s to Km/h'''
return mps*3.6
def altitude(press_abs, ground_press=955.0, ground_temp=30):
'''calculate barometric altitude'''
return log(ground_press/press_abs)*(ground_temp+273.15)*29271.267*0.001
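# Hedged example (added for illustration): one call to altitude() with a
# made-up absolute pressure reading of 900 (same units as the 955.0 default)
# and the default ground pressure/temperature; the result is roughly 500 m
# above the reference level.
def _example_altitude_usage():
    return altitude(900.0)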
def mag_heading(RAW_IMU, ATTITUDE, declination=0, SENSOR_OFFSETS=None, ofs=None):
'''calculate heading from raw magnetometer'''
mag_x = RAW_IMU.xmag
mag_y = RAW_IMU.ymag
mag_z = RAW_IMU.zmag
if SENSOR_OFFSETS is not None and ofs is not None:
mag_x += ofs[0] - SENSOR_OFFSETS.mag_ofs_x
mag_y += ofs[1] - SENSOR_OFFSETS.mag_ofs_y
mag_z += ofs[2] - SENSOR_OFFSETS.mag_ofs_z
headX = mag_x*cos(ATTITUDE.pitch) + mag_y*sin(ATTITUDE.roll)*sin(ATTITUDE.pitch) + mag_z*cos(ATTITUDE.roll)*sin(ATTITUDE.pitch)
headY = mag_y*cos(ATTITUDE.roll) - mag_z*sin(ATTITUDE.roll)
heading = degrees(atan2(-headY,headX)) + declination
if heading < 0:
heading += 360
return heading
def mag_field(RAW_IMU, SENSOR_OFFSETS=None, ofs=None):
'''calculate magnetic field strength from raw magnetometer'''
mag_x = RAW_IMU.xmag
mag_y = RAW_IMU.ymag
mag_z = RAW_IMU.zmag
if SENSOR_OFFSETS is not None and ofs is not None:
mag_x += ofs[0] - SENSOR_OFFSETS.mag_ofs_x
mag_y += ofs[1] - SENSOR_OFFSETS.mag_ofs_y
mag_z += ofs[2] - SENSOR_OFFSETS.mag_ofs_z
return sqrt(mag_x**2 + mag_y**2 + mag_z**2)
def angle_diff(angle1, angle2):
'''show the difference between two angles in degrees'''
ret = angle1 - angle2
if ret > 180:
ret -= 360;
if ret < -180:
ret += 360
return ret
lowpass_data = {}
def lowpass(var, key, factor):
'''a simple lowpass filter'''
global lowpass_data
if not key in lowpass_data:
lowpass_data[key] = var
else:
lowpass_data[key] = factor*lowpass_data[key] + (1.0 - factor)*var
return lowpass_data[key]
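# Hedged example (added for illustration): smoothing a short, invented series
# with the key-based lowpass() above. The 'demo_alt' key is an arbitrary name
# chosen for this sketch, not a mavlink field.
def _example_lowpass_usage():
    samples = [100.0, 102.0, 98.0, 101.0, 99.0]
    # the first call seeds the filter state; later calls keep 90% of the old value
    return [lowpass(s, 'demo_alt', 0.9) for s in samples]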
last_delta = {}
def delta(var, key):
'''calculate slope'''
global last_delta
dv = 0
if key in last_delta:
dv = var - last_delta[key]
last_delta[key] = var
return dv
def delta_angle(var, key):
'''calculate slope of an angle'''
global last_delta
dv = 0
if key in last_delta:
dv = var - last_delta[key]
last_delta[key] = var
if dv > 180:
dv -= 360
if dv < -180:
dv += 360
return dv
def roll_estimate(RAW_IMU,smooth=0.7):
'''estimate roll from accelerometer'''
rx = lowpass(RAW_IMU.xacc,'rx',smooth)
ry = lowpass(RAW_IMU.yacc,'ry',smooth)
rz = lowpass(RAW_IMU.zacc,'rz',smooth)
return degrees(-asin(ry/sqrt(rx**2+ry**2+rz**2)))
def pitch_estimate(RAW_IMU, smooth=0.7):
'''estimate pitch from accelerometer'''
rx = lowpass(RAW_IMU.xacc,'rx',smooth)
ry = lowpass(RAW_IMU.yacc,'ry',smooth)
rz = lowpass(RAW_IMU.zacc,'rz',smooth)
return degrees(asin(rx/sqrt(rx**2+ry**2+rz**2)))
def gravity(RAW_IMU, SENSOR_OFFSETS=None, ofs=None, smooth=0.7):
'''estimate pitch from accelerometer'''
rx = RAW_IMU.xacc
ry = RAW_IMU.yacc
rz = RAW_IMU.zacc+45
if SENSOR_OFFSETS is not None and ofs is not None:
rx += ofs[0] - SENSOR_OFFSETS.accel_cal_x
ry += ofs[1] - SENSOR_OFFSETS.accel_cal_y
rz += ofs[2] - SENSOR_OFFSETS.accel_cal_z
return lowpass(sqrt(rx**2+ry**2+rz**2)*0.01,'_gravity',smooth)
def pitch_sim(SIMSTATE, GPS_RAW):
'''estimate pitch from SIMSTATE accels'''
xacc = SIMSTATE.xacc - lowpass(delta(GPS_RAW.v,"v")*6.6, "v", 0.9)
zacc = SIMSTATE.zacc
zacc += SIMSTATE.ygyro * GPS_RAW.v;
if xacc/zacc >= 1:
return 0
if xacc/zacc <= -1:
return -0
return degrees(-asin(xacc/zacc))
def distance_two(GPS_RAW1, GPS_RAW2):
'''distance between two points'''
lat1 = radians(GPS_RAW1.lat)
lat2 = radians(GPS_RAW2.lat)
lon1 = radians(GPS_RAW1.lon)
lon2 = radians(GPS_RAW2.lon)
dLat = lat2 - lat1
dLon = lon2 - lon1
a = sin(0.5*dLat) * sin(0.5*dLat) + sin(0.5*dLon) * sin(0.5*dLon) * cos(lat1) * cos(lat2)
c = 2.0 * atan2(sqrt(a), sqrt(1.0-a))
return 6371 * 1000 * c
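# Hedged example (added for illustration): distance_two() only needs objects
# exposing .lat/.lon in degrees, so a tiny stand-in class is enough to try it.
# The coordinates below are arbitrary sample values.
class _DemoFix(object):
    def __init__(self, lat, lon):
        self.lat = lat
        self.lon = lon
def _example_distance_usage():
    # one degree of latitude is roughly 111 km on this spherical-earth model
    return distance_two(_DemoFix(0.0, 0.0), _DemoFix(1.0, 0.0))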
first_fix = None
def distance_home(GPS_RAW):
'''distance from first fix point'''
global first_fix
if first_fix == None or first_fix.fix_type < 2:
first_fix = GPS_RAW
return 0
return distance_two(GPS_RAW, first_fix)
| agpl-3.0 |
kjyv/FloBaRoID | tools/paramErrorPlot.py | 2 | 5150 | #!/usr/bin/env python
#-*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from builtins import range
import sys
from typing import AnyStr, List
# math
import numpy as np
import numpy.linalg as la
# plotting
import matplotlib.pyplot as plt
# kinematics, dynamics and URDF reading
import iDynTree; iDynTree.init_helpers(); iDynTree.init_numpy_helpers()
import argparse
parser = argparse.ArgumentParser(description='Scale mass and inertia from <model>.')
parser.add_argument('--ref_model', required=True, type=str, help='the file to load the reference parameters from')
parser.add_argument('--model', required=True, nargs='+', action='append', type=str, help='the file to load the parameters to compare from')
args = parser.parse_args()
def loadModelfromURDF(urdf_file):
# type: (AnyStr) -> (iDynTree.DynamicsComputations)
dynComp = iDynTree.DynamicsComputations()
dynComp.loadRobotModelFromFile(urdf_file)
return dynComp
def matToNumPy(mat):
return np.fromstring(mat.toString(), sep=' ').reshape(mat.rows(), mat.cols())
def vecToNumPy(vec):
return np.fromstring(vec.toString(), sep=' ')
def plotErrors(errors, labels):
fig, ax = plt.subplots()
num_vals = len(errors[0])
std_dev = np.zeros(num_vals)
index = np.arange(num_vals)
bar_width = 1/len(errors) - 0.1
opacity = 0.4
error_config = {'ecolor': '0.3'}
colors = ['g', 'r', 'b', 'y']
for i in range(len(errors)):
plt.bar(index+bar_width*i, errors[i], bar_width,
alpha=opacity,
color=colors[i],
yerr=std_dev,
error_kw=error_config,
label=labels[i])
plt.xlabel('Link Index')
plt.ylabel('Error Norm')
plt.title('Param error by method')
plt.xticks(index + bar_width / 2, linkNames, rotation='vertical')
plt.legend()
plt.tight_layout()
plt.show()
def getParamErrors(ref_model, p_model, num_links, group="COM"):
# type: (iDynTree.DynamicsComputations, iDynTree.DynamicsComputations, int, AnyStr) -> (List[float])
""" give error for groups of params """
errors = []
for link_id in range(num_links):
p_link_name = linkNames[link_id]
p_link_id = p_model.getLinkIndex(p_link_name)
p_spat_inertia = p_model.getLinkInertia(p_link_id)
p_inertia = matToNumPy(p_spat_inertia.getRotationalInertiaWrtCenterOfMass())
p_mass = p_spat_inertia.getMass()
p_com = vecToNumPy(p_spat_inertia.getCenterOfMass())
ref_link_name = linkNames[link_id]
ref_link_id = ref_model.getLinkIndex(ref_link_name)
ref_spat_inertia = ref_model.getLinkInertia(ref_link_id)
ref_inertia = matToNumPy(ref_spat_inertia.getRotationalInertiaWrtCenterOfMass())
ref_mass = ref_spat_inertia.getMass()
ref_com = vecToNumPy(ref_spat_inertia.getCenterOfMass())
if group == 'COM':
errors.append(ref_com - p_com)
elif group == 'mass':
errors.append(ref_mass - p_mass)
elif group == 'inertia':
errors.append(ref_inertia - p_inertia)
return errors
if __name__ == '__main__':
ref_model = loadModelfromURDF(args.ref_model)
generator = iDynTree.DynamicsRegressorGenerator()
if not generator.loadRobotAndSensorsModelFromFile(args.ref_model):
sys.exit()
regrXml = '''
<regressor>
<jointTorqueDynamics>
<allJoints/>
</jointTorqueDynamics>
</regressor>'''
generator.loadRegressorStructureFromString(regrXml)
num_links = generator.getNrOfLinks() - generator.getNrOfFakeLinks()
linkNames = [] # type: List[AnyStr]
import re
for d in generator.getDescriptionOfParameters().strip().split("\n"):
link = re.findall(r"of link (.*)", d)[0]
if link not in linkNames:
linkNames.append(link)
methods_com_errors = []
methods_inertia_errors = []
# check input order
print(args.model)
for filename in args.model:
p_model = loadModelfromURDF(filename[0])
# mass error norm
mass_errors = la.norm(getParamErrors(ref_model, p_model, num_links, group='mass'))
# com error norm
com_errors = la.norm(getParamErrors(ref_model, p_model, num_links, group='COM'), axis=1)
# inertia error norm
inertia_error_tensors = getParamErrors(ref_model, p_model, num_links, group='inertia')
inertia_errors = []
for i in range(len(inertia_error_tensors)):
inertia_errors.append(la.norm(inertia_error_tensors[i]))
#methods_mass_errors.append(mass_errors)
methods_com_errors.append(com_errors)
methods_inertia_errors.append(inertia_errors)
#labels = ['ID COM (Kown Mass)', "(3) ID Inertia (Known Mass + ID'd COM)", '(2) ID Inertia + COM (Known Mass)', '(1) ID all (20% wrong masses)']
labels = ['ID Inertia + COM (Known Mass)', 'ID all parameters (20% wrong masses)']
plotErrors(methods_com_errors, labels=labels)
plotErrors(methods_inertia_errors, labels=labels)
| lgpl-3.0 |
synth3tk/the-blue-alliance | tests/test_apiv2_team_controller.py | 1 | 15175 | import unittest2
import webtest
import json
import webapp2
from datetime import datetime
from google.appengine.ext import ndb
from google.appengine.ext import testbed
from consts.event_type import EventType
from controllers.api.api_district_controller import ApiDistrictTeamsController
from controllers.api.api_team_controller import ApiTeamController, ApiTeamEventsController, ApiTeamMediaController,\
ApiTeamListController, ApiTeamHistoryRobotsController, \
ApiTeamHistoryDistrictsController
from consts.award_type import AwardType
from consts.event_type import EventType
from models.award import Award
from models.district_team import DistrictTeam
from models.event import Event
from models.event_team import EventTeam
from models.match import Match
from models.media import Media
from models.robot import Robot
from models.team import Team
class TestTeamApiController(unittest2.TestCase):
def setUp(self):
app = webapp2.WSGIApplication([webapp2.Route(r'/<team_key:>', ApiTeamController, methods=['GET'])], debug=True)
self.testapp = webtest.TestApp(app)
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_datastore_v3_stub()
self.testbed.init_urlfetch_stub()
self.testbed.init_memcache_stub()
self.testbed.init_taskqueue_stub(root_path=".")
self.team = Team(
id="frc281",
name="Michelin / Caterpillar / Greenville Technical College /\
jcpenney / Baldor / ASME / Gastroenterology Associates /\
Laserflex South & Greenville County Schools & Greenville\
Technical Charter High School",
team_number=281,
rookie_year=1999,
nickname="EnTech GreenVillians",
address="Greenville, SC, USA",
website="www.entech.org",
motto = "Infiltrating Young Minds One Robot at a Time",
)
self.team.put()
def tearDown(self):
self.testbed.deactivate()
def assertTeamJson(self, team):
self.assertEqual(team["key"], self.team.key_name)
self.assertEqual(team["team_number"], self.team.team_number)
self.assertEqual(team["nickname"], self.team.nickname)
self.assertEqual(team["location"], self.team.location)
self.assertEqual(team["locality"], "Greenville")
self.assertEqual(team["country_name"], "USA")
self.assertEqual(team["region"], "SC")
self.assertEqual(team["website"], self.team.website)
self.assertEqual(team["rookie_year"], self.team.rookie_year)
self.assertEqual(team["motto"], self.team.motto)
def testTeamApi(self):
response = self.testapp.get('/frc281', headers={"X-TBA-App-Id": "tba-tests:team-controller-test:v01"})
team_dict = json.loads(response.body)
self.assertTeamJson(team_dict)
class TestTeamEventsApiController(unittest2.TestCase):
def setUp(self):
app = webapp2.WSGIApplication([webapp2.Route(r'/<team_key:>/<year:>', ApiTeamEventsController, methods=['GET'])], debug=True)
self.testapp = webtest.TestApp(app)
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_datastore_v3_stub()
self.testbed.init_urlfetch_stub()
self.testbed.init_memcache_stub()
self.testbed.init_taskqueue_stub(root_path=".")
self.team = Team(
id="frc281",
name="Michelin / Caterpillar / Greenville Technical College /\
jcpenney / Baldor / ASME / Gastroenterology Associates /\
Laserflex South & Greenville County Schools & Greenville\
Technical Charter High School",
team_number=281,
nickname="EnTech GreenVillians",
address="Greenville, SC, USA",
website="www.entech.org",
)
self.team.put()
self.event = Event(
id="2010sc",
name="Palmetto Regional",
event_type_enum=EventType.REGIONAL,
short_name="Palmetto",
event_short="sc",
year=datetime.now().year,
end_date=datetime(2010, 03, 27),
official=True,
location='Clemson, SC',
start_date=datetime(2010, 03, 24),
)
self.event.put()
self.event_team = EventTeam(
team=self.team.key,
event=self.event.key,
year=2010
)
self.event_team.put()
def tearDown(self):
self.testbed.deactivate()
def assertEventJson(self, event):
self.assertEqual(event["key"], self.event.key_name)
self.assertEqual(event["name"], self.event.name)
self.assertEqual(event["short_name"], self.event.short_name)
self.assertEqual(event["official"], self.event.official)
self.assertEqual(event["start_date"], self.event.start_date.date().isoformat())
self.assertEqual(event["end_date"], self.event.end_date.date().isoformat())
self.assertEqual(event["event_type_string"], self.event.event_type_str)
self.assertEqual(event["event_type"], self.event.event_type_enum)
def testTeamApi(self):
response = self.testapp.get('/frc281/2010', headers={"X-TBA-App-Id": "tba-tests:team-controller-test:v01"})
event_dict = json.loads(response.body)
self.assertEventJson(event_dict[0])
class TestDistrictTeamsApiController(unittest2.TestCase):
def setUp(self):
app = webapp2.WSGIApplication([webapp2.Route(r'/<district_abbrev:>/<year:([0-9]*)>', ApiDistrictTeamsController, methods=['GET'])], debug=True)
self.testapp = webtest.TestApp(app)
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_datastore_v3_stub()
self.testbed.init_urlfetch_stub()
self.testbed.init_memcache_stub()
self.testbed.init_taskqueue_stub(root_path=".")
self.team = Team(
id="frc281",
name="Michelin / Caterpillar / Greenville Technical College /\
jcpenney / Baldor / ASME / Gastroenterology Associates /\
Laserflex South & Greenville County Schools & Greenville\
Technical Charter High School",
team_number=281,
nickname="EnTech GreenVillians",
address="Greenville, SC, USA",
website="www.entech.org",
motto = "Infiltrating Young Minds One Robot at a Time",
)
self.district_team = DistrictTeam(
id="2015ne_frc281",
team=self.team.key,
year=2015,
district=3
)
self.team.put()
self.district_team.put()
def tearDown(self):
self.testbed.deactivate()
def assertTeamJson(self, team):
team = team[0]
self.assertEqual(team["key"], self.team.key_name)
self.assertEqual(team["team_number"], self.team.team_number)
self.assertEqual(team["nickname"], self.team.nickname)
self.assertEqual(team["location"], self.team.location)
self.assertEqual(team["locality"], "Greenville")
self.assertEqual(team["country_name"], "USA")
self.assertEqual(team["region"], "SC")
self.assertEqual(team["website"], self.team.website)
self.assertEqual(team["motto"], self.team.motto)
def testDistrictsApi(self):
response = self.testapp.get('/ne/2015', headers={"X-TBA-App-Id": "tba-tests:team-districts-controller-test:v01"})
teams = json.loads(response.body)
self.assertTeamJson(teams)
class TestTeamMediaApiController(unittest2.TestCase):
def setUp(self):
app = webapp2.WSGIApplication([webapp2.Route(r'/<team_key:>/<year:>', ApiTeamMediaController, methods=['GET'])], debug=True)
self.testapp = webtest.TestApp(app)
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_datastore_v3_stub()
self.testbed.init_urlfetch_stub()
self.testbed.init_memcache_stub()
self.testbed.init_taskqueue_stub(root_path=".")
self.team = Team(
id="frc254",
name="very long name",
team_number=254,
nickname="Teh Chezy Pofs",
address="Greenville, SC, USA"
)
self.team.put()
self.cdmedia = Media(
key=ndb.Key('Media', 'cdphotothread_39894'),
details_json=u'{"image_partial": "fe3/fe38d320428adf4f51ac969efb3db32c_l.jpg"}',
foreign_key=u'39894',
media_type_enum=1,
references=[ndb.Key('Team', 'frc254')],
year=2014)
self.cdmedia.put()
self.cddetails = dict()
self.cddetails["image_partial"] = "fe3/fe38d320428adf4f51ac969efb3db32c_l.jpg"
self.ytmedia = Media(
key=ndb.Key('Media', 'youtube_aFZy8iibMD0'),
details_json=None,
foreign_key=u'aFZy8iibMD0',
media_type_enum=0,
references=[ndb.Key('Team', 'frc254')],
year=2014)
self.ytmedia.put()
def tearDown(self):
self.testbed.deactivate()
def testTeamMediaApi(self):
response = self.testapp.get('/frc254/2014', headers={"X-TBA-App-Id": "tba-tests:team_media-controller-test:v01"})
media = json.loads(response.body)
self.assertEqual(len(media), 2)
cd = media[0]
self.assertEqual(cd["type"], "cdphotothread")
self.assertEqual(cd["foreign_key"], "39894")
self.assertEqual(cd["details"], self.cddetails)
yt = media[1]
self.assertEqual(yt["type"], "youtube")
self.assertEqual(yt["foreign_key"], "aFZy8iibMD0")
self.assertEqual(yt["details"], {})
class TestTeamListApiController(unittest2.TestCase):
def setUp(self):
app = webapp2.WSGIApplication([webapp2.Route(r'/<page_num:([0-9]*)>', ApiTeamListController, methods=['GET'])], debug=True)
self.testapp = webtest.TestApp(app)
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_datastore_v3_stub()
self.testbed.init_urlfetch_stub()
self.testbed.init_memcache_stub()
self.testbed.init_taskqueue_stub(root_path=".")
self.team1 = Team(
id="frc123",
name="SomeName",
team_number=123,
nickname="SomeNickname",
address="San Jose, CA, USA",
website="www.website.com",
)
self.team2 = Team(
id="frc4567",
name="SomeName",
team_number=4567,
nickname="SomeNickname",
address="San Jose, CA, USA",
website="www.website.com",
)
self.team1.put()
self.team2.put()
def tearDown(self):
self.testbed.deactivate()
def assertTeam1Json(self, team):
self.assertEqual(team["key"], self.team1.key_name)
self.assertEqual(team["name"], self.team1.name)
self.assertEqual(team["team_number"], self.team1.team_number)
def assertTeam2Json(self, team):
self.assertEqual(team["key"], self.team2.key_name)
self.assertEqual(team["name"], self.team2.name)
self.assertEqual(team["team_number"], self.team2.team_number)
def testTeamListApi(self):
response = self.testapp.get('/0', headers={"X-TBA-App-Id": "tba-tests:team_list-controller-test:v01"})
team_list = json.loads(response.body)
self.assertTeam1Json(team_list[0])
response = self.testapp.get('/9', headers={"X-TBA-App-Id": "tba-tests:team_list-controller-test:v01"})
team_list = json.loads(response.body)
self.assertTeam2Json(team_list[0])
response = self.testapp.get('/10', headers={"X-TBA-App-Id": "tba-tests:team_list-controller-test:v01"})
team_list = json.loads(response.body)
self.assertEqual(team_list, [])
class TestTeamHistoryRobotsApiController(unittest2.TestCase):
def setUp(self):
app = webapp2.WSGIApplication([webapp2.Route(r'/<team_key:>', ApiTeamHistoryRobotsController, methods=['GET'])], debug=True)
self.testapp = webtest.TestApp(app)
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_datastore_v3_stub()
self.testbed.init_urlfetch_stub()
self.testbed.init_memcache_stub()
self.testbed.init_taskqueue_stub(root_path=".")
self.team = Team(
id="frc1124",
name="UberBots",
team_number=1124,
nickname="UberBots",
)
self.robot = Robot(
id="frc1124_2015",
team=self.team.key,
year=2015,
robot_name="Orion"
)
self.team.put()
self.robot.put()
def tearDown(self):
self.testbed.deactivate()
def testRobotApi(self):
response = self.testapp.get('/frc1124', headers={"X-TBA-App-Id": "tba-tests:team_list-controller-test:v01"})
robot_dict = json.loads(response.body)
self.assertTrue("2015" in robot_dict)
robot = robot_dict["2015"]
self.assertEqual(robot["key"], "frc1124_2015")
self.assertEqual(robot["team_key"], "frc1124")
self.assertEqual(robot["year"], 2015)
self.assertEqual(robot["name"], "Orion")
class TestTeamHistoryDistrictsApiController(unittest2.TestCase):
def setUp(self):
app = webapp2.WSGIApplication([webapp2.Route(r'/<team_key:>', ApiTeamHistoryDistrictsController, methods=['GET'])], debug=True)
self.testapp = webtest.TestApp(app)
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_datastore_v3_stub()
self.testbed.init_urlfetch_stub()
self.testbed.init_memcache_stub()
self.testbed.init_taskqueue_stub(root_path=".")
self.team = Team(
id="frc1124",
name="UberBots",
team_number=1124,
nickname="UberBots",
)
self.district_team = DistrictTeam(
id="2015ne_frc1124",
team=self.team.key,
year=2015,
district=3
)
self.team.put()
self.district_team.put()
def tearDown(self):
self.testbed.deactivate()
def testDistrictsApi(self):
response = self.testapp.get('/frc1124', headers={"X-TBA-App-Id": "tba-tests:team-history-districts-controller-test:v01"})
district_dict = json.loads(response.body)
self.assertTrue("2015" in district_dict)
district_key = district_dict["2015"]
self.assertEqual(district_key, "2015ne")
| mit |
cameron581/android_kernel_lge_msm8974 | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py | 11088 | 3246 | # Core.py - Python extension for perf script, core functions
#
# Copyright (C) 2010 by Tom Zanussi <[email protected]>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
from collections import defaultdict
def autodict():
return defaultdict(autodict)
flag_fields = autodict()
symbolic_fields = autodict()
def define_flag_field(event_name, field_name, delim):
flag_fields[event_name][field_name]['delim'] = delim
def define_flag_value(event_name, field_name, value, field_str):
flag_fields[event_name][field_name]['values'][value] = field_str
def define_symbolic_field(event_name, field_name):
# nothing to do, really
pass
def define_symbolic_value(event_name, field_name, value, field_str):
symbolic_fields[event_name][field_name]['values'][value] = field_str
def flag_str(event_name, field_name, value):
string = ""
if flag_fields[event_name][field_name]:
print_delim = 0
keys = flag_fields[event_name][field_name]['values'].keys()
keys.sort()
for idx in keys:
if not value and not idx:
string += flag_fields[event_name][field_name]['values'][idx]
break
if idx and (value & idx) == idx:
if print_delim and flag_fields[event_name][field_name]['delim']:
string += " " + flag_fields[event_name][field_name]['delim'] + " "
string += flag_fields[event_name][field_name]['values'][idx]
print_delim = 1
value &= ~idx
return string
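# Hedged example (added for illustration): how the define_flag_* helpers and
# flag_str() are typically combined. The event and field names are invented
# for this sketch only.
def _example_flag_str_usage():
    define_flag_field('demo_event', 'flags', '|')
    define_flag_value('demo_event', 'flags', 1, 'READ')
    define_flag_value('demo_event', 'flags', 2, 'WRITE')
    # a value of 3 sets both bits, so both names are joined with the delimiter
    return flag_str('demo_event', 'flags', 3)  # -> "READ | WRITE"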
def symbol_str(event_name, field_name, value):
string = ""
if symbolic_fields[event_name][field_name]:
keys = symbolic_fields[event_name][field_name]['values'].keys()
keys.sort()
for idx in keys:
if not value and not idx:
string = symbolic_fields[event_name][field_name]['values'][idx]
break
if (value == idx):
string = symbolic_fields[event_name][field_name]['values'][idx]
break
return string
trace_flags = { 0x00: "NONE", \
0x01: "IRQS_OFF", \
0x02: "IRQS_NOSUPPORT", \
0x04: "NEED_RESCHED", \
0x08: "HARDIRQ", \
0x10: "SOFTIRQ" }
def trace_flag_str(value):
string = ""
print_delim = 0
keys = trace_flags.keys()
for idx in keys:
if not value and not idx:
string += "NONE"
break
if idx and (value & idx) == idx:
if print_delim:
string += " | ";
string += trace_flags[idx]
print_delim = 1
value &= ~idx
return string
def taskState(state):
states = {
0 : "R",
1 : "S",
2 : "D",
64: "DEAD"
}
if state not in states:
return "Unknown"
return states[state]
class EventHeaders:
def __init__(self, common_cpu, common_secs, common_nsecs,
common_pid, common_comm):
self.cpu = common_cpu
self.secs = common_secs
self.nsecs = common_nsecs
self.pid = common_pid
self.comm = common_comm
def ts(self):
return (self.secs * (10 ** 9)) + self.nsecs
def ts_format(self):
return "%d.%d" % (self.secs, int(self.nsecs / 1000))
| gpl-2.0 |
wangtuo/elasticsearch | dev-tools/prepare_release_update_documentation.py | 269 | 5009 | # Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on
# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
# Prepare a release: Update the documentation and commit
#
# USAGE:
#
# python3 ./dev-tools/prepare_release_update_documentation.py
#
# Note: Ensure the script is run from the root directory
# This script needs to be run and then pushed,
# before proceeding with prepare_release_create-release-version.py
# on your build VM
#
import fnmatch
import subprocess
import tempfile
import re
import os
import shutil
def run(command):
if os.system('%s' % (command)):
raise RuntimeError(' FAILED: %s' % (command))
def ensure_checkout_is_clean():
# Make sure no local mods:
s = subprocess.check_output('git diff --shortstat', shell=True)
if len(s) > 0:
raise RuntimeError('git diff --shortstat is non-empty: got:\n%s' % s)
# Make sure no untracked files:
s = subprocess.check_output('git status', shell=True).decode('utf-8', errors='replace')
if 'Untracked files:' in s:
raise RuntimeError('git status shows untracked files: got:\n%s' % s)
# Make sure we have all changes from origin:
if 'is behind' in s:
raise RuntimeError('git status shows not all changes pulled from origin; try running "git pull origin" in this branch: got:\n%s' % (s))
  # Make sure we have no local unpushed changes (this is supposed to be a clean area):
if 'is ahead' in s:
raise RuntimeError('git status shows local commits; try running "git fetch origin", "git checkout ", "git reset --hard origin/" in this branch: got:\n%s' % (s))
# Reads the given file and applies the
# callback to it. If the callback changed
# a line, the given file is replaced with
# the modified input.
def process_file(file_path, line_callback):
fh, abs_path = tempfile.mkstemp()
modified = False
with open(abs_path,'w', encoding='utf-8') as new_file:
with open(file_path, encoding='utf-8') as old_file:
for line in old_file:
new_line = line_callback(line)
modified = modified or (new_line != line)
new_file.write(new_line)
os.close(fh)
if modified:
#Remove original file
os.remove(file_path)
#Move new file
shutil.move(abs_path, file_path)
return True
else:
# nothing to do - just remove the tmp file
os.remove(abs_path)
return False
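# Illustrative sketch (not part of the original script): using process_file()
# with a simple line callback; 'notes.txt' and the TODO/DONE markers are made
# up for this example. Returns True when at least one line was rewritten.
def _example_process_file(path='notes.txt'):
  def callback(line):
    return line.replace('TODO', 'DONE')
  return process_file(path, callback)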
# Checks the pom.xml for the release version.
# This method fails if the pom file has no SNAPSHOT version set, i.e.
# if the version is already a release version we fail.
# Returns the next version string, i.e. 0.90.7
def find_release_version():
with open('pom.xml', encoding='utf-8') as file:
for line in file:
match = re.search(r'<version>(.+)-SNAPSHOT</version>', line)
if match:
return match.group(1)
raise RuntimeError('Could not find release version in branch')
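# Illustrative sketch (not part of the original script): the same regex that
# find_release_version() applies, run here on a hand-written pom.xml line.
def _example_version_match():
  line = '  <version>1.6.0-SNAPSHOT</version>'
  match = re.search(r'<version>(.+)-SNAPSHOT</version>', line)
  return match.group(1)  # '1.6.0'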
# Stages the given files for the next git commit
def add_pending_files(*files):
for file in files:
if file:
# print("Adding file: %s" % (file))
run('git add %s' % (file))
# Updates documentation feature flags
def commit_feature_flags(release):
run('git commit -m "Update Documentation Feature Flags [%s]"' % release)
# Walks the given directory path (defaults to 'docs')
# and replaces all 'coming[$version]' tags with
# 'added[$version]'. This method only accesses asciidoc files.
def update_reference_docs(release_version, path='docs'):
pattern = 'coming[%s' % (release_version)
replacement = 'added[%s' % (release_version)
pending_files = []
def callback(line):
return line.replace(pattern, replacement)
for root, _, file_names in os.walk(path):
for file_name in fnmatch.filter(file_names, '*.asciidoc'):
full_path = os.path.join(root, file_name)
if process_file(full_path, callback):
pending_files.append(os.path.join(root, file_name))
return pending_files
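# Illustrative sketch (not part of the original script): the per-line rewrite
# that update_reference_docs() performs, shown on a hand-written asciidoc line
# for a made-up release version.
def _example_coming_to_added():
  line = 'coming[1.6.0] documents a feature first released in 1.6.0\n'
  return line.replace('coming[1.6.0', 'added[1.6.0')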
if __name__ == "__main__":
release_version = find_release_version()
print('*** Preparing release version documentation: [%s]' % release_version)
ensure_checkout_is_clean()
pending_files = update_reference_docs(release_version)
if pending_files:
add_pending_files(*pending_files) # expects var args use * to expand
commit_feature_flags(release_version)
else:
print('WARNING: no documentation references updates for release %s' % (release_version))
print('*** Done.')
| apache-2.0 |
h3llrais3r/SickRage | lib/hachoir_parser/misc/ole2.py | 74 | 14203 | """
Microsoft Office documents parser.
OLE2 files are also used by many other programs to store data.
Information:
* wordole.c of AntiWord program (v0.35)
Copyright (C) 1998-2003 A.J. van Os
Released under GNU GPL
http://www.winfield.demon.nl/
* File gsf-infile-msole.c of libgsf library (v1.14.0)
Copyright (C) 2002-2004 Jody Goldberg ([email protected])
Released under GNU LGPL 2.1
http://freshmeat.net/projects/libgsf/
* PDF from AAF Association
Copyright (C) 2004 AAF Association
Copyright (C) 1991-2003 Microsoft Corporation
http://www.aafassociation.org/html/specs/aafcontainerspec-v1.0.1.pdf
Author: Victor Stinner
Creation: 2006-04-23
"""
from hachoir_parser import HachoirParser
from hachoir_core.field import (
FieldSet, ParserError, SeekableFieldSet, RootSeekableFieldSet,
UInt8, UInt16, UInt32, UInt64, TimestampWin64, Enum,
Bytes, NullBytes, String)
from hachoir_core.text_handler import filesizeHandler
from hachoir_core.endian import LITTLE_ENDIAN, BIG_ENDIAN
from hachoir_parser.common.win32 import GUID
from hachoir_parser.misc.msoffice import PROPERTY_NAME, RootEntry, RawParser, CustomFragment
MIN_BIG_BLOCK_LOG2 = 6 # 512 bytes
MAX_BIG_BLOCK_LOG2 = 14 # 64 kB
# Number of items in DIFAT
NB_DIFAT = 109
class SECT(UInt32):
UNUSED = 0xFFFFFFFF # -1
END_OF_CHAIN = 0xFFFFFFFE # -2
BFAT_SECTOR = 0xFFFFFFFD # -3
DIFAT_SECTOR = 0xFFFFFFFC # -4
SPECIALS = set((END_OF_CHAIN, UNUSED, BFAT_SECTOR, DIFAT_SECTOR))
special_value_name = {
UNUSED: "unused",
END_OF_CHAIN: "end of a chain",
BFAT_SECTOR: "BFAT sector (in a FAT)",
DIFAT_SECTOR: "DIFAT sector (in a FAT)",
}
def __init__(self, parent, name, description=None):
UInt32.__init__(self, parent, name, description)
def createDisplay(self):
val = self.value
return SECT.special_value_name.get(val, str(val))
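# Illustrative sketch (not part of the original parser): how the special SECT
# markers above terminate a sector chain. The toy FAT dict below is hand-made;
# OLE2_File.getChain() further down walks real FAT entries the same way.
def _example_walk_toy_chain():
    toy_fat = {0: 3, 3: 7, 7: SECT.END_OF_CHAIN}
    block, chain = 0, []
    while block != SECT.END_OF_CHAIN:
        chain.append(block)
        block = toy_fat[block]
    return chain  # [0, 3, 7]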
class Property(FieldSet):
TYPE_ROOT = 5
TYPE_NAME = {
1: "storage",
2: "stream",
3: "ILockBytes",
4: "IPropertyStorage",
5: "root"
}
DECORATOR_NAME = {
0: "red",
1: "black",
}
static_size = 128 * 8
def createFields(self):
bytes = self.stream.readBytes(self.absolute_address, 4)
if bytes == "\0R\0\0":
charset = "UTF-16-BE"
else:
charset = "UTF-16-LE"
yield String(self, "name", 64, charset=charset, truncate="\0")
yield UInt16(self, "namelen", "Length of the name")
yield Enum(UInt8(self, "type", "Property type"), self.TYPE_NAME)
yield Enum(UInt8(self, "decorator", "Decorator"), self.DECORATOR_NAME)
yield SECT(self, "left")
yield SECT(self, "right")
yield SECT(self, "child", "Child node (valid for storage and root types)")
yield GUID(self, "clsid", "CLSID of this storage (valid for storage and root types)")
yield NullBytes(self, "flags", 4, "User flags")
        yield TimestampWin64(self, "creation", "Creation timestamp (valid for storage and root types)")
yield TimestampWin64(self, "lastmod", "Modify timestamp (valid for storage and root types)")
yield SECT(self, "start", "Starting SECT of the stream (valid for stream and root types)")
if self["/header/bb_shift"].value == 9:
yield filesizeHandler(UInt32(self, "size", "Size in bytes (valid for stream and root types)"))
yield NullBytes(self, "padding", 4)
else:
yield filesizeHandler(UInt64(self, "size", "Size in bytes (valid for stream and root types)"))
def createDescription(self):
name = self["name"].display
size = self["size"].display
return "Property: %s (%s)" % (name, size)
class DIFat(SeekableFieldSet):
def __init__(self, parent, name, db_start, db_count, description=None):
SeekableFieldSet.__init__(self, parent, name, description)
self.start=db_start
self.count=db_count
def createFields(self):
for index in xrange(NB_DIFAT):
yield SECT(self, "index[%u]" % index)
difat_sect = self.start
index = NB_DIFAT
entries_per_sect = self.parent.sector_size / 32 - 1
for ctr in xrange(self.count):
# this is relative to real DIFAT start
self.seekBit(NB_DIFAT*SECT.static_size + self.parent.sector_size*difat_sect)
for sect_index in xrange(entries_per_sect):
yield SECT(self, "index[%u]" % (index+sect_index))
index += entries_per_sect
next = SECT(self, "difat[%u]" % ctr)
yield next
difat_sect = next.value
class Header(FieldSet):
static_size = 68 * 8
def createFields(self):
yield GUID(self, "clsid", "16 bytes GUID used by some apps")
yield UInt16(self, "ver_min", "Minor version")
yield UInt16(self, "ver_maj", "Major version")
yield Bytes(self, "endian", 2, "Endian (\\xfe\\xff for little endian)")
yield UInt16(self, "bb_shift", "Log, base 2, of the big block size")
yield UInt16(self, "sb_shift", "Log, base 2, of the small block size")
yield NullBytes(self, "reserved[]", 6, "(reserved)")
yield UInt32(self, "csectdir", "Number of SECTs in directory chain for 4 KB sectors (version 4)")
yield UInt32(self, "bb_count", "Number of Big Block Depot blocks")
yield SECT(self, "bb_start", "Root start block")
yield NullBytes(self, "transaction", 4, "Signature used for transactions (must be zero)")
yield UInt32(self, "threshold", "Maximum size for a mini stream (typically 4096 bytes)")
yield SECT(self, "sb_start", "Small Block Depot start block")
yield UInt32(self, "sb_count")
yield SECT(self, "db_start", "First block of DIFAT")
yield UInt32(self, "db_count", "Number of SECTs in DIFAT")
# Header (ole_id, header, difat) size in bytes
HEADER_SIZE = 64 + Header.static_size + NB_DIFAT * SECT.static_size
class SectFat(FieldSet):
def __init__(self, parent, name, start, count, description=None):
FieldSet.__init__(self, parent, name, description, size=count*32)
self.count = count
self.start = start
def createFields(self):
for i in xrange(self.start, self.start + self.count):
yield SECT(self, "index[%u]" % i)
class OLE2_File(HachoirParser, RootSeekableFieldSet):
PARSER_TAGS = {
"id": "ole2",
"category": "misc",
"file_ext": (
"db", # Thumbs.db
"doc", "dot", # Microsoft Word
"ppt", "ppz", "pps", "pot", # Microsoft Powerpoint
"xls", "xla", # Microsoft Excel
"msi", # Windows installer
),
"mime": (
u"application/msword",
u"application/msexcel",
u"application/mspowerpoint",
),
"min_size": 512*8,
"description": "Microsoft Office document",
"magic": (("\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1", 0),),
}
endian = LITTLE_ENDIAN
def __init__(self, stream, **args):
RootSeekableFieldSet.__init__(self, None, "root", stream, None, stream.askSize(self))
HachoirParser.__init__(self, stream, **args)
def validate(self):
if self["ole_id"].value != "\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1":
return "Invalid magic"
if self["header/ver_maj"].value not in (3, 4):
return "Unknown major version (%s)" % self["header/ver_maj"].value
if self["header/endian"].value not in ("\xFF\xFE", "\xFE\xFF"):
return "Unknown endian (%s)" % self["header/endian"].raw_display
if not(MIN_BIG_BLOCK_LOG2 <= self["header/bb_shift"].value <= MAX_BIG_BLOCK_LOG2):
return "Invalid (log 2 of) big block size (%s)" % self["header/bb_shift"].value
if self["header/bb_shift"].value < self["header/sb_shift"].value:
return "Small block size (log2=%s) is bigger than big block size (log2=%s)!" \
% (self["header/sb_shift"].value, self["header/bb_shift"].value)
return True
def createFields(self):
# Signature
yield Bytes(self, "ole_id", 8, "OLE object signature")
header = Header(self, "header")
yield header
# Configure values
self.sector_size = (8 << header["bb_shift"].value)
self.fat_count = header["bb_count"].value
self.items_per_bbfat = self.sector_size / SECT.static_size
self.ss_size = (8 << header["sb_shift"].value)
self.items_per_ssfat = self.items_per_bbfat
# Read DIFAT (one level of indirection)
yield DIFat(self, "difat", header["db_start"].value, header["db_count"].value, "Double Indirection FAT")
# Read FAT (one level of indirection)
for field in self.readBFAT():
yield field
# Read SFAT
for field in self.readSFAT():
yield field
# Read properties
chain = self.getChain(self["header/bb_start"].value)
prop_per_sector = self.sector_size // Property.static_size
self.properties = []
for block in chain:
self.seekBlock(block)
for index in xrange(prop_per_sector):
property = Property(self, "property[]")
yield property
self.properties.append(property)
# Parse first property
for index, property in enumerate(self.properties):
if index == 0:
name, parser = 'root', RootEntry
else:
try:
name, parser = PROPERTY_NAME[property["name"].value]
except LookupError:
name = property.name+"content"
parser = RawParser
for field in self.parseProperty(property, name, parser):
yield field
def parseProperty(self, property, name_prefix, parser=RawParser):
if not property["size"].value:
return
if property["size"].value < self["header/threshold"].value and name_prefix!='root':
return
name = "%s[]" % name_prefix
first = None
previous = None
size = 0
fragment_group = None
chain = self.getChain(property["start"].value)
while True:
try:
block = chain.next()
contiguous = False
if first is None:
first = block
contiguous = True
if previous is not None and block == (previous+1):
contiguous = True
if contiguous:
previous = block
size += self.sector_size
continue
except StopIteration:
block = None
if first is None:
break
self.seekBlock(first)
desc = "Big blocks %s..%s (%s)" % (first, previous, previous-first+1)
desc += " of %s bytes" % (self.sector_size // 8)
field = CustomFragment(self, name, size, parser, desc, fragment_group)
if not fragment_group:
fragment_group = field.group
fragment_group.args["datasize"] = property["size"].value
fragment_group.args["ole2name"] = property["name"].value
yield field
if block is None:
break
first = block
previous = block
size = self.sector_size
def getChain(self, start, use_sfat=False):
if use_sfat:
fat = self.ss_fat
items_per_fat = self.items_per_ssfat
err_prefix = "SFAT chain"
else:
fat = self.bb_fat
items_per_fat = self.items_per_bbfat
err_prefix = "BFAT chain"
block = start
block_set = set()
previous = block
while block != SECT.END_OF_CHAIN:
if block in SECT.SPECIALS:
raise ParserError("%s: Invalid block index (0x%08x), previous=%s" % (err_prefix, block, previous))
if block in block_set:
raise ParserError("%s: Found a loop (%s=>%s)" % (err_prefix, previous, block))
block_set.add(block)
yield block
previous = block
index = block // items_per_fat
try:
block = fat[index]["index[%u]" % block].value
except LookupError, err:
break
def readBFAT(self):
self.bb_fat = []
start = 0
count = self.items_per_bbfat
for index, block in enumerate(self.array("difat/index")):
block = block.value
if block == SECT.UNUSED:
break
desc = "FAT %u/%u at block %u" % \
(1+index, self["header/bb_count"].value, block)
self.seekBlock(block)
field = SectFat(self, "bbfat[]", start, count, desc)
yield field
self.bb_fat.append(field)
start += count
def readSFAT(self):
chain = self.getChain(self["header/sb_start"].value)
start = 0
self.ss_fat = []
count = self.items_per_ssfat
for index, block in enumerate(chain):
self.seekBlock(block)
field = SectFat(self, "sfat[]", \
start, count, \
"SFAT %u/%u at block %u" % \
(1+index, self["header/sb_count"].value, block))
yield field
self.ss_fat.append(field)
start += count
def createContentSize(self):
max_block = 0
for fat in self.array("bbfat"):
for entry in fat:
block = entry.value
if block not in SECT.SPECIALS:
max_block = max(block, max_block)
if max_block in SECT.SPECIALS:
return None
else:
return HEADER_SIZE + (max_block+1) * self.sector_size
def seekBlock(self, block):
self.seekBit(HEADER_SIZE + block * self.sector_size)
| gpl-3.0 |
huguesv/PTVS | Python/Product/Miniconda/Miniconda3-x64/Lib/multiprocessing/reduction.py | 6 | 9361 | #
# Module which deals with pickling of objects.
#
# multiprocessing/reduction.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
from abc import ABCMeta
import copyreg
import functools
import io
import os
import pickle
import socket
import sys
from . import context
__all__ = ['send_handle', 'recv_handle', 'ForkingPickler', 'register', 'dump']
HAVE_SEND_HANDLE = (sys.platform == 'win32' or
(hasattr(socket, 'CMSG_LEN') and
hasattr(socket, 'SCM_RIGHTS') and
hasattr(socket.socket, 'sendmsg')))
#
# Pickler subclass
#
class ForkingPickler(pickle.Pickler):
'''Pickler subclass used by multiprocessing.'''
_extra_reducers = {}
_copyreg_dispatch_table = copyreg.dispatch_table
def __init__(self, *args):
super().__init__(*args)
self.dispatch_table = self._copyreg_dispatch_table.copy()
self.dispatch_table.update(self._extra_reducers)
@classmethod
def register(cls, type, reduce):
'''Register a reduce function for a type.'''
cls._extra_reducers[type] = reduce
@classmethod
def dumps(cls, obj, protocol=None):
buf = io.BytesIO()
cls(buf, protocol).dump(obj)
return buf.getbuffer()
loads = pickle.loads
register = ForkingPickler.register
def dump(obj, file, protocol=None):
'''Replacement for pickle.dump() using ForkingPickler.'''
ForkingPickler(file, protocol).dump(obj)
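# Illustrative sketch (not part of the original module): round-tripping a plain
# object through ForkingPickler.dumps()/loads(), which is what multiprocessing
# does when it ships picklable values between processes.
def _example_roundtrip():
    payload = {'answer': 42, 'items': [1, 2, 3]}
    buf = ForkingPickler.dumps(payload)
    return ForkingPickler.loads(buf) == payload  # True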
#
# Platform specific definitions
#
if sys.platform == 'win32':
# Windows
__all__ += ['DupHandle', 'duplicate', 'steal_handle']
import _winapi
def duplicate(handle, target_process=None, inheritable=False):
'''Duplicate a handle. (target_process is a handle not a pid!)'''
if target_process is None:
target_process = _winapi.GetCurrentProcess()
return _winapi.DuplicateHandle(
_winapi.GetCurrentProcess(), handle, target_process,
0, inheritable, _winapi.DUPLICATE_SAME_ACCESS)
def steal_handle(source_pid, handle):
'''Steal a handle from process identified by source_pid.'''
source_process_handle = _winapi.OpenProcess(
_winapi.PROCESS_DUP_HANDLE, False, source_pid)
try:
return _winapi.DuplicateHandle(
source_process_handle, handle,
_winapi.GetCurrentProcess(), 0, False,
_winapi.DUPLICATE_SAME_ACCESS | _winapi.DUPLICATE_CLOSE_SOURCE)
finally:
_winapi.CloseHandle(source_process_handle)
def send_handle(conn, handle, destination_pid):
'''Send a handle over a local connection.'''
dh = DupHandle(handle, _winapi.DUPLICATE_SAME_ACCESS, destination_pid)
conn.send(dh)
def recv_handle(conn):
'''Receive a handle over a local connection.'''
return conn.recv().detach()
class DupHandle(object):
'''Picklable wrapper for a handle.'''
def __init__(self, handle, access, pid=None):
if pid is None:
# We just duplicate the handle in the current process and
# let the receiving process steal the handle.
pid = os.getpid()
proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, pid)
try:
self._handle = _winapi.DuplicateHandle(
_winapi.GetCurrentProcess(),
handle, proc, access, False, 0)
finally:
_winapi.CloseHandle(proc)
self._access = access
self._pid = pid
def detach(self):
'''Get the handle. This should only be called once.'''
# retrieve handle from process which currently owns it
if self._pid == os.getpid():
# The handle has already been duplicated for this process.
return self._handle
# We must steal the handle from the process whose pid is self._pid.
proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False,
self._pid)
try:
return _winapi.DuplicateHandle(
proc, self._handle, _winapi.GetCurrentProcess(),
self._access, False, _winapi.DUPLICATE_CLOSE_SOURCE)
finally:
_winapi.CloseHandle(proc)
else:
# Unix
__all__ += ['DupFd', 'sendfds', 'recvfds']
import array
# On MacOSX we should acknowledge receipt of fds -- see Issue14669
ACKNOWLEDGE = sys.platform == 'darwin'
def sendfds(sock, fds):
'''Send an array of fds over an AF_UNIX socket.'''
fds = array.array('i', fds)
msg = bytes([len(fds) % 256])
sock.sendmsg([msg], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fds)])
if ACKNOWLEDGE and sock.recv(1) != b'A':
raise RuntimeError('did not receive acknowledgement of fd')
def recvfds(sock, size):
'''Receive an array of fds over an AF_UNIX socket.'''
a = array.array('i')
bytes_size = a.itemsize * size
msg, ancdata, flags, addr = sock.recvmsg(1, socket.CMSG_SPACE(bytes_size))
if not msg and not ancdata:
raise EOFError
try:
if ACKNOWLEDGE:
sock.send(b'A')
if len(ancdata) != 1:
raise RuntimeError('received %d items of ancdata' %
len(ancdata))
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
if (cmsg_level == socket.SOL_SOCKET and
cmsg_type == socket.SCM_RIGHTS):
if len(cmsg_data) % a.itemsize != 0:
raise ValueError
a.frombytes(cmsg_data)
if len(a) % 256 != msg[0]:
raise AssertionError(
"Len is {0:n} but msg[0] is {1!r}".format(
len(a), msg[0]))
return list(a)
except (ValueError, IndexError):
pass
raise RuntimeError('Invalid data received')
def send_handle(conn, handle, destination_pid):
'''Send a handle over a local connection.'''
with socket.fromfd(conn.fileno(), socket.AF_UNIX, socket.SOCK_STREAM) as s:
sendfds(s, [handle])
def recv_handle(conn):
'''Receive a handle over a local connection.'''
with socket.fromfd(conn.fileno(), socket.AF_UNIX, socket.SOCK_STREAM) as s:
return recvfds(s, 1)[0]
def DupFd(fd):
'''Return a wrapper for an fd.'''
popen_obj = context.get_spawning_popen()
if popen_obj is not None:
return popen_obj.DupFd(popen_obj.duplicate_for_child(fd))
elif HAVE_SEND_HANDLE:
from . import resource_sharer
return resource_sharer.DupFd(fd)
else:
raise ValueError('SCM_RIGHTS appears not to be available')
#
# Try making some callable types picklable
#
def _reduce_method(m):
if m.__self__ is None:
return getattr, (m.__class__, m.__func__.__name__)
else:
return getattr, (m.__self__, m.__func__.__name__)
class _C:
def f(self):
pass
register(type(_C().f), _reduce_method)
def _reduce_method_descriptor(m):
return getattr, (m.__objclass__, m.__name__)
register(type(list.append), _reduce_method_descriptor)
register(type(int.__add__), _reduce_method_descriptor)
def _reduce_partial(p):
return _rebuild_partial, (p.func, p.args, p.keywords or {})
def _rebuild_partial(func, args, keywords):
return functools.partial(func, *args, **keywords)
register(functools.partial, _reduce_partial)
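# Illustrative sketch (not part of the original module): with the reducers
# registered above, a functools.partial wrapping a slot method such as
# int.__add__ can be pickled by ForkingPickler.
def _example_pickle_partial():
    add_two = functools.partial(int.__add__, 2)
    clone = ForkingPickler.loads(ForkingPickler.dumps(add_two))
    return clone(5)  # 7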
#
# Make sockets picklable
#
if sys.platform == 'win32':
def _reduce_socket(s):
from .resource_sharer import DupSocket
return _rebuild_socket, (DupSocket(s),)
def _rebuild_socket(ds):
return ds.detach()
register(socket.socket, _reduce_socket)
else:
def _reduce_socket(s):
df = DupFd(s.fileno())
return _rebuild_socket, (df, s.family, s.type, s.proto)
def _rebuild_socket(df, family, type, proto):
fd = df.detach()
return socket.socket(family, type, proto, fileno=fd)
register(socket.socket, _reduce_socket)
class AbstractReducer(metaclass=ABCMeta):
'''Abstract base class for use in implementing a Reduction class
suitable for use in replacing the standard reduction mechanism
used in multiprocessing.'''
ForkingPickler = ForkingPickler
register = register
dump = dump
send_handle = send_handle
recv_handle = recv_handle
if sys.platform == 'win32':
steal_handle = steal_handle
duplicate = duplicate
DupHandle = DupHandle
else:
sendfds = sendfds
recvfds = recvfds
DupFd = DupFd
_reduce_method = _reduce_method
_reduce_method_descriptor = _reduce_method_descriptor
_rebuild_partial = _rebuild_partial
_reduce_socket = _reduce_socket
_rebuild_socket = _rebuild_socket
def __init__(self, *args):
register(type(_C().f), _reduce_method)
register(type(list.append), _reduce_method_descriptor)
register(type(int.__add__), _reduce_method_descriptor)
register(functools.partial, _reduce_partial)
register(socket.socket, _reduce_socket)
| apache-2.0 |
Nyker510/scikit-learn | sklearn/tests/test_random_projection.py | 142 | 14033 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from sklearn.metrics import euclidean_distances
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import gaussian_random_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.random_projection import SparseRandomProjection
from sklearn.random_projection import GaussianRandomProjection
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils import DataDimensionalityWarning
all_sparse_random_matrix = [sparse_random_matrix]
all_dense_random_matrix = [gaussian_random_matrix]
all_random_matrix = set(all_sparse_random_matrix + all_dense_random_matrix)
all_SparseRandomProjection = [SparseRandomProjection]
all_DenseRandomProjection = [GaussianRandomProjection]
all_RandomProjection = set(all_SparseRandomProjection +
all_DenseRandomProjection)
# Make some random data with uniformly located non-zero entries whose values
# are drawn from a Gaussian distribution
def make_sparse_random_data(n_samples, n_features, n_nonzeros):
rng = np.random.RandomState(0)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
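# Illustrative sketch (not part of the original test module): building a tiny
# random data pair with the helper above; the shape and number of non-zeros
# are made up. Duplicate coordinates are summed by tocsr(), so the sparse
# matrix stores at most n_nonzeros entries.
def _example_sparse_data():
    dense, sparse = make_sparse_random_data(4, 6, 5)
    return dense.shape, sparse.nnz  # (4, 6), <= 5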
def densify(matrix):
if not sp.issparse(matrix):
return matrix
else:
return matrix.toarray()
n_samples, n_features = (10, 1000)
n_nonzeros = int(n_samples * n_features / 100.)
data, data_csr = make_sparse_random_data(n_samples, n_features, n_nonzeros)
###############################################################################
# test on JL lemma
###############################################################################
def test_invalid_jl_domain():
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 1.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 0.0)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, -0.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 0, 0.5)
def test_input_size_jl_min_dim():
assert_raises(ValueError, johnson_lindenstrauss_min_dim,
3 * [100], 2 * [0.9])
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 3 * [100],
2 * [0.9])
johnson_lindenstrauss_min_dim(np.random.randint(1, 10, size=(10, 10)),
0.5 * np.ones((10, 10)))
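# Illustrative sketch (not part of the original test module): the safe target
# dimension computed by johnson_lindenstrauss_min_dim for 1000 samples at
# eps=0.1 -- the same 5920 that appears in the error-message test further down.
def _example_jl_min_dim():
    return johnson_lindenstrauss_min_dim(1000, eps=0.1)  # 5920 in this version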
###############################################################################
# tests random matrix generation
###############################################################################
def check_input_size_random_matrix(random_matrix):
assert_raises(ValueError, random_matrix, 0, 0)
assert_raises(ValueError, random_matrix, -1, 1)
assert_raises(ValueError, random_matrix, 1, -1)
assert_raises(ValueError, random_matrix, 1, 0)
assert_raises(ValueError, random_matrix, -1, 0)
def check_size_generated(random_matrix):
assert_equal(random_matrix(1, 5).shape, (1, 5))
assert_equal(random_matrix(5, 1).shape, (5, 1))
assert_equal(random_matrix(5, 5).shape, (5, 5))
assert_equal(random_matrix(1, 1).shape, (1, 1))
def check_zero_mean_and_unit_norm(random_matrix):
    # All random matrices should produce a transformation matrix
    # with zero mean and unit norm for each column
A = densify(random_matrix(10000, 1, random_state=0))
assert_array_almost_equal(0, np.mean(A), 3)
assert_array_almost_equal(1.0, np.linalg.norm(A), 1)
def check_input_with_sparse_random_matrix(random_matrix):
n_components, n_features = 5, 10
for density in [-1., 0.0, 1.1]:
assert_raises(ValueError,
random_matrix, n_components, n_features, density=density)
def test_basic_property_of_random_matrix():
# Check basic properties of random matrix generation
for random_matrix in all_random_matrix:
yield check_input_size_random_matrix, random_matrix
yield check_size_generated, random_matrix
yield check_zero_mean_and_unit_norm, random_matrix
for random_matrix in all_sparse_random_matrix:
yield check_input_with_sparse_random_matrix, random_matrix
random_matrix_dense = \
lambda n_components, n_features, random_state: random_matrix(
n_components, n_features, random_state=random_state,
density=1.0)
yield check_zero_mean_and_unit_norm, random_matrix_dense
def test_gaussian_random_matrix():
    # Check some statistical properties of the Gaussian random matrix
    # Check that the random matrix follows the proper distribution.
    # Let's say that each element a_{ij} of A is taken from
# a_ij ~ N(0.0, 1 / n_components).
#
n_components = 100
n_features = 1000
A = gaussian_random_matrix(n_components, n_features, random_state=0)
assert_array_almost_equal(0.0, np.mean(A), 2)
assert_array_almost_equal(np.var(A, ddof=1), 1 / n_components, 1)
def test_sparse_random_matrix():
    # Check some statistical properties of the sparse random matrix
n_components = 100
n_features = 500
for density in [0.3, 1.]:
s = 1 / density
A = sparse_random_matrix(n_components,
n_features,
density=density,
random_state=0)
A = densify(A)
# Check possible values
values = np.unique(A)
assert_in(np.sqrt(s) / np.sqrt(n_components), values)
assert_in(- np.sqrt(s) / np.sqrt(n_components), values)
if density == 1.0:
assert_equal(np.size(values), 2)
else:
assert_in(0., values)
assert_equal(np.size(values), 3)
        # Check that the random matrix follows the proper distribution.
        # Let's say that each element a_{ij} of A is taken from
#
# - -sqrt(s) / sqrt(n_components) with probability 1 / 2s
# - 0 with probability 1 - 1 / s
# - +sqrt(s) / sqrt(n_components) with probability 1 / 2s
#
assert_almost_equal(np.mean(A == 0.0),
1 - 1 / s, decimal=2)
assert_almost_equal(np.mean(A == np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.mean(A == - np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == 0.0, ddof=1),
(1 - 1 / s) * 1 / s, decimal=2)
assert_almost_equal(np.var(A == np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == - np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
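# Illustrative helper (not part of the original test module): the expected cell
# probabilities checked above, written out for one density value. For
# density=0.3, s = 1/0.3, so zeros occur with probability 1 - 1/s = 0.7 and
# each non-zero sign with probability 1/(2s) = 0.15.
def _example_sparse_matrix_probabilities(density=0.3):
    s = 1. / density
    return {'zero': 1 - 1 / s, 'plus': 1 / (2 * s), 'minus': 1 / (2 * s)}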
###############################################################################
# tests on random projection transformer
###############################################################################
def test_sparse_random_projection_transformer_invalid_density():
for RandomProjection in all_SparseRandomProjection:
assert_raises(ValueError,
RandomProjection(density=1.1).fit, data)
assert_raises(ValueError,
RandomProjection(density=0).fit, data)
assert_raises(ValueError,
RandomProjection(density=-0.1).fit, data)
def test_random_projection_transformer_invalid_input():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').fit, [0, 1, 2])
assert_raises(ValueError,
RandomProjection(n_components=-10).fit, data)
def test_try_to_transform_before_fit():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').transform, data)
def test_too_many_samples_to_find_a_safe_embedding():
data, _ = make_sparse_random_data(1000, 100, 1000)
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=0.1)
expected_msg = (
'eps=0.100000 and n_samples=1000 lead to a target dimension'
' of 5920 which is larger than the original space with'
' n_features=100')
assert_raise_message(ValueError, expected_msg, rp.fit, data)
def test_random_projection_embedding_quality():
data, _ = make_sparse_random_data(8, 5000, 15000)
eps = 0.2
original_distances = euclidean_distances(data, squared=True)
original_distances = original_distances.ravel()
non_identical = original_distances != 0.0
# remove 0 distances to avoid division by 0
original_distances = original_distances[non_identical]
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=eps, random_state=0)
projected = rp.fit_transform(data)
projected_distances = euclidean_distances(projected, squared=True)
projected_distances = projected_distances.ravel()
# remove 0 distances to avoid division by 0
projected_distances = projected_distances[non_identical]
distances_ratio = projected_distances / original_distances
# check that the automatically tuned values for the density respect the
# contract for eps: pairwise distances are preserved according to the
# Johnson-Lindenstrauss lemma
assert_less(distances_ratio.max(), 1 + eps)
assert_less(1 - eps, distances_ratio.min())
def test_SparseRandomProjection_output_representation():
for SparseRandomProjection in all_SparseRandomProjection:
# when using sparse input, the projected data can be forced to be a
# dense numpy array
rp = SparseRandomProjection(n_components=10, dense_output=True,
random_state=0)
rp.fit(data)
assert isinstance(rp.transform(data), np.ndarray)
sparse_data = sp.csr_matrix(data)
assert isinstance(rp.transform(sparse_data), np.ndarray)
# the output can be left to a sparse matrix instead
rp = SparseRandomProjection(n_components=10, dense_output=False,
random_state=0)
rp = rp.fit(data)
# output for dense input will stay dense:
assert isinstance(rp.transform(data), np.ndarray)
# output for sparse output will be sparse:
assert sp.issparse(rp.transform(sparse_data))
def test_correct_RandomProjection_dimensions_embedding():
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto',
random_state=0,
eps=0.5).fit(data)
# the number of components is adjusted from the shape of the training
# set
assert_equal(rp.n_components, 'auto')
assert_equal(rp.n_components_, 110)
if RandomProjection in all_SparseRandomProjection:
assert_equal(rp.density, 'auto')
assert_almost_equal(rp.density_, 0.03, 2)
assert_equal(rp.components_.shape, (110, n_features))
projected_1 = rp.transform(data)
assert_equal(projected_1.shape, (n_samples, 110))
# once the RP is 'fitted' the projection is always the same
projected_2 = rp.transform(data)
assert_array_equal(projected_1, projected_2)
# fit transform with same random seed will lead to the same results
rp2 = RandomProjection(random_state=0, eps=0.5)
projected_3 = rp2.fit_transform(data)
assert_array_equal(projected_1, projected_3)
# Try to transform with an input X of size different from fitted.
assert_raises(ValueError, rp.transform, data[:, 1:5])
# it is also possible to fix the number of components and the density
# level
if RandomProjection in all_SparseRandomProjection:
rp = RandomProjection(n_components=100, density=0.001,
random_state=0)
projected = rp.fit_transform(data)
assert_equal(projected.shape, (n_samples, 100))
assert_equal(rp.components_.shape, (100, n_features))
assert_less(rp.components_.nnz, 115) # close to 1% density
assert_less(85, rp.components_.nnz) # close to 1% density
def test_warning_n_components_greater_than_n_features():
n_features = 20
data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
for RandomProjection in all_RandomProjection:
assert_warns(DataDimensionalityWarning,
RandomProjection(n_components=n_features + 1).fit, data)
def test_works_with_sparse_data():
n_features = 20
data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
for RandomProjection in all_RandomProjection:
rp_dense = RandomProjection(n_components=3,
random_state=1).fit(data)
rp_sparse = RandomProjection(n_components=3,
random_state=1).fit(sp.csr_matrix(data))
assert_array_almost_equal(densify(rp_dense.components_),
densify(rp_sparse.components_))
| bsd-3-clause |
alberto-antonietti/nest-simulator | pynest/nest/tests/test_connect_fixed_indegree.py | 10 | 5729 | # -*- coding: utf-8 -*-
#
# test_connect_fixed_indegree.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import unittest
import scipy.stats
from . import test_connect_helpers as hf
from .test_connect_parameters import TestParams
class TestFixedInDegree(TestParams):
# specify connection pattern and specific params
rule = 'fixed_indegree'
conn_dict = {'rule': rule}
    # sizes of source-, target-population and indegree for connection test
# and tests in test_Params
N1 = 50
N2 = 70
Nin = 10
conn_dict['indegree'] = Nin
    # sizes of source-, target-population and indegree for statistical test
N_s = 10
N_t = 10
C = 10
# Critical values and number of iterations of two level test
stat_dict = {'alpha2': 0.05, 'n_runs': 50}
# tested on each mpi process separately
def testErrorMessages(self):
got_error = False
conn_params = self.conn_dict.copy()
conn_params['allow_autapses'] = True
conn_params['allow_multapses'] = False
conn_params['indegree'] = self.N1 + 1
try:
self.setUpNetwork(conn_params)
except hf.nest.kernel.NESTError:
got_error = True
self.assertTrue(got_error)
def testInDegree(self):
conn_params = self.conn_dict.copy()
conn_params['allow_autapses'] = False
conn_params['allow_multapses'] = False
self.setUpNetwork(conn_params)
# make sure the indegree is right
M = hf.get_connectivity_matrix(self.pop1, self.pop2)
inds = np.sum(M, axis=1)
hf.mpi_assert(inds, self.Nin * np.ones(self.N2), self)
# make sure no connections were drawn from the target to the source
# population
M = hf.get_connectivity_matrix(self.pop2, self.pop1)
M_none = np.zeros((len(self.pop1), len(self.pop2)))
hf.mpi_assert(M, M_none, self)
def testStatistics(self):
conn_params = self.conn_dict.copy()
conn_params['allow_autapses'] = True
conn_params['allow_multapses'] = True
conn_params['indegree'] = self.C
expected = hf.get_expected_degrees_fixedDegrees(
self.C, 'in', self.N_s, self.N_t)
pvalues = []
for i in range(self.stat_dict['n_runs']):
hf.reset_seed(i, self.nr_threads)
self.setUpNetwork(conn_dict=conn_params, N1=self.N_s, N2=self.N_t)
degrees = hf.get_degrees('out', self.pop1, self.pop2)
degrees = hf.gather_data(degrees)
if degrees is not None:
chi, p = hf.chi_squared_check(degrees, expected)
pvalues.append(p)
hf.mpi_barrier()
if degrees is not None:
ks, p = scipy.stats.kstest(pvalues, 'uniform')
self.assertTrue(p > self.stat_dict['alpha2'])
def testAutapsesTrue(self):
conn_params = self.conn_dict.copy()
N = 10
conn_params['allow_multapses'] = False
# test that autapses exist
conn_params['indegree'] = N
conn_params['allow_autapses'] = True
pop = hf.nest.Create('iaf_psc_alpha', N)
hf.nest.Connect(pop, pop, conn_params)
# make sure all connections do exist
M = hf.get_connectivity_matrix(pop, pop)
hf.mpi_assert(np.diag(M), np.ones(N), self)
def testAutapsesFalse(self):
conn_params = self.conn_dict.copy()
N = 10
conn_params['allow_multapses'] = False
# test that autapses were excluded
conn_params['indegree'] = N - 1
conn_params['allow_autapses'] = False
pop = hf.nest.Create('iaf_psc_alpha', N)
hf.nest.Connect(pop, pop, conn_params)
        # make sure no autapses exist
M = hf.get_connectivity_matrix(pop, pop)
hf.mpi_assert(np.diag(M), np.zeros(N), self)
def testMultapsesTrue(self):
conn_params = self.conn_dict.copy()
N = 3
conn_params['allow_autapses'] = True
# test that multapses were drawn
conn_params['indegree'] = N + 1
conn_params['allow_multapses'] = True
pop = hf.nest.Create('iaf_psc_alpha', N)
hf.nest.Connect(pop, pop, conn_params)
nr_conns = len(hf.nest.GetConnections(pop, pop))
hf.mpi_assert(nr_conns, conn_params['indegree'] * N, self)
def testMultapsesFalse(self):
conn_params = self.conn_dict.copy()
N = 3
conn_params['allow_autapses'] = True
# test that no multapses exist
conn_params['indegree'] = N
conn_params['allow_multapses'] = False
pop = hf.nest.Create('iaf_psc_alpha', N)
hf.nest.Connect(pop, pop, conn_params)
M = hf.get_connectivity_matrix(pop, pop)
M = hf.gather_data(M)
if M is not None:
            self.assertTrue(np.array_equal(M.flatten(), np.ones(N * N)))
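# Illustrative sketch (not part of the original test module): the property that
# testInDegree checks -- with rule 'fixed_indegree' every target receives
# exactly 'indegree' incoming connections, so each row sum of a dense
# (targets x sources) connectivity matrix equals Nin. M and Nin are assumed
# inputs here, not objects from the tests above.
def _example_indegree_holds(M, Nin):
    return np.all(np.sum(M, axis=1) == Nin)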
def suite():
suite = unittest.TestLoader().loadTestsFromTestCase(TestFixedInDegree)
return suite
def run():
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite())
if __name__ == '__main__':
run()
| gpl-2.0 |
supriyasawant/gstudio | gnowsys-ndf/gnowsys_ndf/ndf/views/Bib_App.py | 8 | 17324 | from django.http import HttpResponseRedirect
from django.http import HttpResponse
from django.shortcuts import render_to_response #render uncomment when to use
from django.template import RequestContext
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
from django_mongokit import get_database
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
try:
from bson import ObjectId
except ImportError: # old pymongo
from pymongo.objectid import ObjectId
from gnowsys_ndf.settings import GAPPS, MEDIA_ROOT
from gnowsys_ndf.ndf.models import GSystemType, Node
from gnowsys_ndf.ndf.views.methods import get_node_common_fields
from gnowsys_ndf.ndf.views.notify import set_notif_val
from gnowsys_ndf.ndf.views.methods import *
from gnowsys_ndf.ndf.models import *
collection = get_database()[Node.collection_name]
sitename=Site.objects.all()
if sitename :
sitename = sitename[0]
else :
sitename = ""
db = get_database()
collection = db[Node.collection_name]
collection_tr = db[Triple.collection_name]
history_manager = HistoryManager()
rcs = RCS()
Bibtex_entries=[]
dictionary={'article':["author","title","journal","year","volume","number","pages","month","key","note"]}
Bibtex_entries.append(dictionary)
dictionary={'book':["author","title","publisher","year","volume","series","address","edition","month","note","key"]}
Bibtex_entries.append(dictionary)
dictionary={'booklet':["title","author","howpublished","address","month","year","note","key"]}
Bibtex_entries.append(dictionary)
dictionary={'conference':["author","title","booktitle","year","editor","pages","organisation","publisher","month","note","key"]}
Bibtex_entries.append(dictionary)
dictionary={'inbook':["author","title","chapter","pages","publisher","year","volume","series","address","edition","month","note","key"]}
Bibtex_entries.append(dictionary)
dictionary={'incollection':["author","title","booktitle","year","editor","pages","organisation","publisher","address","month","note","key"]}
Bibtex_entries.append(dictionary)
dictionary={'inproceedings':["author","title","booktitle","year","editor","pages","organisation","publisher","address","month","note","key"]}
Bibtex_entries.append(dictionary)
dictionary={'manual':["title","author","organisation","address","edition","month","year","note","key"]}
Bibtex_entries.append(dictionary)
dictionary={'masterthesis':["author","title","school","year","address","month","note","key"]}
Bibtex_entries.append(dictionary)
dictionary={'misc':["author","title","howpublished","month","year","note","key"]}
Bibtex_entries.append(dictionary)
dictionary={'phdthesis':["author","title","school","year","address","month","note","key"]}
Bibtex_entries.append(dictionary)
dictionary={'proceedings':["title","year","editor","publisher","organisation","address","month","note","key"]}
Bibtex_entries.append(dictionary)
dictionary={'techreport':["author","title","institution","year","type","number","address","month","note","key"]}
Bibtex_entries.append(dictionary)
dictionary={'unpublished':["author","title","note","month","year","key"]}
Bibtex_entries.append(dictionary)
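# Illustrative sketch (not part of the original views): how the Bibtex_entries
# list above is used further down -- the integer 'num' coming from the request
# selects one single-key dict whose key is the BibTeX entry type and whose
# value is the list of fields for that type (Python 2, like this module).
def _example_entry_fields(num=0):
    entry = Bibtex_entries[num]
    for name, fields in entry.iteritems():
        return name, fields  # e.g. ('article', ['author', 'title', ...])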
##Bib_App function
def Bib_App(request, group_id):
"""
Renders the main page of the app
"""
ins_objectid = ObjectId()
if ins_objectid.is_valid(group_id) is False :
group_ins = collection.Node.find_one({'_type': "Group","_id": group_id})
auth = collection.Node.one({'_type': 'Author', 'name': unicode(request.user.username) })
if group_ins:
group_id = str(group_ins._id)
else :
auth = collection.Node.one({'_type': 'Author', 'name': unicode(request.user.username) })
if auth :
group_id = str(auth._id)
else :
pass
title = "Bibliographic Citations"
template = "ndf/Bib_App.html"
variable = RequestContext(request,{'title': title, 'group_id': group_id, 'groupid': group_id})
return render_to_response(template,variable)
def view_entries(request, group_id,node_id=None):
'''
    renders the list view of all entries of a specific BibTeX type; the type comes from the 'num' query parameter when node_id is not given, otherwise from node_id
'''
ins_objectid = ObjectId()
if ins_objectid.is_valid(group_id) is False :
group_ins = collection.Node.find_one({'_type': "Group","name": group_id})
auth = collection.Node.one({'_type': 'Author', 'name': unicode(request.user.username) })
if group_ins:
group_id = str(group_ins._id)
else :
auth = collection.Node.one({'_type': 'Author', 'name': unicode(request.user.username) })
if auth :
group_id = str(auth._id)
else :
pass
if node_id is None:
num = int(request.GET.get('num'))
entry=Bibtex_entries[num]
        ''' for retrieving the name of the entry from the corresponding number
'''
for name, value in entry.iteritems():
title=name
GST_ENTRY = collection.Node.one({'_type': "GSystemType", 'name':name})
title=GST_ENTRY.name
else:
        GST_ENTRY = collection.Node.one({'_id': ObjectId(node_id)})
title=GST_ENTRY.name
entry_inst = collection.GSystem.find({'member_of': {'$all': [GST_ENTRY._id]}, 'group_set': {'$all': [ObjectId(group_id)]},'status':u'PUBLISHED'})
template = "ndf/view_entries.html"
variable = RequestContext(request, {'entry_inst': entry_inst, 'group_id': group_id, 'groupid': group_id,'title':title,'num':num})
return render_to_response(template,variable)
def view_entry(request,group_id,node_id):
''' renders list view of entries of a specific bibtex type when the type is known
'''
ins_objectid = ObjectId()
if ins_objectid.is_valid(group_id) is False :
group_ins = collection.Node.find_one({'_type': "Group","name": group_id})
auth = collection.Node.one({'_type': 'Author', 'name': unicode(request.user.username) })
if group_ins:
group_id = str(group_ins._id)
else :
auth = collection.Node.one({'_type': 'Author', 'name': unicode(request.user.username) })
if auth :
group_id = str(auth._id)
else :
pass
GST_BIBTEX=collection.Node.one({'_id':ObjectId(node_id)})
title=GST_BIBTEX.name
entry_inst=collection.GSystem.find({'member_of':{'$all':[GST_BIBTEX._id]},'group_set':{'$all':[ObjectId(group_id)]},'status':u'PUBLISHED'})
template="ndf/view_entries.html"
variable = RequestContext(request, {'entry_inst': entry_inst, 'group_id': group_id, 'groupid': group_id,'title':title})
return render_to_response(template,variable)
def view_sentry(request,group_id,node_id):
''' for displaying a specific entry
'''
ins_objectid=ObjectId()
if ins_objectid.is_valid(group_id) is False:
group_ins = collection.Node.find_one({'_type': "Group","name": group_id})
auth = collection.Node.one({'_type': 'Author', 'name': unicode(request.user.username) })
if group_ins:
group_id = str(group_ins._id)
else :
auth = collection.Node.one({'_type': 'Author', 'name': unicode(request.user.username) })
if auth :
group_id = str(auth._id)
else :
pass
GST_SENTRY=collection.Node.one({'_id':ObjectId(node_id)})
GST_one=collection.Node.one({'_type':'AttributeType','name':'BibTex_entry'})
gst_bibtex=GST_SENTRY.member_of
''' converts [ObjectId('node_id')] to node_id
'''
s=str(gst_bibtex)
list(s)
gst_bibtex=(s[11:-3])
gst_bibtex=unicode(gst_bibtex, "UTF-8")
gst_bibtex=collection.Node.one({'_id':ObjectId(gst_bibtex)})
gst_id=GST_SENTRY._id
GST_SAttribute=collection.Node.one({'subject':GST_SENTRY._id,'attribute_type.$id':GST_one._id})
Bibtex=GST_SAttribute.object_value
gst_note=GST_SENTRY.content_org
variable=RequestContext(request,{'name':GST_SENTRY.name,'gst_note':gst_note,'Bibtex':Bibtex,'group_id':group_id,'groupid':group_id,'title':gst_bibtex.name})
template="ndf/view_sentry.html"
print "before return"
return render_to_response(template,variable)
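# Illustrative sketch (not part of the original views): the string slicing used
# above to recover the hex id from str() of a one-element member_of list --
# str() yields "[ObjectId('...24 hex chars...')]", so [11:-3] keeps only the
# id. The id below is a made-up example value.
def _example_member_of_slice():
    s = "[ObjectId('507f1f77bcf86cd799439011')]"
    return s[11:-3]  # '507f1f77bcf86cd799439011'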
def create_entries(request, group_id):
''' for creating a new bibtex entry_list
'''
ins_objectid = ObjectId()
if ins_objectid.is_valid(group_id) is False :
group_ins = collection.Node.find_one({'_type': "Group","name": group_id})
auth = collection.Node.one({'_type': 'Author', 'name': unicode(request.user.username) })
if group_ins:
group_id = str(group_ins._id)
else :
auth = collection.Node.one({'_type': 'Author', 'name': unicode(request.user.username) })
if auth :
group_id = str(auth._id)
else :
pass
    ''' for retrieving the fields of a particular bibtex entry
'''
num = int(request.GET.get('num'))
entry=Bibtex_entries[num]
for name, value in entry.iteritems():
title=name
list_item=value
GST_BIBTEX = collection.Node.one({'_type': 'GSystemType', 'name': title})
GST_ENTRY=collection.Node.one({'_type':'AttributeType','name':'BibTex_entry'})
GST_LIST=collection.Node.one({'_type':'AttributeType','name':'entry_list'})
GST_CITATION=collection.Node.one({'_type':'AttributeType','name':'Citation'})
context_variables = { 'title': title,
'group_id': group_id,
'groupid': group_id,
'list_item':list_item,
'num':num
}
entry_node = collection.GSystem()
cite=""
i=0
value=""
if request.method == "POST":
name=request.POST.get("name")
entry_node.name=name
citation_key=request.POST.get("citation_key")
var="@"+title+"{"+citation_key
value += "name$"+name+"%citation_key$"+citation_key+"%"
for each in list_item:
c = request.POST.get(each,"")
var += " , "+each+" = "+" { "+c+" }"
value += each + "$" + c + "%"
i = i+1
if (each == 'author' and c!= ""):
cite += c +"."
if(each == 'title' and c!= ""):
cite += c+'.'
if(each == 'year' and c != ""):
cite += "("+c+") "
if(each=='publisher' and c!=""):
cite += "publisher:"+ c + ","
if(each == 'volume' and c!=""):
cite += "volume:"+c +","
if(each == 'edition' and c!=""):
cite += "edition:"+c+","
if(each == 'pages' and c!=""):
cite += "page "+c+","
var +="}"
get_node_common_fields(request,entry_node,group_id,GST_BIBTEX)
entry_node.status=u'PUBLISHED'
entry_node.save()
'''
creating a GAttribute of AttributeType BibTex_entry for the already created GSystem
'''
GST_current=collection.Node.one({'name':name,'member_of':title})
Bibtex_entry=collection.GAttribute()
Bibtex_entry.name=unicode(name)
Bibtex_entry.subject=ObjectId(entry_node._id)
Bibtex_entry.attribute_type=GST_ENTRY
Bibtex_entry.object_value=unicode(var)
Bibtex_entry.save()
'''
creating a GAttribute of AttributeType Citation for the already created GSystem
'''
cite_key=collection.GAttribute()
cite_key.name=unicode(name)
cite_key.subject=ObjectId(entry_node._id)
cite_key.attribute_type=GST_CITATION
cite_key.object_value=unicode(cite)
cite_key.save()
'''
creating a GAttribute of AttributeType entry_list for the already created GSystem
'''
entry_list=collection.GAttribute()
entry_list.name=unicode(name)
entry_list.subject=ObjectId(entry_node._id)
entry_list.attribute_type=GST_LIST
entry_list.object_value=unicode(value)
entry_list.save()
return HttpResponseRedirect(reverse('view_entry', kwargs={'group_id': group_id, 'node_id': GST_BIBTEX._id}))
else:
return render_to_response("ndf/create_edit_entries.html",
context_variables,
context_instance=RequestContext(request))
def delete_sentry(request, group_id, node_id):
"""Change the status to Hidden.
"""
ins_objectid = ObjectId()
if ins_objectid.is_valid(group_id) is False :
group_ins = collection.Node.find_one({'_type': "Group","name": group_id})
auth = collection.Node.one({'_type': 'Author', 'name': unicode(request.user.username) })
if group_ins:
group_id = str(group_ins._id)
else :
auth = collection.Node.one({'_type': 'Author', 'name': unicode(request.user.username) })
if auth :
group_id = str(auth._id)
else :
pass
gst_entry=collection.Node.one({'_id':ObjectId(node_id)})
gst_bibtex=gst_entry.member_of
''' converts [ObjectId('node_id')] to node_id
'''
s=str(gst_bibtex)
list(s)
gst_bibtex=(s[11:-3])
gst_bibtex=unicode(gst_bibtex, "UTF-8")
op = collection.update({'_id': ObjectId(node_id)}, {'$set': {'status': u"HIDDEN"}})
return HttpResponseRedirect(reverse('view_entry', kwargs={'group_id': group_id,'node_id':gst_bibtex}))
def edit_entry(request,group_id,node_id):
'''for editing entries
'''
ins_objectid = ObjectId()
if ins_objectid.is_valid(group_id) is False :
group_ins = collection.Node.find_one({'_type': "Group","name": group_id})
auth = collection.Node.one({'_type': 'Author', 'name': unicode(request.user.username) })
if group_ins:
group_id = str(group_ins._id)
else :
auth = collection.Node.one({'_type': 'Author', 'name': unicode(request.user.username) })
if auth :
group_id = str(auth._id)
else :
pass
GST_ENTRY=collection.Node.one({'name':'entry_list','_type':'AttributeType'})
entry_list=collection.Node.one({'subject':ObjectId(node_id),'attribute_type.$id':GST_ENTRY._id})
GST_current=collection.Node.one({'_id':ObjectId(node_id)})
gst_bibtex=GST_current.member_of
''' converts [ObjectId('node_id')] to node_id
'''
s=str(gst_bibtex)
list(s)
gst_bibtex=(s[11:-3])
gst_bibtex=unicode(gst_bibtex, "UTF-8")
gst_entry=collection.Node.one({'_id':ObjectId(gst_bibtex)})
title=gst_entry.name
entry=entry_list.object_value
    ''' for creating the list of field names and values to display, parsed from the GAttribute of AttributeType entry_list
'''
tags=[]
values=[]
args=entry.split("%")
for each in args:
tags.append(each.split('$')[0])
try:
values.append(each.split('$')[1])
except:
u="not found"
content_org=GST_current.content_org
value=""
Name=str(gst_entry.name)
i=0
key=""
value=""
list_item=tags
cite=""
if request.method == "POST":
name=request.POST.get("name")
citation_key=request.POST.get("citation_key")
var="@"+title+"{"+citation_key
value += "name$"+name+"%citation_key$"+citation_key+"%"
i=2
for each in list_item[2:]:
c = request.POST.get(each,"")
var += " , "+each+" = "+" { "+c+" }"
value += each + "$" + c + "%"
i = i+1
if (each == 'author' and c!=""):
cite += c +"."
if(each == 'title' and c!= ""):
cite += c+'.'
if(each == 'year' and c != ""):
cite += "("+c+") "
if(each=='publisher' and c!=""):
cite += "publisher:"+ c + ","
if(each == 'volume' and c!=""):
cite += "volume:"+c +","
if(each == 'edition' and c!=""):
cite += "edition:"+c+","
if(each == 'pages' and c!=""):
cite += "page "+c+","
var +="}"
GST_current.save()
Bibtex=collection.Node.one({'name':'BibTex_entry','_type':'AttributeType'})
Bibtex_entry=collection.Node.one({'subject':GST_current._id,'attribute_type.$id':Bibtex._id})
Bibtex_entry.object_value=unicode(var)
Bibtex_entry.save()
Citation=collection.Node.one({'name':'Citation','_type':'AttributeType'})
cite_key=collection.Node.one({'subject':GST_current._id,'attribute_type.$id':Citation._id})
cite_key.object_value=unicode(cite)
cite_key.save()
entry=collection.Node.one({'name':'entry_list','_type':'AttributeType'})
entry_list=collection.Node.one({'subject':GST_current._id,'attribute_type.$id':entry._id})
entry_list.object_value=unicode(value)
entry_list.save()
return HttpResponseRedirect(reverse('view_entry', kwargs={'group_id': group_id, 'node_id': gst_entry._id}))
    ''' sending both lists zipped together so the template can iterate them simultaneously
'''
zipped=zip(tags,values)
variable=RequestContext(request,{'group_id':group_id,'groupid':group_id,'title':GST_current.name ,'tags':tags,'values':values,'zipped':zipped,'content_org':content_org})
template="ndf/edit_entry.html"
return render_to_response(template,variable)
| agpl-3.0 |
damdam-s/bank-payment | account_banking_mandate/models/payment_line.py | 10 | 3509 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Mandate module for openERP
# Copyright (C) 2014 Compassion CH (http://www.compassion.ch)
# @author: Cyril Sester <[email protected]>,
# Alexis de Lattre <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api, exceptions, _
class PaymentLine(models.Model):
_inherit = 'payment.line'
mandate_id = fields.Many2one(
comodel_name='account.banking.mandate', string='Direct Debit Mandate',
domain=[('state', '=', 'valid')])
@api.model
    def create(self, vals=None):
        """If the customer invoice has a mandate, take it;
        otherwise, take the first valid mandate of the bank account.
"""
if vals is None:
vals = {}
partner_bank_id = vals.get('bank_id')
move_line_id = vals.get('move_line_id')
if (self.env.context.get('search_payment_order_type') == 'debit' and
'mandate_id' not in vals):
if move_line_id:
line = self.env['account.move.line'].browse(move_line_id)
if (line.invoice and line.invoice.type == 'out_invoice' and
line.invoice.mandate_id):
vals.update({
'mandate_id': line.invoice.mandate_id.id,
'bank_id': line.invoice.mandate_id.partner_bank_id.id,
})
if partner_bank_id and 'mandate_id' not in vals:
mandates = self.env['account.banking.mandate'].search(
[('partner_bank_id', '=', partner_bank_id),
('state', '=', 'valid')])
if mandates:
vals['mandate_id'] = mandates[0].id
return super(PaymentLine, self).create(vals)
@api.one
@api.constrains('mandate_id', 'bank_id')
def _check_mandate_bank_link(self):
if (self.mandate_id and self.bank_id and
self.mandate_id.partner_bank_id.id !=
self.bank_id.id):
raise exceptions.Warning(
_("The payment line with reference '%s' has the bank account "
"'%s' which is not attached to the mandate '%s' (this "
"mandate is attached to the bank account '%s').") %
(self.name,
self.env['res.partner.bank'].name_get(
[self.bank_id.id])[0][1],
self.mandate_id.unique_mandate_reference,
self.env['res.partner.bank'].name_get(
[self.mandate_id.partner_bank_id.id])[0][1]))
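# Usage note (illustrative sketch, assuming a standard OpenERP/Odoo 8 environment):
# when a payment line is created for a direct-debit order (context key
# 'search_payment_order_type' set to 'debit') and no mandate_id is passed, create()
# first takes the mandate of the customer invoice behind move_line_id, otherwise the
# first valid mandate of the given bank account. Given an environment `env`, roughly:
#
#     line = env['payment.line'].with_context(
#         search_payment_order_type='debit').create({
#             'move_line_id': move_line.id,  # move line of a customer invoice
#             'bank_id': partner_bank.id,
#         })
#     # line.mandate_id now holds the selected mandate, and the constraint
#     # _check_mandate_bank_link above guards that mandate and bank account agree.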
| agpl-3.0 |
woutdenolf/spectrocrunch | spectrocrunch/align/tests/helper_teststack.py | 1 | 12436 | # -*- coding: utf-8 -*-
import numpy as np
# import scipy.ndimage
from scipy.stats import rv_continuous
from ..types import transformationType
def makeGaussian(x, y, x0, y0, sx, sy, rho, A):
num = (
(x - x0) ** 2 / sx ** 2
- 2 * rho / (sx * sy) * (x - x0) * (y - y0)
+ (y - y0) ** 2 / sy ** 2
)
denom = 2 * (1 - rho ** 2)
return A * np.exp(-num / denom) # /(2*np.pi*sx*sy*np.sqrt(1-rho**2))
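# The helper above evaluates an unnormalised, correlated 2-D Gaussian:
#   A * exp(-[(x-x0)^2/sx^2 - 2*rho*(x-x0)*(y-y0)/(sx*sy) + (y-y0)^2/sy^2] / (2*(1-rho^2)))
# i.e. the bivariate normal shape without the 1/(2*pi*sx*sy*sqrt(1-rho^2)) factor.
# Minimal sanity check (illustrative only): the value at the centre (x0, y0) equals A.
def _example_gaussian_peak():
    x, y = np.meshgrid(np.arange(-5.0, 6.0), np.arange(-5.0, 6.0))
    g = makeGaussian(x, y, x0=0.0, y0=0.0, sx=2.0, sy=3.0, rho=0.1, A=4.0)
    assert np.isclose(g.max(), 4.0)
    return g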
def makeGaussianAngle(x, y, x0, y0, sx, sy, angle, A):
cosa = np.cos(angle)
sina = np.sin(angle)
sin2a = np.sin(2 * angle)
varx = sx ** 2
vary = sy ** 2
a = cosa ** 2 / (2 * varx) + sina ** 2 / (2 * vary)
b = -sin2a / (4 * varx) + sin2a / (4 * vary)
c = sina ** 2 / (2 * varx) + cosa ** 2 / (2 * vary)
num = a * (x - x0) ** 2 - 2 * b * (x - x0) * (y - y0) + c * (y - y0) ** 2
return A * np.exp(-num)
def gettransformedimage(x, y, data, angle=False):
ret = np.zeros(x.shape, dtype=float)
if angle:
proc = makeGaussianAngle
else:
proc = makeGaussian
for x0, y0, sx, sy, rho, A in data:
ret += proc(x, y, x0, y0, sx, sy, rho, A)
# ret /= np.max(ret)
return ret
# def getlargeimage(npeaks,nsigma,shape,subshape):
# n = np.round(npeaks*min(shape)/min(subshape)).astype(np.int)
# image = np.zeros(shape,dtype=np.float32)
# np.random.seed(1)
# x = shape[0]*np.random.random(n**2)
# y = shape[1]*np.random.random(n**2)
# image[x.astype(np.int), y.astype(np.int)] = 1
# image = scipy.ndimage.gaussian_filter(image, sigma=min(subshape)/(npeaks*nsigma))
# image /= np.max(image)
def makeGaussian1D(x, x0, sx, A):
return A * np.exp(-((x - x0) ** 2) / (2.0 * sx ** 2)) # /(np.sqrt(2*np.pi)*sx)
def gettransformedvector(x, data, angle=False):
ret = np.zeros(x.shape, dtype=float)
for x0, sx, A in data:
ret += makeGaussian1D(x, x0, sx, A)
# ret /= np.max(ret)
return ret
def translation(dx, dy):
Mcoord = np.identity(3)
Mcof = np.identity(3)
Mcoord[0:2, 2] = [dx, dy]
Mcof[0:2, 2] = [-dx, -dy]
return Mcoord, Mcof
def rigid(a, tx, ty):
if a < 0 or a > 1:
raise ValueError("A rigid transformation is expected.")
Mcoord = np.identity(3)
Mcof = np.identity(3)
b = np.sqrt(1 - a * a)
Mcoord[0, 0:3] = [a, -b, tx]
Mcoord[1, 0:3] = [b, a, ty]
Mcof[0, 0:3] = [a, b, -a * tx - b * ty]
Mcof[1, 0:3] = [-b, a, b * tx - a * ty]
return Mcoord, Mcof
def similarity(a, b, tx, ty):
Mcoord = np.identity(3)
Mcof = np.identity(3)
Mcoord[0, 0:3] = [a, -b, tx]
Mcoord[1, 0:3] = [b, a, ty]
s = float(a * a + b * b)
Mcof[0, 0:3] = [a / s, b / s, -(a * tx + b * ty) / s]
Mcof[1, 0:3] = [-b / s, a / s, (b * tx - a * ty) / s]
return Mcoord, Mcof
def affine(a, b, c, d, tx, ty):
Mcoord = np.identity(3)
Mcof = np.identity(3)
Mcoord[0, 0:3] = [a, b, tx]
Mcoord[1, 0:3] = [c, d, ty]
det = float(a * d - b * c)
Mcof[0, 0:3] = [d / det, -b / det, (b * ty - d * tx) / det]
Mcof[1, 0:3] = [-c / det, a / det, (c * tx - a * ty) / det]
return Mcoord, Mcof
def projective(a, b, c, d, tx, ty, px, py):
Mcoord = np.identity(3)
Mcof = np.identity(3)
Mcoord[0, 0:3] = [a, b, tx]
Mcoord[1, 0:3] = [c, d, ty]
Mcoord[2, 0:3] = [px, py, 1]
det = float(
a * d - a * py * ty - b * c + b * px * ty + c * py * tx - d * px * tx
)
Mcof[0, 0:3] = [d - py * ty, py * tx - b, b * ty - d * tx]
Mcof[1, 0:3] = [px * ty - c, a - px * tx, c * tx - a * ty]
Mcof[2, 0:3] = [c * py - d * px, b * px - a * py, a * d - b * c]
Mcof /= det
return Mcoord, Mcof
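# The five constructors above all return (Mcoord, Mcof): the homogeneous
# change-of-coordinate matrix and its inverse (cofactors divided by the
# determinant). A minimal check that the pair really are inverses, with
# arbitrary illustrative parameters:
def _example_check_inverse():
    Mcoord, Mcof = projective(1.1, 0.2, -0.1, 0.9, 1.5, -1.7, 0.001, -0.001)
    assert np.allclose(np.dot(Mcoord, Mcof), np.identity(3))
    assert np.allclose(np.dot(Mcof, Mcoord), np.identity(3))
    return Mcoord, Mcof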
def transformation(t, n, subpixel=True):
if t == transformationType.rigid:
# total rotation is 40 degrees
a = np.around(np.cos(40.0 / 180.0 * np.pi / (n - 1)), 3)
# a = np.sqrt(3)/2 # between -1 and 1
elif t == transformationType.similarity:
a = np.around(np.cos(40.0 / 180.0 * np.pi / (n - 1)), 3)
b = np.around(np.sin(40.0 / 180.0 * np.pi / (n - 1)), 3)
a *= 1.1
b *= 1.1
else:
a = np.around(np.cos(40.0 / 180.0 * np.pi / (n - 1)), 3)
b = np.around(np.sin(40.0 / 180.0 * np.pi / (n - 1)), 3)
a *= 1.1
b *= 1.2
c = -b * 0.9
d = a * 0.9
if subpixel:
tx = 1.5
ty = -1.7
else:
tx = 1.0
ty = -4.0  # TODO: -2
px = 0.001
py = -0.001
if t == transformationType.translation:
return translation(tx, ty)
elif t == transformationType.rigid:
return rigid(a, tx, ty)
elif t == transformationType.similarity:
return similarity(a, b, tx, ty)
elif t == transformationType.affine:
return affine(a, b, c, d, tx, ty)
elif t == transformationType.projective:
return projective(a, b, c, d, tx, ty, px, py)
else:
raise NotImplementedError("This transformation type has not been implemented.")
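# Illustrative only: the dispatcher above pairs every supported transformation type
# with a (Mcoord, Mcof) matrix pair. With subpixel=False the translation components
# are integer-valued (1.0 and -4.0 here), and the two matrices undo each other:
def _example_transformation_pair():
    Mcoord, Mcof = transformation(transformationType.translation, 4, subpixel=False)
    assert np.allclose(Mcoord[0:2, 2], [1.0, -4.0])
    assert np.allclose(np.dot(Mcoord, Mcof), np.identity(3))
    return Mcoord, Mcof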
class rvgen(rv_continuous):
def _pdf(self, x):
H = 1 / np.sqrt(2.0 * np.pi)
return H * np.exp(-(x ** 2) / 2.0)
# return H * (1 - np.exp(-(x ** 2) / 2.0))  # unreachable alternative form
def random(a, b, n):
return a + (b - a) * np.random.random(n)
def genpos1d(a, b, npeaks):
x = random(a, b, npeaks)
dr = (b - a) * 0.1
xm = (a + b) / 2.0
ind = abs(x - xm) > dr
x = x[ind]
npeaks = sum(ind)
return x, npeaks
def genpos2d(a, b, c, d, npeaks):
x = random(a, b, npeaks)
y = random(c, d, npeaks)
dr = max(b - a, d - c) * 0.1
xm = (a + b) / 2.0
ym = (c + d) / 2.0
r = ((x - xm) ** 2 + (y - ym) ** 2) ** 0.5
ind = r > dr
x = x[ind]
y = y[ind]
npeaks = sum(ind)
return x, y, npeaks
def data(
transfotype,
ndim1=61, # TODO 61
ndim2=57,
nimages=4,
nstacks=2,
ndim=100,
vector=False,
transposed=False,
realistic=True,
subpixel=True,
plot=False,
inverse=False,
):
"""
Returns:
3-tuple: list of image stacks, change-of-coordinate matrix between subsequent images, stack dimension
"""
if vector and transfotype != transformationType.translation:
raise ValueError("Vectors can only be shifted.")
# Transformation between images
Mcoord, Mcof = transformation(transfotype, nimages, subpixel=subpixel)
stackdim = 0
if vector:
# Shape of a subimage
if transposed:
dimi = 0
subshape = (ndim, 1)
else:
dimi = 1
subshape = (1, ndim)
Mcoord[dimi, 2] = 0
Mcof[dimi, 2] = 0
subxmin = -subshape[dimi] // 2
subxmax = subshape[dimi] // 2
if transposed:
subshape = (subxmax - subxmin + 1, 1)
else:
subshape = (1, subxmax - subxmin + 1)
# Determine shape of large image (transform corners)
d = Mcof[1 - dimi, 2] * (nimages - 1)
xmin = int(min(subxmin, subxmin + d))
xmax = int(max(subxmax, subxmax + d))
# Gaussians in large image
npix = np.prod(subshape)
npixperpeak = 2.0
npeaks = int(npix / npixperpeak)
sx = npixperpeak * 1.5
margin = int(10 * sx)
xmin -= margin
xmax += margin
shape = subshape
if transposed:
shape = (xmax - xmin + 1, 1)
else:
shape = (1, xmax - xmin + 1)
np.random.seed(1)
if realistic:
x0, npeaks = genpos1d(xmin, xmax, npeaks)
else:
x0, npeaks = genpos1d(subxmin, subxmax, npeaks)
ind = (x0 > 5) | (x0 < 5)  # drops only peaks sitting exactly at x0 == 5
x0 = x0[ind]
npeaks = sum(ind)
sx = random(sx * 0.8, sx * 1.2, npeaks)
A = random(1, 5, npeaks)
data = tuple(zip(x0, sx, A))
# Plot full images
if plot:
xv = np.arange(xmin, xmax + 1)
img = gettransformedvector(xv, data).reshape(shape)
import matplotlib.pyplot as plt
plt.figure(2)
plt.plot(xv.flat, img.flat)
plt.show()
# Stack of transformed subimages
if realistic:
s = subshape
xv = np.arange(subxmin, subxmax + 1)
else:
s = shape
xv = np.arange(xmin, xmax + 1)
ret = np.empty((nimages,) + s, dtype=np.float32)
xy = np.copy(xv)
ret[0] = gettransformedvector(xy, data).reshape(s)
for i in range(1, nimages):
xy = xy + Mcof[1 - dimi, 2]
ret[i] = gettransformedvector(xy, data).reshape(s)
else:
# Shape of a subimage
subshape = (ndim1, ndim2)
subxmin = -subshape[1] // 2
subymin = -subshape[0] // 2
# superseded immediately below by the full-width extents: subxmax = subshape[1] // 2; subymax = subshape[0] // 2
subxmax = subshape[1] - 1
subymax = subshape[0] - 1
subshape = (subymax - subymin + 1, subxmax - subxmin + 1)
# Determine shape of large image (transform corners)
xy = np.empty((3, 4))
xy[0, :] = [subxmin, subxmax, subxmin, subxmax]
xy[1, :] = [subymin, subymin, subymax, subymax]
xy[2, :] = [1, 1, 1, 1]
myminmax = np.append(np.min(xy, axis=1), np.max(xy, axis=1))
for i in range(1, nimages):
xy = np.dot(Mcoord, xy)
xy[0, :] /= xy[2, :]
xy[1, :] /= xy[2, :]
myminmax[0:3] = np.minimum(myminmax[0:3], np.min(xy, axis=1))
myminmax[3:] = np.maximum(myminmax[3:], np.max(xy, axis=1))
xmin = int(myminmax[0])
ymin = int(myminmax[1])
xmax = int(myminmax[3])
ymax = int(myminmax[4])
# Gaussians in large image
npix = np.prod(subshape)
npixperpeak = 10.0
npeaks = int(npix / npixperpeak)
sxy = np.sqrt(npixperpeak)
margin = int(10 * sxy)
xmin -= margin
ymin -= margin
xmax += margin
ymax += margin
shape = (ymax - ymin + 1, xmax - xmin + 1)
np.random.seed(1)
if realistic:
x0, y0, npeaks = genpos2d(xmin, xmax, ymin, ymax, npeaks)
else:
x0, y0, npeaks = genpos2d(subxmin, subxmax, subymin, subymax, npeaks)
sx = random(sxy * 0.8, sxy * 1.2, npeaks)
sy = random(sxy * 0.8, sxy * 1.2, npeaks)
rho = random(0, 0.2, npeaks)
A = random(1, 5, npeaks)
data = tuple(zip(x0, y0, sx, sy, rho, A))
# Plot full image
if plot:
xv, yv = np.meshgrid(np.arange(xmin, xmax + 1), np.arange(ymin, ymax + 1))
xv = xv.reshape((1, shape[0] * shape[1]))
yv = yv.reshape((1, shape[0] * shape[1]))
xy = np.vstack((xv, yv, np.ones_like(xv)))
img = gettransformedimage(xv, yv, data).reshape(shape)
import matplotlib.pyplot as plt
plt.figure(2)
plt.subplot(111)
plt.imshow(img, origin="lower", interpolation="nearest")
plt.show()
# Stack of transformed subimages
if realistic:
s = subshape
xv, yv = np.meshgrid(
np.arange(subxmin, subxmax + 1), np.arange(subymin, subymax + 1)
)
ox, oy = subxmin, subymin
else:
s = shape
xv, yv = np.meshgrid(np.arange(xmin, xmax + 1), np.arange(ymin, ymax + 1))
ox, oy = xmin, ymin
ret = np.empty((nimages,) + s, dtype=np.float32)
xv = xv.reshape((1, s[0] * s[1]))
yv = yv.reshape((1, s[0] * s[1]))
xy = np.vstack((xv, yv, np.ones_like(xv)))
ret[0] = gettransformedimage(xv, yv, data).reshape(s)
for i in range(1, nimages):
xy = np.dot(Mcof, xy) # coordinates from new frame to old frame
xy[0, :] /= xy[2, :]
xy[1, :] /= xy[2, :]
ret[i] = gettransformedimage(xy[0, :], xy[1, :], data).reshape(s)
# Relative change-of-frame in subimage pixel frame
C = np.identity(3, dtype=Mcof.dtype)
Cinv = np.identity(3, dtype=Mcof.dtype)
C[0:2, 2] = [ox, oy] # image pixel frame to subimage pixel frame
Cinv[0:2, 2] = -C[0:2, 2]
Mcof = np.dot(np.dot(Cinv, Mcof), C)
Mcoord = np.dot(np.dot(Cinv, Mcoord), C)
# Mcoord: change-of-frame matrix of the back-transformation
if inverse:
ret = ret.max() - ret
return [ret.copy() for _ in range(nstacks)], Mcoord, stackdim
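# Hedged usage sketch of data(): it returns `nstacks` identical image stacks, the
# change-of-coordinate matrix between subsequent images, and the stack dimension.
def _example_data_usage():
    stacks, Mcoord, stackdim = data(transformationType.translation, nimages=4, nstacks=2)
    assert len(stacks) == 2
    assert stacks[0].shape[stackdim] == 4  # one frame per image
    return Mcoord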
| mit |
teto/ns-3-dev-git | src/wimax/bindings/modulegen__gcc_LP64.py | 1 | 841703 | from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
class ErrorHandler(pybindgen.settings.ErrorHandler):
def handle_error(self, wrapper, exception, traceback_):
warnings.warn("exception %r in wrapper %s" % (exception, wrapper))
return True
pybindgen.settings.error_handler = ErrorHandler()
import sys
def module_init():
root_module = Module('ns.wimax', cpp_namespace='::ns3')
return root_module
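# Note (illustrative): files like this are generated by the ns-3 Python API scanner
# and are normally driven by a main() further down in the file that builds the root
# module, calls the register_* helpers and writes out the C++ wrapper code, roughly:
#
#     out = FileCodeSink(sys.stdout)
#     root_module = module_init()
#     register_types(root_module)
#     register_methods(root_module)    # assumed, defined later in this generated file
#     register_functions(root_module)  # assumed, defined later in this generated file
#     root_module.generate(out)
#
# The helper names past register_types follow the usual modulegen layout and are
# assumptions here.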
def register_types(module):
root_module = module.get_root()
## ul-job.h (module 'wimax'): ns3::ReqType [enumeration]
module.add_enum('ReqType', ['DATA', 'UNICAST_POLLING'])
## log.h (module 'core'): ns3::LogLevel [enumeration]
module.add_enum('LogLevel', ['LOG_NONE', 'LOG_ERROR', 'LOG_LEVEL_ERROR', 'LOG_WARN', 'LOG_LEVEL_WARN', 'LOG_DEBUG', 'LOG_LEVEL_DEBUG', 'LOG_INFO', 'LOG_LEVEL_INFO', 'LOG_FUNCTION', 'LOG_LEVEL_FUNCTION', 'LOG_LOGIC', 'LOG_LEVEL_LOGIC', 'LOG_ALL', 'LOG_LEVEL_ALL', 'LOG_PREFIX_FUNC', 'LOG_PREFIX_TIME', 'LOG_PREFIX_NODE', 'LOG_PREFIX_LEVEL', 'LOG_PREFIX_ALL'], import_from_module='ns.core')
## address.h (module 'network'): ns3::Address [class]
module.add_class('Address', import_from_module='ns.network')
## address.h (module 'network'): ns3::Address::MaxSize_e [enumeration]
module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network')
## trace-helper.h (module 'network'): ns3::AsciiTraceHelper [class]
module.add_class('AsciiTraceHelper', import_from_module='ns.network')
## trace-helper.h (module 'network'): ns3::AsciiTraceHelperForDevice [class]
module.add_class('AsciiTraceHelperForDevice', import_from_module='ns.network', allow_subclassing=True)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class]
module.add_class('AttributeConstructionList', import_from_module='ns.core')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct]
module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
typehandlers.add_type_alias('std::list< ns3::AttributeConstructionList::Item > const_iterator', 'ns3::AttributeConstructionList::CIterator')
typehandlers.add_type_alias('std::list< ns3::AttributeConstructionList::Item > const_iterator*', 'ns3::AttributeConstructionList::CIterator*')
typehandlers.add_type_alias('std::list< ns3::AttributeConstructionList::Item > const_iterator&', 'ns3::AttributeConstructionList::CIterator&')
## buffer.h (module 'network'): ns3::Buffer [class]
module.add_class('Buffer', import_from_module='ns.network')
## buffer.h (module 'network'): ns3::Buffer::Iterator [class]
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::Buffer'])
## packet.h (module 'network'): ns3::ByteTagIterator [class]
module.add_class('ByteTagIterator', import_from_module='ns.network')
## packet.h (module 'network'): ns3::ByteTagIterator::Item [class]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagIterator'])
## byte-tag-list.h (module 'network'): ns3::ByteTagList [class]
module.add_class('ByteTagList', import_from_module='ns.network')
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator [class]
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList'])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item [struct]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList::Iterator'])
## callback.h (module 'core'): ns3::CallbackBase [class]
module.add_class('CallbackBase', import_from_module='ns.core')
## cid.h (module 'wimax'): ns3::Cid [class]
module.add_class('Cid')
## cid.h (module 'wimax'): ns3::Cid::Type [enumeration]
module.add_enum('Type', ['BROADCAST', 'INITIAL_RANGING', 'BASIC', 'PRIMARY', 'TRANSPORT', 'MULTICAST', 'PADDING'], outer_class=root_module['ns3::Cid'])
## cid-factory.h (module 'wimax'): ns3::CidFactory [class]
module.add_class('CidFactory')
## cs-parameters.h (module 'wimax'): ns3::CsParameters [class]
module.add_class('CsParameters')
## cs-parameters.h (module 'wimax'): ns3::CsParameters::Action [enumeration]
module.add_enum('Action', ['ADD', 'REPLACE', 'DELETE'], outer_class=root_module['ns3::CsParameters'])
## dl-mac-messages.h (module 'wimax'): ns3::DcdChannelEncodings [class]
module.add_class('DcdChannelEncodings', allow_subclassing=True)
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeAccessor> [struct]
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor'])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeChecker> [struct]
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeChecker'])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeValue> [struct]
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeValue'])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::CallbackImplBase> [struct]
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase'])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::EventImpl> [struct]
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::EventImpl'])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::Hash::Implementation> [struct]
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation'])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::NixVector> [struct]
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::NixVector'])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::OutputStreamWrapper> [struct]
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::OutputStreamWrapper'])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::Packet> [struct]
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::Packet'])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::TraceSourceAccessor> [struct]
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor'])
## ofdm-downlink-frame-prefix.h (module 'wimax'): ns3::DlFramePrefixIe [class]
module.add_class('DlFramePrefixIe')
## event-id.h (module 'core'): ns3::EventId [class]
module.add_class('EventId', import_from_module='ns.core')
## hash.h (module 'core'): ns3::Hasher [class]
module.add_class('Hasher', import_from_module='ns.core')
## ipcs-classifier-record.h (module 'wimax'): ns3::IpcsClassifierRecord [class]
module.add_class('IpcsClassifierRecord')
## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
module.add_class('Ipv4Address', import_from_module='ns.network')
## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address'])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask [class]
module.add_class('Ipv4Mask', import_from_module='ns.network')
## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
module.add_class('Ipv6Address', import_from_module='ns.network')
## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address'])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix [class]
module.add_class('Ipv6Prefix', import_from_module='ns.network')
## log.h (module 'core'): ns3::LogComponent [class]
module.add_class('LogComponent', import_from_module='ns.core')
typehandlers.add_type_alias('std::map< std::string, ns3::LogComponent * >', 'ns3::LogComponent::ComponentList')
typehandlers.add_type_alias('std::map< std::string, ns3::LogComponent * >*', 'ns3::LogComponent::ComponentList*')
typehandlers.add_type_alias('std::map< std::string, ns3::LogComponent * >&', 'ns3::LogComponent::ComponentList&')
## mac48-address.h (module 'network'): ns3::Mac48Address [class]
module.add_class('Mac48Address', import_from_module='ns.network')
typehandlers.add_type_alias('void ( * ) ( ns3::Mac48Address )', 'ns3::Mac48Address::TracedCallback')
typehandlers.add_type_alias('void ( * ) ( ns3::Mac48Address )*', 'ns3::Mac48Address::TracedCallback*')
typehandlers.add_type_alias('void ( * ) ( ns3::Mac48Address )&', 'ns3::Mac48Address::TracedCallback&')
## mac48-address.h (module 'network'): ns3::Mac48Address [class]
root_module['ns3::Mac48Address'].implicitly_converts_to(root_module['ns3::Address'])
## mac8-address.h (module 'network'): ns3::Mac8Address [class]
module.add_class('Mac8Address', import_from_module='ns.network')
## mac8-address.h (module 'network'): ns3::Mac8Address [class]
root_module['ns3::Mac8Address'].implicitly_converts_to(root_module['ns3::Address'])
## net-device-container.h (module 'network'): ns3::NetDeviceContainer [class]
module.add_class('NetDeviceContainer', import_from_module='ns.network')
typehandlers.add_type_alias('std::vector< ns3::Ptr< ns3::NetDevice > > const_iterator', 'ns3::NetDeviceContainer::Iterator')
typehandlers.add_type_alias('std::vector< ns3::Ptr< ns3::NetDevice > > const_iterator*', 'ns3::NetDeviceContainer::Iterator*')
typehandlers.add_type_alias('std::vector< ns3::Ptr< ns3::NetDevice > > const_iterator&', 'ns3::NetDeviceContainer::Iterator&')
## node-container.h (module 'network'): ns3::NodeContainer [class]
module.add_class('NodeContainer', import_from_module='ns.network')
typehandlers.add_type_alias('std::vector< ns3::Ptr< ns3::Node > > const_iterator', 'ns3::NodeContainer::Iterator')
typehandlers.add_type_alias('std::vector< ns3::Ptr< ns3::Node > > const_iterator*', 'ns3::NodeContainer::Iterator*')
typehandlers.add_type_alias('std::vector< ns3::Ptr< ns3::Node > > const_iterator&', 'ns3::NodeContainer::Iterator&')
## object-base.h (module 'core'): ns3::ObjectBase [class]
module.add_class('ObjectBase', import_from_module='ns.core', allow_subclassing=True)
## object.h (module 'core'): ns3::ObjectDeleter [struct]
module.add_class('ObjectDeleter', import_from_module='ns.core')
## object-factory.h (module 'core'): ns3::ObjectFactory [class]
module.add_class('ObjectFactory', import_from_module='ns.core')
## dl-mac-messages.h (module 'wimax'): ns3::OfdmDcdChannelEncodings [class]
module.add_class('OfdmDcdChannelEncodings', parent=root_module['ns3::DcdChannelEncodings'])
## dl-mac-messages.h (module 'wimax'): ns3::OfdmDlBurstProfile [class]
module.add_class('OfdmDlBurstProfile')
## dl-mac-messages.h (module 'wimax'): ns3::OfdmDlBurstProfile::Diuc [enumeration]
module.add_enum('Diuc', ['DIUC_STC_ZONE', 'DIUC_BURST_PROFILE_1', 'DIUC_BURST_PROFILE_2', 'DIUC_BURST_PROFILE_3', 'DIUC_BURST_PROFILE_4', 'DIUC_BURST_PROFILE_5', 'DIUC_BURST_PROFILE_6', 'DIUC_BURST_PROFILE_7', 'DIUC_BURST_PROFILE_8', 'DIUC_BURST_PROFILE_9', 'DIUC_BURST_PROFILE_10', 'DIUC_BURST_PROFILE_11', 'DIUC_GAP', 'DIUC_END_OF_MAP'], outer_class=root_module['ns3::OfdmDlBurstProfile'])
## dl-mac-messages.h (module 'wimax'): ns3::OfdmDlMapIe [class]
module.add_class('OfdmDlMapIe')
## ul-mac-messages.h (module 'wimax'): ns3::OfdmUlBurstProfile [class]
module.add_class('OfdmUlBurstProfile')
## ul-mac-messages.h (module 'wimax'): ns3::OfdmUlBurstProfile::Uiuc [enumeration]
module.add_enum('Uiuc', ['UIUC_INITIAL_RANGING', 'UIUC_REQ_REGION_FULL', 'UIUC_REQ_REGION_FOCUSED', 'UIUC_FOCUSED_CONTENTION_IE', 'UIUC_BURST_PROFILE_5', 'UIUC_BURST_PROFILE_6', 'UIUC_BURST_PROFILE_7', 'UIUC_BURST_PROFILE_8', 'UIUC_BURST_PROFILE_9', 'UIUC_BURST_PROFILE_10', 'UIUC_BURST_PROFILE_11', 'UIUC_BURST_PROFILE_12', 'UIUC_SUBCH_NETWORK_ENTRY', 'UIUC_END_OF_MAP'], outer_class=root_module['ns3::OfdmUlBurstProfile'])
## ul-mac-messages.h (module 'wimax'): ns3::OfdmUlMapIe [class]
module.add_class('OfdmUlMapIe')
## packet-metadata.h (module 'network'): ns3::PacketMetadata [class]
module.add_class('PacketMetadata', import_from_module='ns.network')
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [struct]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::ItemType [enumeration]
module.add_enum('ItemType', ['PAYLOAD', 'HEADER', 'TRAILER'], outer_class=root_module['ns3::PacketMetadata::Item'], import_from_module='ns.network')
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator [class]
module.add_class('ItemIterator', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
## packet.h (module 'network'): ns3::PacketTagIterator [class]
module.add_class('PacketTagIterator', import_from_module='ns.network')
## packet.h (module 'network'): ns3::PacketTagIterator::Item [class]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagIterator'])
## packet-tag-list.h (module 'network'): ns3::PacketTagList [class]
module.add_class('PacketTagList', import_from_module='ns.network')
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData [struct]
module.add_class('TagData', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagList'])
## log.h (module 'core'): ns3::ParameterLogger [class]
module.add_class('ParameterLogger', import_from_module='ns.core')
## pcap-file.h (module 'network'): ns3::PcapFile [class]
module.add_class('PcapFile', import_from_module='ns.network')
## trace-helper.h (module 'network'): ns3::PcapHelper [class]
module.add_class('PcapHelper', import_from_module='ns.network')
## trace-helper.h (module 'network'): ns3::PcapHelper::DataLinkType [enumeration]
module.add_enum('DataLinkType', ['DLT_NULL', 'DLT_EN10MB', 'DLT_PPP', 'DLT_RAW', 'DLT_IEEE802_11', 'DLT_LINUX_SLL', 'DLT_PRISM_HEADER', 'DLT_IEEE802_11_RADIO', 'DLT_IEEE802_15_4', 'DLT_NETLINK'], outer_class=root_module['ns3::PcapHelper'], import_from_module='ns.network')
## trace-helper.h (module 'network'): ns3::PcapHelperForDevice [class]
module.add_class('PcapHelperForDevice', import_from_module='ns.network', allow_subclassing=True)
## snr-to-block-error-rate-manager.h (module 'wimax'): ns3::SNRToBlockErrorRateManager [class]
module.add_class('SNRToBlockErrorRateManager')
## snr-to-block-error-rate-record.h (module 'wimax'): ns3::SNRToBlockErrorRateRecord [class]
module.add_class('SNRToBlockErrorRateRecord')
## ss-record.h (module 'wimax'): ns3::SSRecord [class]
module.add_class('SSRecord')
## send-params.h (module 'wimax'): ns3::SendParams [class]
module.add_class('SendParams')
## service-flow.h (module 'wimax'): ns3::ServiceFlow [class]
module.add_class('ServiceFlow')
## service-flow.h (module 'wimax'): ns3::ServiceFlow::Direction [enumeration]
module.add_enum('Direction', ['SF_DIRECTION_DOWN', 'SF_DIRECTION_UP'], outer_class=root_module['ns3::ServiceFlow'])
## service-flow.h (module 'wimax'): ns3::ServiceFlow::Type [enumeration]
module.add_enum('Type', ['SF_TYPE_PROVISIONED', 'SF_TYPE_ADMITTED', 'SF_TYPE_ACTIVE'], outer_class=root_module['ns3::ServiceFlow'])
## service-flow.h (module 'wimax'): ns3::ServiceFlow::SchedulingType [enumeration]
module.add_enum('SchedulingType', ['SF_TYPE_NONE', 'SF_TYPE_UNDEF', 'SF_TYPE_BE', 'SF_TYPE_NRTPS', 'SF_TYPE_RTPS', 'SF_TYPE_UGS', 'SF_TYPE_ALL'], outer_class=root_module['ns3::ServiceFlow'])
## service-flow.h (module 'wimax'): ns3::ServiceFlow::CsSpecification [enumeration]
module.add_enum('CsSpecification', ['ATM', 'IPV4', 'IPV6', 'ETHERNET', 'VLAN', 'IPV4_OVER_ETHERNET', 'IPV6_OVER_ETHERNET', 'IPV4_OVER_VLAN', 'IPV6_OVER_VLAN'], outer_class=root_module['ns3::ServiceFlow'])
## service-flow.h (module 'wimax'): ns3::ServiceFlow::ModulationType [enumeration]
module.add_enum('ModulationType', ['MODULATION_TYPE_BPSK_12', 'MODULATION_TYPE_QPSK_12', 'MODULATION_TYPE_QPSK_34', 'MODULATION_TYPE_QAM16_12', 'MODULATION_TYPE_QAM16_34', 'MODULATION_TYPE_QAM64_23', 'MODULATION_TYPE_QAM64_34'], outer_class=root_module['ns3::ServiceFlow'])
## service-flow-record.h (module 'wimax'): ns3::ServiceFlowRecord [class]
module.add_class('ServiceFlowRecord')
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class]
module.add_class('SimpleRefCount', import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), automatic_type_narrowing=True, parent=root_module['ns3::ObjectBase'], template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'])
## simulator.h (module 'core'): ns3::Simulator [class]
module.add_class('Simulator', import_from_module='ns.core', destructor_visibility='private')
## simulator.h (module 'core'): ns3::Simulator [enumeration]
module.add_enum('', ['NO_CONTEXT'], outer_class=root_module['ns3::Simulator'], import_from_module='ns.core')
## tag.h (module 'network'): ns3::Tag [class]
module.add_class('Tag', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
## tag-buffer.h (module 'network'): ns3::TagBuffer [class]
module.add_class('TagBuffer', import_from_module='ns.network')
## nstime.h (module 'core'): ns3::TimeWithUnit [class]
module.add_class('TimeWithUnit', import_from_module='ns.core')
## wimax-tlv.h (module 'wimax'): ns3::TlvValue [class]
module.add_class('TlvValue', allow_subclassing=True)
## wimax-tlv.h (module 'wimax'): ns3::TosTlvValue [class]
module.add_class('TosTlvValue', parent=root_module['ns3::TlvValue'])
## type-id.h (module 'core'): ns3::TypeId [class]
module.add_class('TypeId', import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration]
module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::SupportLevel [enumeration]
module.add_enum('SupportLevel', ['SUPPORTED', 'DEPRECATED', 'OBSOLETE'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct]
module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct]
module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
typehandlers.add_type_alias('uint32_t', 'ns3::TypeId::hash_t')
typehandlers.add_type_alias('uint32_t*', 'ns3::TypeId::hash_t*')
typehandlers.add_type_alias('uint32_t&', 'ns3::TypeId::hash_t&')
## wimax-tlv.h (module 'wimax'): ns3::U16TlvValue [class]
module.add_class('U16TlvValue', parent=root_module['ns3::TlvValue'])
## wimax-tlv.h (module 'wimax'): ns3::U32TlvValue [class]
module.add_class('U32TlvValue', parent=root_module['ns3::TlvValue'])
## wimax-tlv.h (module 'wimax'): ns3::U8TlvValue [class]
module.add_class('U8TlvValue', parent=root_module['ns3::TlvValue'])
## ul-mac-messages.h (module 'wimax'): ns3::UcdChannelEncodings [class]
module.add_class('UcdChannelEncodings', allow_subclassing=True)
## wimax-tlv.h (module 'wimax'): ns3::VectorTlvValue [class]
module.add_class('VectorTlvValue', parent=root_module['ns3::TlvValue'])
typehandlers.add_type_alias('std::vector< ns3::Tlv * > const_iterator', 'ns3::VectorTlvValue::Iterator')
typehandlers.add_type_alias('std::vector< ns3::Tlv * > const_iterator*', 'ns3::VectorTlvValue::Iterator*')
typehandlers.add_type_alias('std::vector< ns3::Tlv * > const_iterator&', 'ns3::VectorTlvValue::Iterator&')
## wimax-helper.h (module 'wimax'): ns3::WimaxHelper [class]
module.add_class('WimaxHelper', parent=[root_module['ns3::PcapHelperForDevice'], root_module['ns3::AsciiTraceHelperForDevice']])
## wimax-helper.h (module 'wimax'): ns3::WimaxHelper::NetDeviceType [enumeration]
module.add_enum('NetDeviceType', ['DEVICE_TYPE_SUBSCRIBER_STATION', 'DEVICE_TYPE_BASE_STATION'], outer_class=root_module['ns3::WimaxHelper'])
## wimax-helper.h (module 'wimax'): ns3::WimaxHelper::PhyType [enumeration]
module.add_enum('PhyType', ['SIMPLE_PHY_TYPE_OFDM'], outer_class=root_module['ns3::WimaxHelper'])
## wimax-helper.h (module 'wimax'): ns3::WimaxHelper::SchedulerType [enumeration]
module.add_enum('SchedulerType', ['SCHED_TYPE_SIMPLE', 'SCHED_TYPE_RTPS', 'SCHED_TYPE_MBQOS'], outer_class=root_module['ns3::WimaxHelper'])
## empty.h (module 'core'): ns3::empty [class]
module.add_class('empty', import_from_module='ns.core')
## int64x64-128.h (module 'core'): ns3::int64x64_t [class]
module.add_class('int64x64_t', import_from_module='ns.core')
## int64x64-128.h (module 'core'): ns3::int64x64_t::impl_type [enumeration]
module.add_enum('impl_type', ['int128_impl', 'cairo_impl', 'ld_impl'], outer_class=root_module['ns3::int64x64_t'], import_from_module='ns.core')
## simple-ofdm-send-param.h (module 'wimax'): ns3::simpleOfdmSendParam [class]
module.add_class('simpleOfdmSendParam')
## chunk.h (module 'network'): ns3::Chunk [class]
module.add_class('Chunk', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
## wimax-tlv.h (module 'wimax'): ns3::ClassificationRuleVectorTlvValue [class]
module.add_class('ClassificationRuleVectorTlvValue', parent=root_module['ns3::VectorTlvValue'])
## wimax-tlv.h (module 'wimax'): ns3::ClassificationRuleVectorTlvValue::ClassificationRuleTlvType [enumeration]
module.add_enum('ClassificationRuleTlvType', ['Priority', 'ToS', 'Protocol', 'IP_src', 'IP_dst', 'Port_src', 'Port_dst', 'Index'], outer_class=root_module['ns3::ClassificationRuleVectorTlvValue'])
## wimax-tlv.h (module 'wimax'): ns3::CsParamVectorTlvValue [class]
module.add_class('CsParamVectorTlvValue', parent=root_module['ns3::VectorTlvValue'])
## wimax-tlv.h (module 'wimax'): ns3::CsParamVectorTlvValue::Type [enumeration]
module.add_enum('Type', ['Classifier_DSC_Action', 'Packet_Classification_Rule'], outer_class=root_module['ns3::CsParamVectorTlvValue'])
## header.h (module 'network'): ns3::Header [class]
module.add_class('Header', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
## wimax-tlv.h (module 'wimax'): ns3::Ipv4AddressTlvValue [class]
module.add_class('Ipv4AddressTlvValue', parent=root_module['ns3::TlvValue'])
## wimax-tlv.h (module 'wimax'): ns3::Ipv4AddressTlvValue::ipv4Addr [struct]
module.add_class('ipv4Addr', outer_class=root_module['ns3::Ipv4AddressTlvValue'])
typehandlers.add_type_alias('std::vector< ns3::Ipv4AddressTlvValue::ipv4Addr > const_iterator', 'ns3::Ipv4AddressTlvValue::Iterator')
typehandlers.add_type_alias('std::vector< ns3::Ipv4AddressTlvValue::ipv4Addr > const_iterator*', 'ns3::Ipv4AddressTlvValue::Iterator*')
typehandlers.add_type_alias('std::vector< ns3::Ipv4AddressTlvValue::ipv4Addr > const_iterator&', 'ns3::Ipv4AddressTlvValue::Iterator&')
## wimax-mac-header.h (module 'wimax'): ns3::MacHeaderType [class]
module.add_class('MacHeaderType', parent=root_module['ns3::Header'])
## wimax-mac-header.h (module 'wimax'): ns3::MacHeaderType::HeaderType [enumeration]
module.add_enum('HeaderType', ['HEADER_TYPE_GENERIC', 'HEADER_TYPE_BANDWIDTH'], outer_class=root_module['ns3::MacHeaderType'])
## mac-messages.h (module 'wimax'): ns3::ManagementMessageType [class]
module.add_class('ManagementMessageType', parent=root_module['ns3::Header'])
## mac-messages.h (module 'wimax'): ns3::ManagementMessageType::MessageType [enumeration]
module.add_enum('MessageType', ['MESSAGE_TYPE_UCD', 'MESSAGE_TYPE_DCD', 'MESSAGE_TYPE_DL_MAP', 'MESSAGE_TYPE_UL_MAP', 'MESSAGE_TYPE_RNG_REQ', 'MESSAGE_TYPE_RNG_RSP', 'MESSAGE_TYPE_REG_REQ', 'MESSAGE_TYPE_REG_RSP', 'MESSAGE_TYPE_DSA_REQ', 'MESSAGE_TYPE_DSA_RSP', 'MESSAGE_TYPE_DSA_ACK'], outer_class=root_module['ns3::ManagementMessageType'])
## object.h (module 'core'): ns3::Object [class]
module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
## object.h (module 'core'): ns3::Object::AggregateIterator [class]
module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object'])
## ofdm-downlink-frame-prefix.h (module 'wimax'): ns3::OfdmDownlinkFramePrefix [class]
module.add_class('OfdmDownlinkFramePrefix', parent=root_module['ns3::Header'])
## send-params.h (module 'wimax'): ns3::OfdmSendParams [class]
module.add_class('OfdmSendParams', parent=root_module['ns3::SendParams'])
## ul-mac-messages.h (module 'wimax'): ns3::OfdmUcdChannelEncodings [class]
module.add_class('OfdmUcdChannelEncodings', parent=root_module['ns3::UcdChannelEncodings'])
## packet-burst.h (module 'network'): ns3::PacketBurst [class]
module.add_class('PacketBurst', import_from_module='ns.network', parent=root_module['ns3::Object'])
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::PacketBurst const > )', 'ns3::PacketBurst::TracedCallback')
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::PacketBurst const > )*', 'ns3::PacketBurst::TracedCallback*')
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::PacketBurst const > )&', 'ns3::PacketBurst::TracedCallback&')
## pcap-file-wrapper.h (module 'network'): ns3::PcapFileWrapper [class]
module.add_class('PcapFileWrapper', import_from_module='ns.network', parent=root_module['ns3::Object'])
## wimax-tlv.h (module 'wimax'): ns3::PortRangeTlvValue [class]
module.add_class('PortRangeTlvValue', parent=root_module['ns3::TlvValue'])
## wimax-tlv.h (module 'wimax'): ns3::PortRangeTlvValue::PortRange [struct]
module.add_class('PortRange', outer_class=root_module['ns3::PortRangeTlvValue'])
typehandlers.add_type_alias('std::vector< ns3::PortRangeTlvValue::PortRange > const_iterator', 'ns3::PortRangeTlvValue::Iterator')
typehandlers.add_type_alias('std::vector< ns3::PortRangeTlvValue::PortRange > const_iterator*', 'ns3::PortRangeTlvValue::Iterator*')
typehandlers.add_type_alias('std::vector< ns3::PortRangeTlvValue::PortRange > const_iterator&', 'ns3::PortRangeTlvValue::Iterator&')
## ul-job.h (module 'wimax'): ns3::PriorityUlJob [class]
module.add_class('PriorityUlJob', parent=root_module['ns3::Object'])
## propagation-loss-model.h (module 'propagation'): ns3::PropagationLossModel [class]
module.add_class('PropagationLossModel', import_from_module='ns.propagation', parent=root_module['ns3::Object'])
## wimax-tlv.h (module 'wimax'): ns3::ProtocolTlvValue [class]
module.add_class('ProtocolTlvValue', parent=root_module['ns3::TlvValue'])
typehandlers.add_type_alias('std::vector< unsigned char > const_iterator', 'ns3::ProtocolTlvValue::Iterator')
typehandlers.add_type_alias('std::vector< unsigned char > const_iterator*', 'ns3::ProtocolTlvValue::Iterator*')
typehandlers.add_type_alias('std::vector< unsigned char > const_iterator&', 'ns3::ProtocolTlvValue::Iterator&')
## propagation-loss-model.h (module 'propagation'): ns3::RandomPropagationLossModel [class]
module.add_class('RandomPropagationLossModel', import_from_module='ns.propagation', parent=root_module['ns3::PropagationLossModel'])
## random-variable-stream.h (module 'core'): ns3::RandomVariableStream [class]
module.add_class('RandomVariableStream', import_from_module='ns.core', parent=root_module['ns3::Object'])
## propagation-loss-model.h (module 'propagation'): ns3::RangePropagationLossModel [class]
module.add_class('RangePropagationLossModel', import_from_module='ns.propagation', parent=root_module['ns3::PropagationLossModel'])
## mac-messages.h (module 'wimax'): ns3::RngReq [class]
module.add_class('RngReq', parent=root_module['ns3::Header'])
## mac-messages.h (module 'wimax'): ns3::RngRsp [class]
module.add_class('RngRsp', parent=root_module['ns3::Header'])
## ss-manager.h (module 'wimax'): ns3::SSManager [class]
module.add_class('SSManager', parent=root_module['ns3::Object'])
## random-variable-stream.h (module 'core'): ns3::SequentialRandomVariable [class]
module.add_class('SequentialRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## service-flow-manager.h (module 'wimax'): ns3::ServiceFlowManager [class]
module.add_class('ServiceFlowManager', parent=root_module['ns3::Object'])
## service-flow-manager.h (module 'wimax'): ns3::ServiceFlowManager::ConfirmationCode [enumeration]
module.add_enum('ConfirmationCode', ['CONFIRMATION_CODE_SUCCESS', 'CONFIRMATION_CODE_REJECT'], outer_class=root_module['ns3::ServiceFlowManager'])
## wimax-tlv.h (module 'wimax'): ns3::SfVectorTlvValue [class]
module.add_class('SfVectorTlvValue', parent=root_module['ns3::VectorTlvValue'])
## wimax-tlv.h (module 'wimax'): ns3::SfVectorTlvValue::Type [enumeration]
module.add_enum('Type', ['SFID', 'CID', 'Service_Class_Name', 'reserved1', 'QoS_Parameter_Set_Type', 'Traffic_Priority', 'Maximum_Sustained_Traffic_Rate', 'Maximum_Traffic_Burst', 'Minimum_Reserved_Traffic_Rate', 'Minimum_Tolerable_Traffic_Rate', 'Service_Flow_Scheduling_Type', 'Request_Transmission_Policy', 'Tolerated_Jitter', 'Maximum_Latency', 'Fixed_length_versus_Variable_length_SDU_Indicator', 'SDU_Size', 'Target_SAID', 'ARQ_Enable', 'ARQ_WINDOW_SIZE', 'ARQ_RETRY_TIMEOUT_Transmitter_Delay', 'ARQ_RETRY_TIMEOUT_Receiver_Delay', 'ARQ_BLOCK_LIFETIME', 'ARQ_SYNC_LOSS', 'ARQ_DELIVER_IN_ORDER', 'ARQ_PURGE_TIMEOUT', 'ARQ_BLOCK_SIZE', 'reserved2', 'CS_Specification', 'IPV4_CS_Parameters'], outer_class=root_module['ns3::SfVectorTlvValue'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class]
module.add_class('SimpleRefCount', import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), automatic_type_narrowing=True, parent=root_module['ns3::empty'], template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class]
module.add_class('SimpleRefCount', import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), automatic_type_narrowing=True, parent=root_module['ns3::empty'], template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class]
module.add_class('SimpleRefCount', import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), automatic_type_narrowing=True, parent=root_module['ns3::empty'], template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class]
module.add_class('SimpleRefCount', import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), automatic_type_narrowing=True, parent=root_module['ns3::empty'], template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > [class]
module.add_class('SimpleRefCount', import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), automatic_type_narrowing=True, parent=root_module['ns3::empty'], template_parameters=['ns3::EventImpl', 'ns3::empty', 'ns3::DefaultDeleter<ns3::EventImpl>'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > [class]
module.add_class('SimpleRefCount', import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), automatic_type_narrowing=True, parent=root_module['ns3::empty'], template_parameters=['ns3::Hash::Implementation', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Hash::Implementation>'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > [class]
module.add_class('SimpleRefCount', import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), automatic_type_narrowing=True, parent=root_module['ns3::empty'], template_parameters=['ns3::NixVector', 'ns3::empty', 'ns3::DefaultDeleter<ns3::NixVector>'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> > [class]
module.add_class('SimpleRefCount', import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), automatic_type_narrowing=True, parent=root_module['ns3::empty'], template_parameters=['ns3::OutputStreamWrapper', 'ns3::empty', 'ns3::DefaultDeleter<ns3::OutputStreamWrapper>'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > [class]
module.add_class('SimpleRefCount', import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), automatic_type_narrowing=True, parent=root_module['ns3::empty'], template_parameters=['ns3::Packet', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Packet>'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class]
module.add_class('SimpleRefCount', import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), automatic_type_narrowing=True, parent=root_module['ns3::empty'], template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'])
## ss-service-flow-manager.h (module 'wimax'): ns3::SsServiceFlowManager [class]
module.add_class('SsServiceFlowManager', parent=root_module['ns3::ServiceFlowManager'])
## ss-service-flow-manager.h (module 'wimax'): ns3::SsServiceFlowManager::ConfirmationCode [enumeration]
module.add_enum('ConfirmationCode', ['CONFIRMATION_CODE_SUCCESS', 'CONFIRMATION_CODE_REJECT'], outer_class=root_module['ns3::SsServiceFlowManager'])
## propagation-loss-model.h (module 'propagation'): ns3::ThreeLogDistancePropagationLossModel [class]
module.add_class('ThreeLogDistancePropagationLossModel', import_from_module='ns.propagation', parent=root_module['ns3::PropagationLossModel'])
## nstime.h (module 'core'): ns3::Time [class]
module.add_class('Time', import_from_module='ns.core')
## nstime.h (module 'core'): ns3::Time::Unit [enumeration]
module.add_enum('Unit', ['Y', 'D', 'H', 'MIN', 'S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core')
typehandlers.add_type_alias('void ( * ) ( ns3::Time )', 'ns3::Time::TracedCallback')
typehandlers.add_type_alias('void ( * ) ( ns3::Time )*', 'ns3::Time::TracedCallback*')
typehandlers.add_type_alias('void ( * ) ( ns3::Time )&', 'ns3::Time::TracedCallback&')
## nstime.h (module 'core'): ns3::Time [class]
root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t'])
## wimax-tlv.h (module 'wimax'): ns3::Tlv [class]
module.add_class('Tlv', parent=root_module['ns3::Header'])
## wimax-tlv.h (module 'wimax'): ns3::Tlv::CommonTypes [enumeration]
module.add_enum('CommonTypes', ['HMAC_TUPLE', 'MAC_VERSION_ENCODING', 'CURRENT_TRANSMIT_POWER', 'DOWNLINK_SERVICE_FLOW', 'UPLINK_SERVICE_FLOW', 'VENDOR_ID_EMCODING', 'VENDOR_SPECIFIC_INFORMATION'], outer_class=root_module['ns3::Tlv'])
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class]
module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
## trailer.h (module 'network'): ns3::Trailer [class]
module.add_class('Trailer', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
## random-variable-stream.h (module 'core'): ns3::TriangularRandomVariable [class]
module.add_class('TriangularRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## propagation-loss-model.h (module 'propagation'): ns3::TwoRayGroundPropagationLossModel [class]
module.add_class('TwoRayGroundPropagationLossModel', import_from_module='ns.propagation', parent=root_module['ns3::PropagationLossModel'])
## ul-mac-messages.h (module 'wimax'): ns3::Ucd [class]
module.add_class('Ucd', parent=root_module['ns3::Header'])
## ul-job.h (module 'wimax'): ns3::UlJob [class]
module.add_class('UlJob', parent=root_module['ns3::Object'])
## ul-job.h (module 'wimax'): ns3::UlJob::JobPriority [enumeration]
module.add_enum('JobPriority', ['LOW', 'INTERMEDIATE', 'HIGH'], outer_class=root_module['ns3::UlJob'])
## ul-mac-messages.h (module 'wimax'): ns3::UlMap [class]
module.add_class('UlMap', parent=root_module['ns3::Header'])
## random-variable-stream.h (module 'core'): ns3::UniformRandomVariable [class]
module.add_class('UniformRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## bs-uplink-scheduler.h (module 'wimax'): ns3::UplinkScheduler [class]
module.add_class('UplinkScheduler', parent=root_module['ns3::Object'])
## bs-uplink-scheduler-mbqos.h (module 'wimax'): ns3::UplinkSchedulerMBQoS [class]
module.add_class('UplinkSchedulerMBQoS', parent=root_module['ns3::UplinkScheduler'])
## bs-uplink-scheduler-rtps.h (module 'wimax'): ns3::UplinkSchedulerRtps [class]
module.add_class('UplinkSchedulerRtps', parent=root_module['ns3::UplinkScheduler'])
## bs-uplink-scheduler-simple.h (module 'wimax'): ns3::UplinkSchedulerSimple [class]
module.add_class('UplinkSchedulerSimple', parent=root_module['ns3::UplinkScheduler'])
## random-variable-stream.h (module 'core'): ns3::WeibullRandomVariable [class]
module.add_class('WeibullRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## wimax-connection.h (module 'wimax'): ns3::WimaxConnection [class]
module.add_class('WimaxConnection', parent=root_module['ns3::Object'])
typehandlers.add_type_alias('std::list< ns3::Ptr< ns3::Packet const > >', 'ns3::WimaxConnection::FragmentsQueue')
typehandlers.add_type_alias('std::list< ns3::Ptr< ns3::Packet const > >*', 'ns3::WimaxConnection::FragmentsQueue*')
typehandlers.add_type_alias('std::list< ns3::Ptr< ns3::Packet const > >&', 'ns3::WimaxConnection::FragmentsQueue&')
## wimax-mac-queue.h (module 'wimax'): ns3::WimaxMacQueue [class]
module.add_class('WimaxMacQueue', parent=root_module['ns3::Object'])
## wimax-mac-queue.h (module 'wimax'): ns3::WimaxMacQueue::QueueElement [struct]
module.add_class('QueueElement', outer_class=root_module['ns3::WimaxMacQueue'])
## wimax-mac-to-mac-header.h (module 'wimax'): ns3::WimaxMacToMacHeader [class]
module.add_class('WimaxMacToMacHeader', parent=root_module['ns3::Header'])
## wimax-phy.h (module 'wimax'): ns3::WimaxPhy [class]
module.add_class('WimaxPhy', parent=root_module['ns3::Object'])
## wimax-phy.h (module 'wimax'): ns3::WimaxPhy::ModulationType [enumeration]
module.add_enum('ModulationType', ['MODULATION_TYPE_BPSK_12', 'MODULATION_TYPE_QPSK_12', 'MODULATION_TYPE_QPSK_34', 'MODULATION_TYPE_QAM16_12', 'MODULATION_TYPE_QAM16_34', 'MODULATION_TYPE_QAM64_23', 'MODULATION_TYPE_QAM64_34'], outer_class=root_module['ns3::WimaxPhy'])
## wimax-phy.h (module 'wimax'): ns3::WimaxPhy::PhyState [enumeration]
module.add_enum('PhyState', ['PHY_STATE_IDLE', 'PHY_STATE_SCANNING', 'PHY_STATE_TX', 'PHY_STATE_RX'], outer_class=root_module['ns3::WimaxPhy'])
## wimax-phy.h (module 'wimax'): ns3::WimaxPhy::PhyType [enumeration]
module.add_enum('PhyType', ['SimpleWimaxPhy', 'simpleOfdmWimaxPhy'], outer_class=root_module['ns3::WimaxPhy'])
## random-variable-stream.h (module 'core'): ns3::ZetaRandomVariable [class]
module.add_class('ZetaRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## random-variable-stream.h (module 'core'): ns3::ZipfRandomVariable [class]
module.add_class('ZipfRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## attribute.h (module 'core'): ns3::AttributeAccessor [class]
module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
## attribute.h (module 'core'): ns3::AttributeChecker [class]
module.add_class('AttributeChecker', import_from_module='ns.core', automatic_type_narrowing=True, allow_subclassing=False, parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
## attribute.h (module 'core'): ns3::AttributeValue [class]
module.add_class('AttributeValue', import_from_module='ns.core', automatic_type_narrowing=True, allow_subclassing=False, parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
## bs-scheduler.h (module 'wimax'): ns3::BSScheduler [class]
module.add_class('BSScheduler', parent=root_module['ns3::Object'])
## bs-scheduler-rtps.h (module 'wimax'): ns3::BSSchedulerRtps [class]
module.add_class('BSSchedulerRtps', parent=root_module['ns3::BSScheduler'])
## bs-scheduler-simple.h (module 'wimax'): ns3::BSSchedulerSimple [class]
module.add_class('BSSchedulerSimple', parent=root_module['ns3::BSScheduler'])
## wimax-mac-header.h (module 'wimax'): ns3::BandwidthRequestHeader [class]
module.add_class('BandwidthRequestHeader', parent=root_module['ns3::Header'])
## wimax-mac-header.h (module 'wimax'): ns3::BandwidthRequestHeader::HeaderType [enumeration]
module.add_enum('HeaderType', ['HEADER_TYPE_INCREMENTAL', 'HEADER_TYPE_AGGREGATE'], outer_class=root_module['ns3::BandwidthRequestHeader'])
## bs-service-flow-manager.h (module 'wimax'): ns3::BsServiceFlowManager [class]
module.add_class('BsServiceFlowManager', parent=root_module['ns3::ServiceFlowManager'])
## bs-service-flow-manager.h (module 'wimax'): ns3::BsServiceFlowManager::ConfirmationCode [enumeration]
module.add_enum('ConfirmationCode', ['CONFIRMATION_CODE_SUCCESS', 'CONFIRMATION_CODE_REJECT'], outer_class=root_module['ns3::BsServiceFlowManager'])
## callback.h (module 'core'): ns3::CallbackChecker [class]
module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## callback.h (module 'core'): ns3::CallbackImplBase [class]
module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
## callback.h (module 'core'): ns3::CallbackValue [class]
module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## channel.h (module 'network'): ns3::Channel [class]
module.add_class('Channel', import_from_module='ns.network', parent=root_module['ns3::Object'])
## connection-manager.h (module 'wimax'): ns3::ConnectionManager [class]
module.add_class('ConnectionManager', parent=root_module['ns3::Object'])
## random-variable-stream.h (module 'core'): ns3::ConstantRandomVariable [class]
module.add_class('ConstantRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## dl-mac-messages.h (module 'wimax'): ns3::Dcd [class]
module.add_class('Dcd', parent=root_module['ns3::Header'])
## random-variable-stream.h (module 'core'): ns3::DeterministicRandomVariable [class]
module.add_class('DeterministicRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## dl-mac-messages.h (module 'wimax'): ns3::DlMap [class]
module.add_class('DlMap', parent=root_module['ns3::Header'])
## mac-messages.h (module 'wimax'): ns3::DsaAck [class]
module.add_class('DsaAck', parent=root_module['ns3::Header'])
## mac-messages.h (module 'wimax'): ns3::DsaReq [class]
module.add_class('DsaReq', parent=root_module['ns3::Header'])
## mac-messages.h (module 'wimax'): ns3::DsaRsp [class]
module.add_class('DsaRsp', parent=root_module['ns3::Header'])
## random-variable-stream.h (module 'core'): ns3::EmpiricalRandomVariable [class]
module.add_class('EmpiricalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## attribute.h (module 'core'): ns3::EmptyAttributeAccessor [class]
module.add_class('EmptyAttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::AttributeAccessor'])
## attribute.h (module 'core'): ns3::EmptyAttributeChecker [class]
module.add_class('EmptyAttributeChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## attribute.h (module 'core'): ns3::EmptyAttributeValue [class]
module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## random-variable-stream.h (module 'core'): ns3::ErlangRandomVariable [class]
module.add_class('ErlangRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## event-impl.h (module 'core'): ns3::EventImpl [class]
module.add_class('EventImpl', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
## random-variable-stream.h (module 'core'): ns3::ExponentialRandomVariable [class]
module.add_class('ExponentialRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## propagation-loss-model.h (module 'propagation'): ns3::FixedRssLossModel [class]
module.add_class('FixedRssLossModel', import_from_module='ns.propagation', parent=root_module['ns3::PropagationLossModel'])
## wimax-mac-header.h (module 'wimax'): ns3::FragmentationSubheader [class]
module.add_class('FragmentationSubheader', parent=root_module['ns3::Header'])
## propagation-loss-model.h (module 'propagation'): ns3::FriisPropagationLossModel [class]
module.add_class('FriisPropagationLossModel', import_from_module='ns.propagation', parent=root_module['ns3::PropagationLossModel'])
## random-variable-stream.h (module 'core'): ns3::GammaRandomVariable [class]
module.add_class('GammaRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## wimax-mac-header.h (module 'wimax'): ns3::GenericMacHeader [class]
module.add_class('GenericMacHeader', parent=root_module['ns3::Header'])
## wimax-mac-header.h (module 'wimax'): ns3::GrantManagementSubheader [class]
module.add_class('GrantManagementSubheader', parent=root_module['ns3::Header'])
## ipcs-classifier.h (module 'wimax'): ns3::IpcsClassifier [class]
module.add_class('IpcsClassifier', parent=root_module['ns3::Object'])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker [class]
module.add_class('Ipv4AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue [class]
module.add_class('Ipv4AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker [class]
module.add_class('Ipv4MaskChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue [class]
module.add_class('Ipv4MaskValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker [class]
module.add_class('Ipv6AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue [class]
module.add_class('Ipv6AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker [class]
module.add_class('Ipv6PrefixChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue [class]
module.add_class('Ipv6PrefixValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## propagation-loss-model.h (module 'propagation'): ns3::LogDistancePropagationLossModel [class]
module.add_class('LogDistancePropagationLossModel', import_from_module='ns.propagation', parent=root_module['ns3::PropagationLossModel'])
## random-variable-stream.h (module 'core'): ns3::LogNormalRandomVariable [class]
module.add_class('LogNormalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## mac48-address.h (module 'network'): ns3::Mac48AddressChecker [class]
module.add_class('Mac48AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## mac48-address.h (module 'network'): ns3::Mac48AddressValue [class]
module.add_class('Mac48AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## propagation-loss-model.h (module 'propagation'): ns3::MatrixPropagationLossModel [class]
module.add_class('MatrixPropagationLossModel', import_from_module='ns.propagation', parent=root_module['ns3::PropagationLossModel'])
## propagation-loss-model.h (module 'propagation'): ns3::NakagamiPropagationLossModel [class]
module.add_class('NakagamiPropagationLossModel', import_from_module='ns.propagation', parent=root_module['ns3::PropagationLossModel'])
## net-device.h (module 'network'): ns3::NetDevice [class]
module.add_class('NetDevice', import_from_module='ns.network', parent=root_module['ns3::Object'])
## net-device.h (module 'network'): ns3::NetDevice::PacketType [enumeration]
module.add_enum('PacketType', ['PACKET_HOST', 'NS3_PACKET_HOST', 'PACKET_BROADCAST', 'NS3_PACKET_BROADCAST', 'PACKET_MULTICAST', 'NS3_PACKET_MULTICAST', 'PACKET_OTHERHOST', 'NS3_PACKET_OTHERHOST'], outer_class=root_module['ns3::NetDevice'], import_from_module='ns.network')
typehandlers.add_type_alias('void ( * ) ( )', 'ns3::NetDevice::LinkChangeTracedCallback')
typehandlers.add_type_alias('void ( * ) ( )*', 'ns3::NetDevice::LinkChangeTracedCallback*')
typehandlers.add_type_alias('void ( * ) ( )&', 'ns3::NetDevice::LinkChangeTracedCallback&')
typehandlers.add_type_alias('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ns3::NetDevice::ReceiveCallback')
typehandlers.add_type_alias('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >*', 'ns3::NetDevice::ReceiveCallback*')
typehandlers.add_type_alias('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >&', 'ns3::NetDevice::ReceiveCallback&')
typehandlers.add_type_alias('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'ns3::NetDevice::PromiscReceiveCallback')
typehandlers.add_type_alias('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >*', 'ns3::NetDevice::PromiscReceiveCallback*')
typehandlers.add_type_alias('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >&', 'ns3::NetDevice::PromiscReceiveCallback&')
## nix-vector.h (module 'network'): ns3::NixVector [class]
module.add_class('NixVector', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
## node.h (module 'network'): ns3::Node [class]
module.add_class('Node', import_from_module='ns.network', parent=root_module['ns3::Object'])
typehandlers.add_type_alias('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'ns3::Node::ProtocolHandler')
typehandlers.add_type_alias('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >*', 'ns3::Node::ProtocolHandler*')
typehandlers.add_type_alias('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >&', 'ns3::Node::ProtocolHandler&')
typehandlers.add_type_alias('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ns3::Node::DeviceAdditionListener')
typehandlers.add_type_alias('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >*', 'ns3::Node::DeviceAdditionListener*')
typehandlers.add_type_alias('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >&', 'ns3::Node::DeviceAdditionListener&')
## random-variable-stream.h (module 'core'): ns3::NormalRandomVariable [class]
module.add_class('NormalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## object-factory.h (module 'core'): ns3::ObjectFactoryChecker [class]
module.add_class('ObjectFactoryChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## object-factory.h (module 'core'): ns3::ObjectFactoryValue [class]
module.add_class('ObjectFactoryValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper [class]
module.add_class('OutputStreamWrapper', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >'])
## packet.h (module 'network'): ns3::Packet [class]
module.add_class('Packet', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::Packet const > )', 'ns3::Packet::TracedCallback')
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::Packet const > )*', 'ns3::Packet::TracedCallback*')
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::Packet const > )&', 'ns3::Packet::TracedCallback&')
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Address const & )', 'ns3::Packet::AddressTracedCallback')
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Address const & )*', 'ns3::Packet::AddressTracedCallback*')
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Address const & )&', 'ns3::Packet::AddressTracedCallback&')
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::Packet const > const, ns3::Address const &, ns3::Address const & )', 'ns3::Packet::TwoAddressTracedCallback')
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::Packet const > const, ns3::Address const &, ns3::Address const & )*', 'ns3::Packet::TwoAddressTracedCallback*')
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::Packet const > const, ns3::Address const &, ns3::Address const & )&', 'ns3::Packet::TwoAddressTracedCallback&')
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Mac48Address )', 'ns3::Packet::Mac48AddressTracedCallback')
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Mac48Address )*', 'ns3::Packet::Mac48AddressTracedCallback*')
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Mac48Address )&', 'ns3::Packet::Mac48AddressTracedCallback&')
typehandlers.add_type_alias('void ( * ) ( uint32_t, uint32_t )', 'ns3::Packet::SizeTracedCallback')
typehandlers.add_type_alias('void ( * ) ( uint32_t, uint32_t )*', 'ns3::Packet::SizeTracedCallback*')
typehandlers.add_type_alias('void ( * ) ( uint32_t, uint32_t )&', 'ns3::Packet::SizeTracedCallback&')
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::Packet const >, double )', 'ns3::Packet::SinrTracedCallback')
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::Packet const >, double )*', 'ns3::Packet::SinrTracedCallback*')
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::Packet const >, double )&', 'ns3::Packet::SinrTracedCallback&')
## random-variable-stream.h (module 'core'): ns3::ParetoRandomVariable [class]
module.add_class('ParetoRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## simple-ofdm-wimax-phy.h (module 'wimax'): ns3::SimpleOfdmWimaxPhy [class]
module.add_class('SimpleOfdmWimaxPhy', parent=root_module['ns3::WimaxPhy'])
## simple-ofdm-wimax-phy.h (module 'wimax'): ns3::SimpleOfdmWimaxPhy::FrameDurationCode [enumeration]
module.add_enum('FrameDurationCode', ['FRAME_DURATION_2_POINT_5_MS', 'FRAME_DURATION_4_MS', 'FRAME_DURATION_5_MS', 'FRAME_DURATION_8_MS', 'FRAME_DURATION_10_MS', 'FRAME_DURATION_12_POINT_5_MS', 'FRAME_DURATION_20_MS'], outer_class=root_module['ns3::SimpleOfdmWimaxPhy'])
## nstime.h (module 'core'): ns3::TimeValue [class]
module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## type-id.h (module 'core'): ns3::TypeIdChecker [class]
module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## type-id.h (module 'core'): ns3::TypeIdValue [class]
module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## uinteger.h (module 'core'): ns3::UintegerValue [class]
module.add_class('UintegerValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## wimax-channel.h (module 'wimax'): ns3::WimaxChannel [class]
module.add_class('WimaxChannel', parent=root_module['ns3::Channel'])
## wimax-net-device.h (module 'wimax'): ns3::WimaxNetDevice [class]
module.add_class('WimaxNetDevice', parent=root_module['ns3::NetDevice'])
## wimax-net-device.h (module 'wimax'): ns3::WimaxNetDevice::Direction [enumeration]
module.add_enum('Direction', ['DIRECTION_DOWNLINK', 'DIRECTION_UPLINK'], outer_class=root_module['ns3::WimaxNetDevice'])
## wimax-net-device.h (module 'wimax'): ns3::WimaxNetDevice::RangingStatus [enumeration]
module.add_enum('RangingStatus', ['RANGING_STATUS_EXPIRED', 'RANGING_STATUS_CONTINUE', 'RANGING_STATUS_ABORT', 'RANGING_STATUS_SUCCESS'], outer_class=root_module['ns3::WimaxNetDevice'])
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Mac48Address const & )', 'ns3::WimaxNetDevice::TxRxTracedCallback')
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Mac48Address const & )*', 'ns3::WimaxNetDevice::TxRxTracedCallback*')
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Mac48Address const & )&', 'ns3::WimaxNetDevice::TxRxTracedCallback&')
## address.h (module 'network'): ns3::AddressChecker [class]
module.add_class('AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## address.h (module 'network'): ns3::AddressValue [class]
module.add_class('AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## bs-net-device.h (module 'wimax'): ns3::BaseStationNetDevice [class]
module.add_class('BaseStationNetDevice', parent=root_module['ns3::WimaxNetDevice'])
## bs-net-device.h (module 'wimax'): ns3::BaseStationNetDevice::State [enumeration]
module.add_enum('State', ['BS_STATE_DL_SUB_FRAME', 'BS_STATE_UL_SUB_FRAME', 'BS_STATE_TTG', 'BS_STATE_RTG'], outer_class=root_module['ns3::BaseStationNetDevice'])
## bs-net-device.h (module 'wimax'): ns3::BaseStationNetDevice::MacPreamble [enumeration]
module.add_enum('MacPreamble', ['SHORT_PREAMBLE', 'LONG_PREAMBLE'], outer_class=root_module['ns3::BaseStationNetDevice'])
## callback.h (module 'core'): ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> [class]
module.add_class('CallbackImpl', import_from_module='ns.core', parent=root_module['ns3::CallbackImplBase'], template_parameters=['ns3::ObjectBase *', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'])
## callback.h (module 'core'): ns3::CallbackImpl<void, bool, unsigned long, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> [class]
module.add_class('CallbackImpl', import_from_module='ns.core', parent=root_module['ns3::CallbackImplBase'], template_parameters=['void', 'bool', 'unsigned long', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'])
## callback.h (module 'core'): ns3::CallbackImpl<void, ns3::Ptr<const ns3::Packet>, const ns3::Mac48Address &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> [class]
module.add_class('CallbackImpl', import_from_module='ns.core', parent=root_module['ns3::CallbackImplBase'], template_parameters=['void', 'ns3::Ptr<const ns3::Packet>', 'const ns3::Mac48Address &', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'])
## callback.h (module 'core'): ns3::CallbackImpl<void, ns3::Ptr<const ns3::Packet>, ns3::Mac48Address, const ns3::Cid &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> [class]
module.add_class('CallbackImpl', import_from_module='ns.core', parent=root_module['ns3::CallbackImplBase'], template_parameters=['void', 'ns3::Ptr<const ns3::Packet>', 'ns3::Mac48Address', 'const ns3::Cid &', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'])
## callback.h (module 'core'): ns3::CallbackImpl<void, ns3::Ptr<const ns3::Packet>, ns3::Mac48Address, ns3::Cid, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> [class]
module.add_class('CallbackImpl', parent=root_module['ns3::CallbackImplBase'], template_parameters=['void', 'ns3::Ptr<const ns3::Packet>', 'ns3::Mac48Address', 'ns3::Cid', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'])
## callback.h (module 'core'): ns3::CallbackImpl<void, ns3::Ptr<const ns3::Packet>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> [class]
module.add_class('CallbackImpl', import_from_module='ns.core', parent=root_module['ns3::CallbackImplBase'], template_parameters=['void', 'ns3::Ptr<const ns3::Packet>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'])
## callback.h (module 'core'): ns3::CallbackImpl<void, ns3::Ptr<const ns3::PacketBurst>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> [class]
module.add_class('CallbackImpl', import_from_module='ns.core', parent=root_module['ns3::CallbackImplBase'], template_parameters=['void', 'ns3::Ptr<const ns3::PacketBurst>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'])
## callback.h (module 'core'): ns3::CallbackImpl<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<const ns3::Packet>, unsigned short, const ns3::Address &, const ns3::Address &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> [class]
module.add_class('CallbackImpl', import_from_module='ns.core', parent=root_module['ns3::CallbackImplBase'], template_parameters=['void', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<const ns3::Packet>', 'unsigned short', 'const ns3::Address &', 'const ns3::Address &', 'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 'ns3::empty'])
## callback.h (module 'core'): ns3::CallbackImpl<void, ns3::Ptr<ns3::NetDevice>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> [class]
module.add_class('CallbackImpl', import_from_module='ns.core', parent=root_module['ns3::CallbackImplBase'], template_parameters=['void', 'ns3::Ptr<ns3::NetDevice>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'])
## callback.h (module 'core'): ns3::CallbackImpl<void, ns3::Ptr<ns3::PacketBurst>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> [class]
module.add_class('CallbackImpl', import_from_module='ns.core', parent=root_module['ns3::CallbackImplBase'], template_parameters=['void', 'ns3::Ptr<ns3::PacketBurst>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'])
## simple-ofdm-wimax-channel.h (module 'wimax'): ns3::SimpleOfdmWimaxChannel [class]
module.add_class('SimpleOfdmWimaxChannel', parent=root_module['ns3::WimaxChannel'])
## simple-ofdm-wimax-channel.h (module 'wimax'): ns3::SimpleOfdmWimaxChannel::PropModel [enumeration]
module.add_enum('PropModel', ['RANDOM_PROPAGATION', 'FRIIS_PROPAGATION', 'LOG_DISTANCE_PROPAGATION', 'COST231_PROPAGATION'], outer_class=root_module['ns3::SimpleOfdmWimaxChannel'])
## ss-net-device.h (module 'wimax'): ns3::SubscriberStationNetDevice [class]
module.add_class('SubscriberStationNetDevice', parent=root_module['ns3::WimaxNetDevice'])
## ss-net-device.h (module 'wimax'): ns3::SubscriberStationNetDevice::State [enumeration]
module.add_enum('State', ['SS_STATE_IDLE', 'SS_STATE_SCANNING', 'SS_STATE_SYNCHRONIZING', 'SS_STATE_ACQUIRING_PARAMETERS', 'SS_STATE_WAITING_REG_RANG_INTRVL', 'SS_STATE_WAITING_INV_RANG_INTRVL', 'SS_STATE_WAITING_RNG_RSP', 'SS_STATE_ADJUSTING_PARAMETERS', 'SS_STATE_REGISTERED', 'SS_STATE_TRANSMITTING', 'SS_STATE_STOPPED'], outer_class=root_module['ns3::SubscriberStationNetDevice'])
## ss-net-device.h (module 'wimax'): ns3::SubscriberStationNetDevice::EventType [enumeration]
module.add_enum('EventType', ['EVENT_NONE', 'EVENT_WAIT_FOR_RNG_RSP', 'EVENT_DL_MAP_SYNC_TIMEOUT', 'EVENT_LOST_DL_MAP', 'EVENT_LOST_UL_MAP', 'EVENT_DCD_WAIT_TIMEOUT', 'EVENT_UCD_WAIT_TIMEOUT', 'EVENT_RANG_OPP_WAIT_TIMEOUT'], outer_class=root_module['ns3::SubscriberStationNetDevice'])
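    ## Container and free-function type registrations for this module: the STL
    ## containers that appear in wrapped APIs are declared via add_container(),
    ## and C++ typedefs / callback signatures are mirrored for the type
    ## handlers with typehandlers.add_type_alias().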
module.add_container('std::map< std::string, ns3::LogComponent * >', ('std::string', 'ns3::LogComponent *'), container_type='map')
module.add_container('std::vector< ns3::ServiceFlow * >', 'ns3::ServiceFlow *', container_type='vector')
module.add_container('std::vector< bool >', 'bool', container_type='vector')
module.add_container('ns3::bvec', 'bool', container_type='vector')
module.add_container('std::vector< ns3::DlFramePrefixIe >', 'ns3::DlFramePrefixIe', container_type='vector')
module.add_container('std::list< ns3::Ptr< ns3::Packet > >', 'ns3::Ptr< ns3::Packet >', container_type='list')
module.add_container('std::vector< ns3::SSRecord * >', 'ns3::SSRecord *', container_type='vector')
module.add_container('std::vector< ns3::OfdmUlBurstProfile >', 'ns3::OfdmUlBurstProfile', container_type='vector')
module.add_container('std::list< ns3::OfdmUlMapIe >', 'ns3::OfdmUlMapIe', container_type='list')
module.add_container('std::list< ns3::Ptr< ns3::UlJob > >', 'ns3::Ptr< ns3::UlJob >', container_type='list')
module.add_container('std::list< ns3::Ptr< ns3::Packet const > >', 'ns3::Ptr< ns3::Packet const >', container_type='list')
module.add_container('std::deque< ns3::WimaxMacQueue::QueueElement >', 'ns3::WimaxMacQueue::QueueElement', container_type='dequeue')
module.add_container('std::list< std::pair< ns3::OfdmDlMapIe *, ns3::Ptr< ns3::PacketBurst > > >', 'std::pair< ns3::OfdmDlMapIe *, ns3::Ptr< ns3::PacketBurst > >', container_type='list')
module.add_container('std::vector< ns3::Ptr< ns3::WimaxConnection > >', 'ns3::Ptr< ns3::WimaxConnection >', container_type='vector')
module.add_container('std::vector< ns3::OfdmDlBurstProfile >', 'ns3::OfdmDlBurstProfile', container_type='vector')
module.add_container('std::list< ns3::OfdmDlMapIe >', 'ns3::OfdmDlMapIe', container_type='list')
typehandlers.add_type_alias('void ( * ) ( std::ostream & )', 'ns3::TimePrinter')
typehandlers.add_type_alias('void ( * ) ( std::ostream & )*', 'ns3::TimePrinter*')
typehandlers.add_type_alias('void ( * ) ( std::ostream & )&', 'ns3::TimePrinter&')
typehandlers.add_type_alias('void ( * ) ( std::ostream & )', 'ns3::NodePrinter')
typehandlers.add_type_alias('void ( * ) ( std::ostream & )*', 'ns3::NodePrinter*')
typehandlers.add_type_alias('void ( * ) ( std::ostream & )&', 'ns3::NodePrinter&')
typehandlers.add_type_alias('std::vector< bool >', 'ns3::bvec')
typehandlers.add_type_alias('std::vector< bool >*', 'ns3::bvec*')
typehandlers.add_type_alias('std::vector< bool >&', 'ns3::bvec&')
## Register a nested module for the namespace FatalImpl
nested_module = module.add_cpp_namespace('FatalImpl')
register_types_ns3_FatalImpl(nested_module)
## Register a nested module for the namespace Hash
nested_module = module.add_cpp_namespace('Hash')
register_types_ns3_Hash(nested_module)
## Register a nested module for the namespace TracedValueCallback
nested_module = module.add_cpp_namespace('TracedValueCallback')
register_types_ns3_TracedValueCallback(nested_module)
## Register a nested module for the namespace internal
nested_module = module.add_cpp_namespace('internal')
register_types_ns3_internal(nested_module)
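## The register_types_ns3_* helpers below populate the nested pybindgen
## sub-modules created above with add_cpp_namespace(); helpers whose body is
## only "root_module = module.get_root()" register an otherwise empty
## namespace.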
def register_types_ns3_FatalImpl(module):
root_module = module.get_root()
def register_types_ns3_Hash(module):
root_module = module.get_root()
## hash-function.h (module 'core'): ns3::Hash::Implementation [class]
module.add_class('Implementation', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
typehandlers.add_type_alias('uint32_t ( * ) ( char const *, std::size_t const )', 'ns3::Hash::Hash32Function_ptr')
typehandlers.add_type_alias('uint32_t ( * ) ( char const *, std::size_t const )*', 'ns3::Hash::Hash32Function_ptr*')
typehandlers.add_type_alias('uint32_t ( * ) ( char const *, std::size_t const )&', 'ns3::Hash::Hash32Function_ptr&')
typehandlers.add_type_alias('uint64_t ( * ) ( char const *, std::size_t const )', 'ns3::Hash::Hash64Function_ptr')
typehandlers.add_type_alias('uint64_t ( * ) ( char const *, std::size_t const )*', 'ns3::Hash::Hash64Function_ptr*')
typehandlers.add_type_alias('uint64_t ( * ) ( char const *, std::size_t const )&', 'ns3::Hash::Hash64Function_ptr&')
## Register a nested module for the namespace Function
nested_module = module.add_cpp_namespace('Function')
register_types_ns3_Hash_Function(nested_module)
def register_types_ns3_Hash_Function(module):
root_module = module.get_root()
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a [class]
module.add_class('Fnv1a', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32 [class]
module.add_class('Hash32', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64 [class]
module.add_class('Hash64', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3 [class]
module.add_class('Murmur3', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
def register_types_ns3_TracedValueCallback(module):
root_module = module.get_root()
typehandlers.add_type_alias('void ( * ) ( ns3::Time, ns3::Time )', 'ns3::TracedValueCallback::Time')
typehandlers.add_type_alias('void ( * ) ( ns3::Time, ns3::Time )*', 'ns3::TracedValueCallback::Time*')
typehandlers.add_type_alias('void ( * ) ( ns3::Time, ns3::Time )&', 'ns3::TracedValueCallback::Time&')
def register_types_ns3_internal(module):
root_module = module.get_root()
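## register_methods() enumerates every wrapped class and hands it to a
## dedicated register_Ns3*_methods() helper, which attaches constructors,
## member functions and operators to that class wrapper.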
def register_methods(root_module):
register_Ns3Address_methods(root_module, root_module['ns3::Address'])
register_Ns3AsciiTraceHelper_methods(root_module, root_module['ns3::AsciiTraceHelper'])
register_Ns3AsciiTraceHelperForDevice_methods(root_module, root_module['ns3::AsciiTraceHelperForDevice'])
register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
register_Ns3Buffer_methods(root_module, root_module['ns3::Buffer'])
register_Ns3BufferIterator_methods(root_module, root_module['ns3::Buffer::Iterator'])
register_Ns3ByteTagIterator_methods(root_module, root_module['ns3::ByteTagIterator'])
register_Ns3ByteTagIteratorItem_methods(root_module, root_module['ns3::ByteTagIterator::Item'])
register_Ns3ByteTagList_methods(root_module, root_module['ns3::ByteTagList'])
register_Ns3ByteTagListIterator_methods(root_module, root_module['ns3::ByteTagList::Iterator'])
register_Ns3ByteTagListIteratorItem_methods(root_module, root_module['ns3::ByteTagList::Iterator::Item'])
register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
register_Ns3Cid_methods(root_module, root_module['ns3::Cid'])
register_Ns3CidFactory_methods(root_module, root_module['ns3::CidFactory'])
register_Ns3CsParameters_methods(root_module, root_module['ns3::CsParameters'])
register_Ns3DcdChannelEncodings_methods(root_module, root_module['ns3::DcdChannelEncodings'])
register_Ns3DefaultDeleter__Ns3AttributeAccessor_methods(root_module, root_module['ns3::DefaultDeleter< ns3::AttributeAccessor >'])
register_Ns3DefaultDeleter__Ns3AttributeChecker_methods(root_module, root_module['ns3::DefaultDeleter< ns3::AttributeChecker >'])
register_Ns3DefaultDeleter__Ns3AttributeValue_methods(root_module, root_module['ns3::DefaultDeleter< ns3::AttributeValue >'])
register_Ns3DefaultDeleter__Ns3CallbackImplBase_methods(root_module, root_module['ns3::DefaultDeleter< ns3::CallbackImplBase >'])
register_Ns3DefaultDeleter__Ns3EventImpl_methods(root_module, root_module['ns3::DefaultDeleter< ns3::EventImpl >'])
register_Ns3DefaultDeleter__Ns3HashImplementation_methods(root_module, root_module['ns3::DefaultDeleter< ns3::Hash::Implementation >'])
register_Ns3DefaultDeleter__Ns3NixVector_methods(root_module, root_module['ns3::DefaultDeleter< ns3::NixVector >'])
register_Ns3DefaultDeleter__Ns3OutputStreamWrapper_methods(root_module, root_module['ns3::DefaultDeleter< ns3::OutputStreamWrapper >'])
register_Ns3DefaultDeleter__Ns3Packet_methods(root_module, root_module['ns3::DefaultDeleter< ns3::Packet >'])
register_Ns3DefaultDeleter__Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::DefaultDeleter< ns3::TraceSourceAccessor >'])
register_Ns3DlFramePrefixIe_methods(root_module, root_module['ns3::DlFramePrefixIe'])
register_Ns3EventId_methods(root_module, root_module['ns3::EventId'])
register_Ns3Hasher_methods(root_module, root_module['ns3::Hasher'])
register_Ns3IpcsClassifierRecord_methods(root_module, root_module['ns3::IpcsClassifierRecord'])
register_Ns3Ipv4Address_methods(root_module, root_module['ns3::Ipv4Address'])
register_Ns3Ipv4Mask_methods(root_module, root_module['ns3::Ipv4Mask'])
register_Ns3Ipv6Address_methods(root_module, root_module['ns3::Ipv6Address'])
register_Ns3Ipv6Prefix_methods(root_module, root_module['ns3::Ipv6Prefix'])
register_Ns3LogComponent_methods(root_module, root_module['ns3::LogComponent'])
register_Ns3Mac48Address_methods(root_module, root_module['ns3::Mac48Address'])
register_Ns3Mac8Address_methods(root_module, root_module['ns3::Mac8Address'])
register_Ns3NetDeviceContainer_methods(root_module, root_module['ns3::NetDeviceContainer'])
register_Ns3NodeContainer_methods(root_module, root_module['ns3::NodeContainer'])
register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
register_Ns3ObjectFactory_methods(root_module, root_module['ns3::ObjectFactory'])
register_Ns3OfdmDcdChannelEncodings_methods(root_module, root_module['ns3::OfdmDcdChannelEncodings'])
register_Ns3OfdmDlBurstProfile_methods(root_module, root_module['ns3::OfdmDlBurstProfile'])
register_Ns3OfdmDlMapIe_methods(root_module, root_module['ns3::OfdmDlMapIe'])
register_Ns3OfdmUlBurstProfile_methods(root_module, root_module['ns3::OfdmUlBurstProfile'])
register_Ns3OfdmUlMapIe_methods(root_module, root_module['ns3::OfdmUlMapIe'])
register_Ns3PacketMetadata_methods(root_module, root_module['ns3::PacketMetadata'])
register_Ns3PacketMetadataItem_methods(root_module, root_module['ns3::PacketMetadata::Item'])
register_Ns3PacketMetadataItemIterator_methods(root_module, root_module['ns3::PacketMetadata::ItemIterator'])
register_Ns3PacketTagIterator_methods(root_module, root_module['ns3::PacketTagIterator'])
register_Ns3PacketTagIteratorItem_methods(root_module, root_module['ns3::PacketTagIterator::Item'])
register_Ns3PacketTagList_methods(root_module, root_module['ns3::PacketTagList'])
register_Ns3PacketTagListTagData_methods(root_module, root_module['ns3::PacketTagList::TagData'])
register_Ns3ParameterLogger_methods(root_module, root_module['ns3::ParameterLogger'])
register_Ns3PcapFile_methods(root_module, root_module['ns3::PcapFile'])
register_Ns3PcapHelper_methods(root_module, root_module['ns3::PcapHelper'])
register_Ns3PcapHelperForDevice_methods(root_module, root_module['ns3::PcapHelperForDevice'])
register_Ns3SNRToBlockErrorRateManager_methods(root_module, root_module['ns3::SNRToBlockErrorRateManager'])
register_Ns3SNRToBlockErrorRateRecord_methods(root_module, root_module['ns3::SNRToBlockErrorRateRecord'])
register_Ns3SSRecord_methods(root_module, root_module['ns3::SSRecord'])
register_Ns3SendParams_methods(root_module, root_module['ns3::SendParams'])
register_Ns3ServiceFlow_methods(root_module, root_module['ns3::ServiceFlow'])
register_Ns3ServiceFlowRecord_methods(root_module, root_module['ns3::ServiceFlowRecord'])
register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
register_Ns3Simulator_methods(root_module, root_module['ns3::Simulator'])
register_Ns3Tag_methods(root_module, root_module['ns3::Tag'])
register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer'])
register_Ns3TimeWithUnit_methods(root_module, root_module['ns3::TimeWithUnit'])
register_Ns3TlvValue_methods(root_module, root_module['ns3::TlvValue'])
register_Ns3TosTlvValue_methods(root_module, root_module['ns3::TosTlvValue'])
register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
register_Ns3U16TlvValue_methods(root_module, root_module['ns3::U16TlvValue'])
register_Ns3U32TlvValue_methods(root_module, root_module['ns3::U32TlvValue'])
register_Ns3U8TlvValue_methods(root_module, root_module['ns3::U8TlvValue'])
register_Ns3UcdChannelEncodings_methods(root_module, root_module['ns3::UcdChannelEncodings'])
register_Ns3VectorTlvValue_methods(root_module, root_module['ns3::VectorTlvValue'])
register_Ns3WimaxHelper_methods(root_module, root_module['ns3::WimaxHelper'])
register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t'])
register_Ns3SimpleOfdmSendParam_methods(root_module, root_module['ns3::simpleOfdmSendParam'])
register_Ns3Chunk_methods(root_module, root_module['ns3::Chunk'])
register_Ns3ClassificationRuleVectorTlvValue_methods(root_module, root_module['ns3::ClassificationRuleVectorTlvValue'])
register_Ns3CsParamVectorTlvValue_methods(root_module, root_module['ns3::CsParamVectorTlvValue'])
register_Ns3Header_methods(root_module, root_module['ns3::Header'])
register_Ns3Ipv4AddressTlvValue_methods(root_module, root_module['ns3::Ipv4AddressTlvValue'])
register_Ns3Ipv4AddressTlvValueIpv4Addr_methods(root_module, root_module['ns3::Ipv4AddressTlvValue::ipv4Addr'])
register_Ns3MacHeaderType_methods(root_module, root_module['ns3::MacHeaderType'])
register_Ns3ManagementMessageType_methods(root_module, root_module['ns3::ManagementMessageType'])
register_Ns3Object_methods(root_module, root_module['ns3::Object'])
register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
register_Ns3OfdmDownlinkFramePrefix_methods(root_module, root_module['ns3::OfdmDownlinkFramePrefix'])
register_Ns3OfdmSendParams_methods(root_module, root_module['ns3::OfdmSendParams'])
register_Ns3OfdmUcdChannelEncodings_methods(root_module, root_module['ns3::OfdmUcdChannelEncodings'])
register_Ns3PacketBurst_methods(root_module, root_module['ns3::PacketBurst'])
register_Ns3PcapFileWrapper_methods(root_module, root_module['ns3::PcapFileWrapper'])
register_Ns3PortRangeTlvValue_methods(root_module, root_module['ns3::PortRangeTlvValue'])
register_Ns3PortRangeTlvValuePortRange_methods(root_module, root_module['ns3::PortRangeTlvValue::PortRange'])
register_Ns3PriorityUlJob_methods(root_module, root_module['ns3::PriorityUlJob'])
register_Ns3PropagationLossModel_methods(root_module, root_module['ns3::PropagationLossModel'])
register_Ns3ProtocolTlvValue_methods(root_module, root_module['ns3::ProtocolTlvValue'])
register_Ns3RandomPropagationLossModel_methods(root_module, root_module['ns3::RandomPropagationLossModel'])
register_Ns3RandomVariableStream_methods(root_module, root_module['ns3::RandomVariableStream'])
register_Ns3RangePropagationLossModel_methods(root_module, root_module['ns3::RangePropagationLossModel'])
register_Ns3RngReq_methods(root_module, root_module['ns3::RngReq'])
register_Ns3RngRsp_methods(root_module, root_module['ns3::RngRsp'])
register_Ns3SSManager_methods(root_module, root_module['ns3::SSManager'])
register_Ns3SequentialRandomVariable_methods(root_module, root_module['ns3::SequentialRandomVariable'])
register_Ns3ServiceFlowManager_methods(root_module, root_module['ns3::ServiceFlowManager'])
register_Ns3SfVectorTlvValue_methods(root_module, root_module['ns3::SfVectorTlvValue'])
register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
register_Ns3SimpleRefCount__Ns3OutputStreamWrapper_Ns3Empty_Ns3DefaultDeleter__lt__ns3OutputStreamWrapper__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >'])
register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
register_Ns3SsServiceFlowManager_methods(root_module, root_module['ns3::SsServiceFlowManager'])
register_Ns3ThreeLogDistancePropagationLossModel_methods(root_module, root_module['ns3::ThreeLogDistancePropagationLossModel'])
register_Ns3Time_methods(root_module, root_module['ns3::Time'])
register_Ns3Tlv_methods(root_module, root_module['ns3::Tlv'])
register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
register_Ns3Trailer_methods(root_module, root_module['ns3::Trailer'])
register_Ns3TriangularRandomVariable_methods(root_module, root_module['ns3::TriangularRandomVariable'])
register_Ns3TwoRayGroundPropagationLossModel_methods(root_module, root_module['ns3::TwoRayGroundPropagationLossModel'])
register_Ns3Ucd_methods(root_module, root_module['ns3::Ucd'])
register_Ns3UlJob_methods(root_module, root_module['ns3::UlJob'])
register_Ns3UlMap_methods(root_module, root_module['ns3::UlMap'])
register_Ns3UniformRandomVariable_methods(root_module, root_module['ns3::UniformRandomVariable'])
register_Ns3UplinkScheduler_methods(root_module, root_module['ns3::UplinkScheduler'])
register_Ns3UplinkSchedulerMBQoS_methods(root_module, root_module['ns3::UplinkSchedulerMBQoS'])
register_Ns3UplinkSchedulerRtps_methods(root_module, root_module['ns3::UplinkSchedulerRtps'])
register_Ns3UplinkSchedulerSimple_methods(root_module, root_module['ns3::UplinkSchedulerSimple'])
register_Ns3WeibullRandomVariable_methods(root_module, root_module['ns3::WeibullRandomVariable'])
register_Ns3WimaxConnection_methods(root_module, root_module['ns3::WimaxConnection'])
register_Ns3WimaxMacQueue_methods(root_module, root_module['ns3::WimaxMacQueue'])
register_Ns3WimaxMacQueueQueueElement_methods(root_module, root_module['ns3::WimaxMacQueue::QueueElement'])
register_Ns3WimaxMacToMacHeader_methods(root_module, root_module['ns3::WimaxMacToMacHeader'])
register_Ns3WimaxPhy_methods(root_module, root_module['ns3::WimaxPhy'])
register_Ns3ZetaRandomVariable_methods(root_module, root_module['ns3::ZetaRandomVariable'])
register_Ns3ZipfRandomVariable_methods(root_module, root_module['ns3::ZipfRandomVariable'])
register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
register_Ns3BSScheduler_methods(root_module, root_module['ns3::BSScheduler'])
register_Ns3BSSchedulerRtps_methods(root_module, root_module['ns3::BSSchedulerRtps'])
register_Ns3BSSchedulerSimple_methods(root_module, root_module['ns3::BSSchedulerSimple'])
register_Ns3BandwidthRequestHeader_methods(root_module, root_module['ns3::BandwidthRequestHeader'])
register_Ns3BsServiceFlowManager_methods(root_module, root_module['ns3::BsServiceFlowManager'])
register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
register_Ns3Channel_methods(root_module, root_module['ns3::Channel'])
register_Ns3ConnectionManager_methods(root_module, root_module['ns3::ConnectionManager'])
register_Ns3ConstantRandomVariable_methods(root_module, root_module['ns3::ConstantRandomVariable'])
register_Ns3Dcd_methods(root_module, root_module['ns3::Dcd'])
register_Ns3DeterministicRandomVariable_methods(root_module, root_module['ns3::DeterministicRandomVariable'])
register_Ns3DlMap_methods(root_module, root_module['ns3::DlMap'])
register_Ns3DsaAck_methods(root_module, root_module['ns3::DsaAck'])
register_Ns3DsaReq_methods(root_module, root_module['ns3::DsaReq'])
register_Ns3DsaRsp_methods(root_module, root_module['ns3::DsaRsp'])
register_Ns3EmpiricalRandomVariable_methods(root_module, root_module['ns3::EmpiricalRandomVariable'])
register_Ns3EmptyAttributeAccessor_methods(root_module, root_module['ns3::EmptyAttributeAccessor'])
register_Ns3EmptyAttributeChecker_methods(root_module, root_module['ns3::EmptyAttributeChecker'])
register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
register_Ns3ErlangRandomVariable_methods(root_module, root_module['ns3::ErlangRandomVariable'])
register_Ns3EventImpl_methods(root_module, root_module['ns3::EventImpl'])
register_Ns3ExponentialRandomVariable_methods(root_module, root_module['ns3::ExponentialRandomVariable'])
register_Ns3FixedRssLossModel_methods(root_module, root_module['ns3::FixedRssLossModel'])
register_Ns3FragmentationSubheader_methods(root_module, root_module['ns3::FragmentationSubheader'])
register_Ns3FriisPropagationLossModel_methods(root_module, root_module['ns3::FriisPropagationLossModel'])
register_Ns3GammaRandomVariable_methods(root_module, root_module['ns3::GammaRandomVariable'])
register_Ns3GenericMacHeader_methods(root_module, root_module['ns3::GenericMacHeader'])
register_Ns3GrantManagementSubheader_methods(root_module, root_module['ns3::GrantManagementSubheader'])
register_Ns3IpcsClassifier_methods(root_module, root_module['ns3::IpcsClassifier'])
register_Ns3Ipv4AddressChecker_methods(root_module, root_module['ns3::Ipv4AddressChecker'])
register_Ns3Ipv4AddressValue_methods(root_module, root_module['ns3::Ipv4AddressValue'])
register_Ns3Ipv4MaskChecker_methods(root_module, root_module['ns3::Ipv4MaskChecker'])
register_Ns3Ipv4MaskValue_methods(root_module, root_module['ns3::Ipv4MaskValue'])
register_Ns3Ipv6AddressChecker_methods(root_module, root_module['ns3::Ipv6AddressChecker'])
register_Ns3Ipv6AddressValue_methods(root_module, root_module['ns3::Ipv6AddressValue'])
register_Ns3Ipv6PrefixChecker_methods(root_module, root_module['ns3::Ipv6PrefixChecker'])
register_Ns3Ipv6PrefixValue_methods(root_module, root_module['ns3::Ipv6PrefixValue'])
register_Ns3LogDistancePropagationLossModel_methods(root_module, root_module['ns3::LogDistancePropagationLossModel'])
register_Ns3LogNormalRandomVariable_methods(root_module, root_module['ns3::LogNormalRandomVariable'])
register_Ns3Mac48AddressChecker_methods(root_module, root_module['ns3::Mac48AddressChecker'])
register_Ns3Mac48AddressValue_methods(root_module, root_module['ns3::Mac48AddressValue'])
register_Ns3MatrixPropagationLossModel_methods(root_module, root_module['ns3::MatrixPropagationLossModel'])
register_Ns3NakagamiPropagationLossModel_methods(root_module, root_module['ns3::NakagamiPropagationLossModel'])
register_Ns3NetDevice_methods(root_module, root_module['ns3::NetDevice'])
register_Ns3NixVector_methods(root_module, root_module['ns3::NixVector'])
register_Ns3Node_methods(root_module, root_module['ns3::Node'])
register_Ns3NormalRandomVariable_methods(root_module, root_module['ns3::NormalRandomVariable'])
register_Ns3ObjectFactoryChecker_methods(root_module, root_module['ns3::ObjectFactoryChecker'])
register_Ns3ObjectFactoryValue_methods(root_module, root_module['ns3::ObjectFactoryValue'])
register_Ns3OutputStreamWrapper_methods(root_module, root_module['ns3::OutputStreamWrapper'])
register_Ns3Packet_methods(root_module, root_module['ns3::Packet'])
register_Ns3ParetoRandomVariable_methods(root_module, root_module['ns3::ParetoRandomVariable'])
register_Ns3SimpleOfdmWimaxPhy_methods(root_module, root_module['ns3::SimpleOfdmWimaxPhy'])
register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue'])
register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
register_Ns3UintegerValue_methods(root_module, root_module['ns3::UintegerValue'])
register_Ns3WimaxChannel_methods(root_module, root_module['ns3::WimaxChannel'])
register_Ns3WimaxNetDevice_methods(root_module, root_module['ns3::WimaxNetDevice'])
register_Ns3AddressChecker_methods(root_module, root_module['ns3::AddressChecker'])
register_Ns3AddressValue_methods(root_module, root_module['ns3::AddressValue'])
register_Ns3BaseStationNetDevice_methods(root_module, root_module['ns3::BaseStationNetDevice'])
register_Ns3CallbackImpl__Ns3ObjectBase___star___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
register_Ns3CallbackImpl__Void_Bool_Unsigned_long_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, bool, unsigned long, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
register_Ns3CallbackImpl__Void_Ns3Ptr__lt__const_ns3Packet__gt___Const_ns3Mac48Address___amp___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<const ns3::Packet>, const ns3::Mac48Address &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
register_Ns3CallbackImpl__Void_Ns3Ptr__lt__const_ns3Packet__gt___Ns3Mac48Address_Const_ns3Cid___amp___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<const ns3::Packet>, ns3::Mac48Address, const ns3::Cid &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
register_Ns3CallbackImpl__Void_Ns3Ptr__lt__const_ns3Packet__gt___Ns3Mac48Address_Ns3Cid_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<const ns3::Packet>, ns3::Mac48Address, ns3::Cid, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
register_Ns3CallbackImpl__Void_Ns3Ptr__lt__const_ns3Packet__gt___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<const ns3::Packet>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
register_Ns3CallbackImpl__Void_Ns3Ptr__lt__const_ns3PacketBurst__gt___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<const ns3::PacketBurst>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
register_Ns3CallbackImpl__Void_Ns3Ptr__lt__ns3NetDevice__gt___Ns3Ptr__lt__const_ns3Packet__gt___Unsigned_short_Const_ns3Address___amp___Const_ns3Address___amp___Ns3NetDevicePacketType_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<const ns3::Packet>, unsigned short, const ns3::Address &, const ns3::Address &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >'])
register_Ns3CallbackImpl__Void_Ns3Ptr__lt__ns3NetDevice__gt___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<ns3::NetDevice>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
register_Ns3CallbackImpl__Void_Ns3Ptr__lt__ns3PacketBurst__gt___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<ns3::PacketBurst>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
register_Ns3SimpleOfdmWimaxChannel_methods(root_module, root_module['ns3::SimpleOfdmWimaxChannel'])
register_Ns3SubscriberStationNetDevice_methods(root_module, root_module['ns3::SubscriberStationNetDevice'])
register_Ns3HashImplementation_methods(root_module, root_module['ns3::Hash::Implementation'])
register_Ns3HashFunctionFnv1a_methods(root_module, root_module['ns3::Hash::Function::Fnv1a'])
register_Ns3HashFunctionHash32_methods(root_module, root_module['ns3::Hash::Function::Hash32'])
register_Ns3HashFunctionHash64_methods(root_module, root_module['ns3::Hash::Function::Hash64'])
register_Ns3HashFunctionMurmur3_methods(root_module, root_module['ns3::Hash::Function::Murmur3'])
return
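## Per-class registration helpers. Each receives the wrapper created earlier
## by module.add_class() and declares its constructors, methods and operators;
## the "## <header> (module '...')" comments record the C++ declaration each
## binding mirrors.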
def register_Ns3Address_methods(root_module, cls):
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=')
cls.add_binary_comparison_operator('<')
cls.add_output_stream_operator()
## address.h (module 'network'): ns3::Address::Address() [constructor]
cls.add_constructor([])
## address.h (module 'network'): ns3::Address::Address(uint8_t type, uint8_t const * buffer, uint8_t len) [constructor]
cls.add_constructor([param('uint8_t', 'type'), param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
## address.h (module 'network'): ns3::Address::Address(ns3::Address const & address) [constructor]
cls.add_constructor([param('ns3::Address const &', 'address')])
## address.h (module 'network'): bool ns3::Address::CheckCompatible(uint8_t type, uint8_t len) const [member function]
cls.add_method('CheckCompatible',
'bool',
[param('uint8_t', 'type'), param('uint8_t', 'len')],
is_const=True)
## address.h (module 'network'): uint32_t ns3::Address::CopyAllFrom(uint8_t const * buffer, uint8_t len) [member function]
cls.add_method('CopyAllFrom',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
## address.h (module 'network'): uint32_t ns3::Address::CopyAllTo(uint8_t * buffer, uint8_t len) const [member function]
cls.add_method('CopyAllTo',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint8_t', 'len')],
is_const=True)
## address.h (module 'network'): uint32_t ns3::Address::CopyFrom(uint8_t const * buffer, uint8_t len) [member function]
cls.add_method('CopyFrom',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
## address.h (module 'network'): uint32_t ns3::Address::CopyTo(uint8_t * buffer) const [member function]
cls.add_method('CopyTo',
'uint32_t',
[param('uint8_t *', 'buffer')],
is_const=True)
## address.h (module 'network'): void ns3::Address::Deserialize(ns3::TagBuffer buffer) [member function]
cls.add_method('Deserialize',
'void',
[param('ns3::TagBuffer', 'buffer')])
## address.h (module 'network'): uint8_t ns3::Address::GetLength() const [member function]
cls.add_method('GetLength',
'uint8_t',
[],
is_const=True)
## address.h (module 'network'): uint32_t ns3::Address::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## address.h (module 'network'): bool ns3::Address::IsInvalid() const [member function]
cls.add_method('IsInvalid',
'bool',
[],
is_const=True)
## address.h (module 'network'): bool ns3::Address::IsMatchingType(uint8_t type) const [member function]
cls.add_method('IsMatchingType',
'bool',
[param('uint8_t', 'type')],
is_const=True)
## address.h (module 'network'): static uint8_t ns3::Address::Register() [member function]
cls.add_method('Register',
'uint8_t',
[],
is_static=True)
## address.h (module 'network'): void ns3::Address::Serialize(ns3::TagBuffer buffer) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::TagBuffer', 'buffer')],
is_const=True)
return
def register_Ns3AsciiTraceHelper_methods(root_module, cls):
## trace-helper.h (module 'network'): ns3::AsciiTraceHelper::AsciiTraceHelper(ns3::AsciiTraceHelper const & arg0) [constructor]
cls.add_constructor([param('ns3::AsciiTraceHelper const &', 'arg0')])
## trace-helper.h (module 'network'): ns3::AsciiTraceHelper::AsciiTraceHelper() [constructor]
cls.add_constructor([])
## trace-helper.h (module 'network'): ns3::Ptr<ns3::OutputStreamWrapper> ns3::AsciiTraceHelper::CreateFileStream(std::string filename, std::ios_base::openmode filemode=std::ios_base::out) [member function]
cls.add_method('CreateFileStream',
'ns3::Ptr< ns3::OutputStreamWrapper >',
[param('std::string', 'filename'), param('std::ios_base::openmode', 'filemode', default_value='std::ios_base::out')])
## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultDequeueSinkWithContext(ns3::Ptr<ns3::OutputStreamWrapper> file, std::string context, ns3::Ptr<const ns3::Packet> p) [member function]
cls.add_method('DefaultDequeueSinkWithContext',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('std::string', 'context'), param('ns3::Ptr< ns3::Packet const >', 'p')],
is_static=True)
## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultDequeueSinkWithoutContext(ns3::Ptr<ns3::OutputStreamWrapper> file, ns3::Ptr<const ns3::Packet> p) [member function]
cls.add_method('DefaultDequeueSinkWithoutContext',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('ns3::Ptr< ns3::Packet const >', 'p')],
is_static=True)
## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultDropSinkWithContext(ns3::Ptr<ns3::OutputStreamWrapper> file, std::string context, ns3::Ptr<const ns3::Packet> p) [member function]
cls.add_method('DefaultDropSinkWithContext',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('std::string', 'context'), param('ns3::Ptr< ns3::Packet const >', 'p')],
is_static=True)
## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultDropSinkWithoutContext(ns3::Ptr<ns3::OutputStreamWrapper> file, ns3::Ptr<const ns3::Packet> p) [member function]
cls.add_method('DefaultDropSinkWithoutContext',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('ns3::Ptr< ns3::Packet const >', 'p')],
is_static=True)
## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultEnqueueSinkWithContext(ns3::Ptr<ns3::OutputStreamWrapper> file, std::string context, ns3::Ptr<const ns3::Packet> p) [member function]
cls.add_method('DefaultEnqueueSinkWithContext',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('std::string', 'context'), param('ns3::Ptr< ns3::Packet const >', 'p')],
is_static=True)
## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultEnqueueSinkWithoutContext(ns3::Ptr<ns3::OutputStreamWrapper> file, ns3::Ptr<const ns3::Packet> p) [member function]
cls.add_method('DefaultEnqueueSinkWithoutContext',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('ns3::Ptr< ns3::Packet const >', 'p')],
is_static=True)
## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultReceiveSinkWithContext(ns3::Ptr<ns3::OutputStreamWrapper> file, std::string context, ns3::Ptr<const ns3::Packet> p) [member function]
cls.add_method('DefaultReceiveSinkWithContext',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('std::string', 'context'), param('ns3::Ptr< ns3::Packet const >', 'p')],
is_static=True)
## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultReceiveSinkWithoutContext(ns3::Ptr<ns3::OutputStreamWrapper> file, ns3::Ptr<const ns3::Packet> p) [member function]
cls.add_method('DefaultReceiveSinkWithoutContext',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('ns3::Ptr< ns3::Packet const >', 'p')],
is_static=True)
## trace-helper.h (module 'network'): std::string ns3::AsciiTraceHelper::GetFilenameFromDevice(std::string prefix, ns3::Ptr<ns3::NetDevice> device, bool useObjectNames=true) [member function]
cls.add_method('GetFilenameFromDevice',
'std::string',
[param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'device'), param('bool', 'useObjectNames', default_value='true')])
## trace-helper.h (module 'network'): std::string ns3::AsciiTraceHelper::GetFilenameFromInterfacePair(std::string prefix, ns3::Ptr<ns3::Object> object, uint32_t interface, bool useObjectNames=true) [member function]
cls.add_method('GetFilenameFromInterfacePair',
'std::string',
[param('std::string', 'prefix'), param('ns3::Ptr< ns3::Object >', 'object'), param('uint32_t', 'interface'), param('bool', 'useObjectNames', default_value='true')])
return
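# Usage sketch (comment only): AsciiTraceHelper is mainly used to create the
# output stream that the EnableAscii* calls of the next class write into;
# 'ns.network' is an assumed import path, 'device' a NetDevice Ptr.
#
#   ascii = ns.network.AsciiTraceHelper()
#   stream = ascii.CreateFileStream("wimax-simple.tr")
#   name = ascii.GetFilenameFromDevice("wimax-simple", device)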
def register_Ns3AsciiTraceHelperForDevice_methods(root_module, cls):
## trace-helper.h (module 'network'): ns3::AsciiTraceHelperForDevice::AsciiTraceHelperForDevice(ns3::AsciiTraceHelperForDevice const & arg0) [constructor]
cls.add_constructor([param('ns3::AsciiTraceHelperForDevice const &', 'arg0')])
## trace-helper.h (module 'network'): ns3::AsciiTraceHelperForDevice::AsciiTraceHelperForDevice() [constructor]
cls.add_constructor([])
## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(std::string prefix, ns3::Ptr<ns3::NetDevice> nd, bool explicitFilename=false) [member function]
cls.add_method('EnableAscii',
'void',
[param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'explicitFilename', default_value='false')])
## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(ns3::Ptr<ns3::OutputStreamWrapper> stream, ns3::Ptr<ns3::NetDevice> nd) [member function]
cls.add_method('EnableAscii',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('ns3::Ptr< ns3::NetDevice >', 'nd')])
## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(std::string prefix, std::string ndName, bool explicitFilename=false) [member function]
cls.add_method('EnableAscii',
'void',
[param('std::string', 'prefix'), param('std::string', 'ndName'), param('bool', 'explicitFilename', default_value='false')])
## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(ns3::Ptr<ns3::OutputStreamWrapper> stream, std::string ndName) [member function]
cls.add_method('EnableAscii',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('std::string', 'ndName')])
## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(std::string prefix, ns3::NetDeviceContainer d) [member function]
cls.add_method('EnableAscii',
'void',
[param('std::string', 'prefix'), param('ns3::NetDeviceContainer', 'd')])
## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(ns3::Ptr<ns3::OutputStreamWrapper> stream, ns3::NetDeviceContainer d) [member function]
cls.add_method('EnableAscii',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('ns3::NetDeviceContainer', 'd')])
## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(std::string prefix, ns3::NodeContainer n) [member function]
cls.add_method('EnableAscii',
'void',
[param('std::string', 'prefix'), param('ns3::NodeContainer', 'n')])
## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(ns3::Ptr<ns3::OutputStreamWrapper> stream, ns3::NodeContainer n) [member function]
cls.add_method('EnableAscii',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('ns3::NodeContainer', 'n')])
## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(std::string prefix, uint32_t nodeid, uint32_t deviceid, bool explicitFilename) [member function]
cls.add_method('EnableAscii',
'void',
[param('std::string', 'prefix'), param('uint32_t', 'nodeid'), param('uint32_t', 'deviceid'), param('bool', 'explicitFilename')])
## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(ns3::Ptr<ns3::OutputStreamWrapper> stream, uint32_t nodeid, uint32_t deviceid) [member function]
cls.add_method('EnableAscii',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('uint32_t', 'nodeid'), param('uint32_t', 'deviceid')])
## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAsciiAll(std::string prefix) [member function]
cls.add_method('EnableAsciiAll',
'void',
[param('std::string', 'prefix')])
## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAsciiAll(ns3::Ptr<ns3::OutputStreamWrapper> stream) [member function]
cls.add_method('EnableAsciiAll',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')])
## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAsciiInternal(ns3::Ptr<ns3::OutputStreamWrapper> stream, std::string prefix, ns3::Ptr<ns3::NetDevice> nd, bool explicitFilename) [member function]
cls.add_method('EnableAsciiInternal',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'explicitFilename')],
is_virtual=True, is_pure_virtual=True)
return
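# Note: EnableAsciiInternal is registered as pure virtual above, so
# AsciiTraceHelperForDevice is an abstract mixin; a concrete device helper
# implements it and inherits the EnableAscii/EnableAsciiAll overloads.
# Sketch (comment only; that WimaxHelper is such a helper is an assumption):
#
#   helper = ns.wimax.WimaxHelper()
#   helper.EnableAsciiAll(ascii.CreateFileStream("wimax.tr"))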
def register_Ns3AttributeConstructionList_methods(root_module, cls):
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList(ns3::AttributeConstructionList const & arg0) [constructor]
cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList() [constructor]
cls.add_constructor([])
## attribute-construction-list.h (module 'core'): void ns3::AttributeConstructionList::Add(std::string name, ns3::Ptr<const ns3::AttributeChecker> checker, ns3::Ptr<ns3::AttributeValue> value) [member function]
cls.add_method('Add',
'void',
[param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::CIterator ns3::AttributeConstructionList::Begin() const [member function]
cls.add_method('Begin',
'ns3::AttributeConstructionList::CIterator',
[],
is_const=True)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::CIterator ns3::AttributeConstructionList::End() const [member function]
cls.add_method('End',
'ns3::AttributeConstructionList::CIterator',
[],
is_const=True)
## attribute-construction-list.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeConstructionList::Find(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('Find',
'ns3::Ptr< ns3::AttributeValue >',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True)
return
def register_Ns3AttributeConstructionListItem_methods(root_module, cls):
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item() [constructor]
cls.add_constructor([])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item(ns3::AttributeConstructionList::Item const & arg0) [constructor]
cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::checker [variable]
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::value [variable]
cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False)
return
def register_Ns3Buffer_methods(root_module, cls):
## buffer.h (module 'network'): ns3::Buffer::Buffer(ns3::Buffer const & o) [constructor]
cls.add_constructor([param('ns3::Buffer const &', 'o')])
## buffer.h (module 'network'): ns3::Buffer::Buffer() [constructor]
cls.add_constructor([])
## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize) [constructor]
cls.add_constructor([param('uint32_t', 'dataSize')])
## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize, bool initialize) [constructor]
cls.add_constructor([param('uint32_t', 'dataSize'), param('bool', 'initialize')])
## buffer.h (module 'network'): void ns3::Buffer::AddAtEnd(uint32_t end) [member function]
cls.add_method('AddAtEnd',
'void',
[param('uint32_t', 'end')])
## buffer.h (module 'network'): void ns3::Buffer::AddAtEnd(ns3::Buffer const & o) [member function]
cls.add_method('AddAtEnd',
'void',
[param('ns3::Buffer const &', 'o')])
## buffer.h (module 'network'): void ns3::Buffer::AddAtStart(uint32_t start) [member function]
cls.add_method('AddAtStart',
'void',
[param('uint32_t', 'start')])
## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::Begin() const [member function]
cls.add_method('Begin',
'ns3::Buffer::Iterator',
[],
is_const=True)
## buffer.h (module 'network'): void ns3::Buffer::CopyData(std::ostream * os, uint32_t size) const [member function]
cls.add_method('CopyData',
'void',
[param('std::ostream *', 'os'), param('uint32_t', 'size')],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::CopyData(uint8_t * buffer, uint32_t size) const [member function]
cls.add_method('CopyData',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')],
is_const=True)
## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFragment(uint32_t start, uint32_t length) const [member function]
cls.add_method('CreateFragment',
'ns3::Buffer',
[param('uint32_t', 'start'), param('uint32_t', 'length')],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::End() const [member function]
cls.add_method('End',
'ns3::Buffer::Iterator',
[],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSize() const [member function]
cls.add_method('GetSize',
'uint32_t',
[],
is_const=True)
## buffer.h (module 'network'): uint8_t const * ns3::Buffer::PeekData() const [member function]
cls.add_method('PeekData',
'uint8_t const *',
[],
is_const=True)
## buffer.h (module 'network'): void ns3::Buffer::RemoveAtEnd(uint32_t end) [member function]
cls.add_method('RemoveAtEnd',
'void',
[param('uint32_t', 'end')])
## buffer.h (module 'network'): void ns3::Buffer::RemoveAtStart(uint32_t start) [member function]
cls.add_method('RemoveAtStart',
'void',
[param('uint32_t', 'start')])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
return
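# Usage sketch (comment only): the Buffer registrations above mirror the C++
# API; data grows or shrinks at either end and Begin()/End() hand back the
# iterators registered next.
#
#   buf = ns.network.Buffer(10)            # Buffer(uint32_t dataSize)
#   buf.AddAtStart(4)
#   buf.RemoveAtEnd(2)
#   buf.GetSize()                          # 12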
def register_Ns3BufferIterator_methods(root_module, cls):
## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator(ns3::Buffer::Iterator const & arg0) [constructor]
cls.add_constructor([param('ns3::Buffer::Iterator const &', 'arg0')])
## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator() [constructor]
cls.add_constructor([])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size) [member function]
cls.add_method('CalculateIpChecksum',
'uint16_t',
[param('uint16_t', 'size')])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size, uint32_t initialChecksum) [member function]
cls.add_method('CalculateIpChecksum',
'uint16_t',
[param('uint16_t', 'size'), param('uint32_t', 'initialChecksum')])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetDistanceFrom(ns3::Buffer::Iterator const & o) const [member function]
cls.add_method('GetDistanceFrom',
'uint32_t',
[param('ns3::Buffer::Iterator const &', 'o')],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetRemainingSize() const [member function]
cls.add_method('GetRemainingSize',
'uint32_t',
[],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetSize() const [member function]
cls.add_method('GetSize',
'uint32_t',
[],
is_const=True)
## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsEnd() const [member function]
cls.add_method('IsEnd',
'bool',
[],
is_const=True)
## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsStart() const [member function]
cls.add_method('IsStart',
'bool',
[],
is_const=True)
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next() [member function]
cls.add_method('Next',
'void',
[])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next(uint32_t delta) [member function]
cls.add_method('Next',
'void',
[param('uint32_t', 'delta')])
## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::PeekU8() [member function]
cls.add_method('PeekU8',
'uint8_t',
[])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev() [member function]
cls.add_method('Prev',
'void',
[])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev(uint32_t delta) [member function]
cls.add_method('Prev',
'void',
[param('uint32_t', 'delta')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(uint8_t * buffer, uint32_t size) [member function]
cls.add_method('Read',
'void',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(ns3::Buffer::Iterator start, uint32_t size) [member function]
cls.add_method('Read',
'void',
[param('ns3::Buffer::Iterator', 'start'), param('uint32_t', 'size')])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadLsbtohU16() [member function]
cls.add_method('ReadLsbtohU16',
'uint16_t',
[])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadLsbtohU32() [member function]
cls.add_method('ReadLsbtohU32',
'uint32_t',
[])
## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadLsbtohU64() [member function]
cls.add_method('ReadLsbtohU64',
'uint64_t',
[])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadNtohU16() [member function]
cls.add_method('ReadNtohU16',
'uint16_t',
[])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadNtohU32() [member function]
cls.add_method('ReadNtohU32',
'uint32_t',
[])
## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadNtohU64() [member function]
cls.add_method('ReadNtohU64',
'uint64_t',
[])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadU16() [member function]
cls.add_method('ReadU16',
'uint16_t',
[])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadU32() [member function]
cls.add_method('ReadU32',
'uint32_t',
[])
## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadU64() [member function]
cls.add_method('ReadU64',
'uint64_t',
[])
## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::ReadU8() [member function]
cls.add_method('ReadU8',
'uint8_t',
[])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Write',
'void',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(ns3::Buffer::Iterator start, ns3::Buffer::Iterator end) [member function]
cls.add_method('Write',
'void',
[param('ns3::Buffer::Iterator', 'start'), param('ns3::Buffer::Iterator', 'end')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU16(uint16_t data) [member function]
cls.add_method('WriteHtolsbU16',
'void',
[param('uint16_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU32(uint32_t data) [member function]
cls.add_method('WriteHtolsbU32',
'void',
[param('uint32_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU64(uint64_t data) [member function]
cls.add_method('WriteHtolsbU64',
'void',
[param('uint64_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU16(uint16_t data) [member function]
cls.add_method('WriteHtonU16',
'void',
[param('uint16_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU32(uint32_t data) [member function]
cls.add_method('WriteHtonU32',
'void',
[param('uint32_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU64(uint64_t data) [member function]
cls.add_method('WriteHtonU64',
'void',
[param('uint64_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU16(uint16_t data) [member function]
cls.add_method('WriteU16',
'void',
[param('uint16_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU32(uint32_t data) [member function]
cls.add_method('WriteU32',
'void',
[param('uint32_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU64(uint64_t data) [member function]
cls.add_method('WriteU64',
'void',
[param('uint64_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data) [member function]
cls.add_method('WriteU8',
'void',
[param('uint8_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data, uint32_t len) [member function]
cls.add_method('WriteU8',
'void',
[param('uint8_t', 'data'), param('uint32_t', 'len')])
return
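# Usage sketch (comment only): iterators returned by Buffer.Begin()/End()
# write into and read from the owning buffer's data.
#
#   buf = ns.network.Buffer(4)
#   it = buf.Begin()
#   it.WriteHtonU16(0x1234)
#   it.WriteHtonU16(0x5678)
#   it = buf.Begin()
#   it.ReadNtohU16()                       # 0x1234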
def register_Ns3ByteTagIterator_methods(root_module, cls):
## packet.h (module 'network'): ns3::ByteTagIterator::ByteTagIterator(ns3::ByteTagIterator const & arg0) [constructor]
cls.add_constructor([param('ns3::ByteTagIterator const &', 'arg0')])
## packet.h (module 'network'): bool ns3::ByteTagIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## packet.h (module 'network'): ns3::ByteTagIterator::Item ns3::ByteTagIterator::Next() [member function]
cls.add_method('Next',
'ns3::ByteTagIterator::Item',
[])
return
def register_Ns3ByteTagIteratorItem_methods(root_module, cls):
## packet.h (module 'network'): ns3::ByteTagIterator::Item::Item(ns3::ByteTagIterator::Item const & arg0) [constructor]
cls.add_constructor([param('ns3::ByteTagIterator::Item const &', 'arg0')])
## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetEnd() const [member function]
cls.add_method('GetEnd',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetStart() const [member function]
cls.add_method('GetStart',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): void ns3::ByteTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
cls.add_method('GetTag',
'void',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): ns3::TypeId ns3::ByteTagIterator::Item::GetTypeId() const [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_const=True)
return
def register_Ns3ByteTagList_methods(root_module, cls):
## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList() [constructor]
cls.add_constructor([])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList(ns3::ByteTagList const & o) [constructor]
cls.add_constructor([param('ns3::ByteTagList const &', 'o')])
## byte-tag-list.h (module 'network'): ns3::TagBuffer ns3::ByteTagList::Add(ns3::TypeId tid, uint32_t bufferSize, int32_t start, int32_t end) [member function]
cls.add_method('Add',
'ns3::TagBuffer',
[param('ns3::TypeId', 'tid'), param('uint32_t', 'bufferSize'), param('int32_t', 'start'), param('int32_t', 'end')])
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::Add(ns3::ByteTagList const & o) [member function]
cls.add_method('Add',
'void',
[param('ns3::ByteTagList const &', 'o')])
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtEnd(int32_t appendOffset) [member function]
cls.add_method('AddAtEnd',
'void',
[param('int32_t', 'appendOffset')])
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtStart(int32_t prependOffset) [member function]
cls.add_method('AddAtStart',
'void',
[param('int32_t', 'prependOffset')])
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::Adjust(int32_t adjustment) [member function]
cls.add_method('Adjust',
'void',
[param('int32_t', 'adjustment')])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator ns3::ByteTagList::Begin(int32_t offsetStart, int32_t offsetEnd) const [member function]
cls.add_method('Begin',
'ns3::ByteTagList::Iterator',
[param('int32_t', 'offsetStart'), param('int32_t', 'offsetEnd')],
is_const=True)
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::RemoveAll() [member function]
cls.add_method('RemoveAll',
'void',
[])
return
def register_Ns3ByteTagListIterator_methods(root_module, cls):
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Iterator(ns3::ByteTagList::Iterator const & arg0) [constructor]
cls.add_constructor([param('ns3::ByteTagList::Iterator const &', 'arg0')])
## byte-tag-list.h (module 'network'): uint32_t ns3::ByteTagList::Iterator::GetOffsetStart() const [member function]
cls.add_method('GetOffsetStart',
'uint32_t',
[],
is_const=True)
## byte-tag-list.h (module 'network'): bool ns3::ByteTagList::Iterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item ns3::ByteTagList::Iterator::Next() [member function]
cls.add_method('Next',
'ns3::ByteTagList::Iterator::Item',
[])
return
def register_Ns3ByteTagListIteratorItem_methods(root_module, cls):
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::ByteTagList::Iterator::Item const & arg0) [constructor]
cls.add_constructor([param('ns3::ByteTagList::Iterator::Item const &', 'arg0')])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::TagBuffer buf) [constructor]
cls.add_constructor([param('ns3::TagBuffer', 'buf')])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::buf [variable]
cls.add_instance_attribute('buf', 'ns3::TagBuffer', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::end [variable]
cls.add_instance_attribute('end', 'int32_t', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::size [variable]
cls.add_instance_attribute('size', 'uint32_t', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::start [variable]
cls.add_instance_attribute('start', 'int32_t', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::tid [variable]
cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
return
def register_Ns3CallbackBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function]
cls.add_method('GetImpl',
'ns3::Ptr< ns3::CallbackImplBase >',
[],
is_const=True)
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')],
visibility='protected')
return
def register_Ns3Cid_methods(root_module, cls):
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
## cid.h (module 'wimax'): ns3::Cid::Cid(ns3::Cid const & arg0) [constructor]
cls.add_constructor([param('ns3::Cid const &', 'arg0')])
## cid.h (module 'wimax'): ns3::Cid::Cid() [constructor]
cls.add_constructor([])
## cid.h (module 'wimax'): ns3::Cid::Cid(uint16_t cid) [constructor]
cls.add_constructor([param('uint16_t', 'cid')])
## cid.h (module 'wimax'): static ns3::Cid ns3::Cid::Broadcast() [member function]
cls.add_method('Broadcast',
'ns3::Cid',
[],
is_static=True)
## cid.h (module 'wimax'): uint16_t ns3::Cid::GetIdentifier() const [member function]
cls.add_method('GetIdentifier',
'uint16_t',
[],
is_const=True)
## cid.h (module 'wimax'): static ns3::Cid ns3::Cid::InitialRanging() [member function]
cls.add_method('InitialRanging',
'ns3::Cid',
[],
is_static=True)
## cid.h (module 'wimax'): bool ns3::Cid::IsBroadcast() const [member function]
cls.add_method('IsBroadcast',
'bool',
[],
is_const=True)
## cid.h (module 'wimax'): bool ns3::Cid::IsInitialRanging() const [member function]
cls.add_method('IsInitialRanging',
'bool',
[],
is_const=True)
## cid.h (module 'wimax'): bool ns3::Cid::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_const=True)
## cid.h (module 'wimax'): bool ns3::Cid::IsPadding() const [member function]
cls.add_method('IsPadding',
'bool',
[],
is_const=True)
## cid.h (module 'wimax'): static ns3::Cid ns3::Cid::Padding() [member function]
cls.add_method('Padding',
'ns3::Cid',
[],
is_static=True)
return
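# Usage sketch (comment only, 'ns.wimax' import path assumed):
#
#   import ns.wimax
#   cid = ns.wimax.Cid(5)
#   cid.GetIdentifier()                    # 5
#   ns.wimax.Cid.Broadcast().IsBroadcast() # True
#   cid == ns.wimax.Cid(5)                 # operator== registered above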
def register_Ns3CidFactory_methods(root_module, cls):
## cid-factory.h (module 'wimax'): ns3::CidFactory::CidFactory(ns3::CidFactory const & arg0) [constructor]
cls.add_constructor([param('ns3::CidFactory const &', 'arg0')])
## cid-factory.h (module 'wimax'): ns3::CidFactory::CidFactory() [constructor]
cls.add_constructor([])
## cid-factory.h (module 'wimax'): ns3::Cid ns3::CidFactory::Allocate(ns3::Cid::Type type) [member function]
cls.add_method('Allocate',
'ns3::Cid',
[param('ns3::Cid::Type', 'type')])
## cid-factory.h (module 'wimax'): ns3::Cid ns3::CidFactory::AllocateBasic() [member function]
cls.add_method('AllocateBasic',
'ns3::Cid',
[])
## cid-factory.h (module 'wimax'): ns3::Cid ns3::CidFactory::AllocateMulticast() [member function]
cls.add_method('AllocateMulticast',
'ns3::Cid',
[])
## cid-factory.h (module 'wimax'): ns3::Cid ns3::CidFactory::AllocatePrimary() [member function]
cls.add_method('AllocatePrimary',
'ns3::Cid',
[])
## cid-factory.h (module 'wimax'): ns3::Cid ns3::CidFactory::AllocateTransportOrSecondary() [member function]
cls.add_method('AllocateTransportOrSecondary',
'ns3::Cid',
[])
## cid-factory.h (module 'wimax'): void ns3::CidFactory::FreeCid(ns3::Cid cid) [member function]
cls.add_method('FreeCid',
'void',
[param('ns3::Cid', 'cid')])
## cid-factory.h (module 'wimax'): bool ns3::CidFactory::IsBasic(ns3::Cid cid) const [member function]
cls.add_method('IsBasic',
'bool',
[param('ns3::Cid', 'cid')],
is_const=True)
## cid-factory.h (module 'wimax'): bool ns3::CidFactory::IsPrimary(ns3::Cid cid) const [member function]
cls.add_method('IsPrimary',
'bool',
[param('ns3::Cid', 'cid')],
is_const=True)
## cid-factory.h (module 'wimax'): bool ns3::CidFactory::IsTransport(ns3::Cid cid) const [member function]
cls.add_method('IsTransport',
'bool',
[param('ns3::Cid', 'cid')],
is_const=True)
return
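# Usage sketch (comment only): CidFactory hands out Cid values from separate
# basic/primary/transport ranges and can classify them afterwards.
#
#   factory = ns.wimax.CidFactory()
#   basic = factory.AllocateBasic()
#   primary = factory.AllocatePrimary()
#   factory.IsBasic(basic), factory.IsPrimary(primary)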
def register_Ns3CsParameters_methods(root_module, cls):
## cs-parameters.h (module 'wimax'): ns3::CsParameters::CsParameters(ns3::CsParameters const & arg0) [constructor]
cls.add_constructor([param('ns3::CsParameters const &', 'arg0')])
## cs-parameters.h (module 'wimax'): ns3::CsParameters::CsParameters() [constructor]
cls.add_constructor([])
## cs-parameters.h (module 'wimax'): ns3::CsParameters::CsParameters(ns3::Tlv tlv) [constructor]
cls.add_constructor([param('ns3::Tlv', 'tlv')])
## cs-parameters.h (module 'wimax'): ns3::CsParameters::CsParameters(ns3::CsParameters::Action classifierDscAction, ns3::IpcsClassifierRecord classifier) [constructor]
cls.add_constructor([param('ns3::CsParameters::Action', 'classifierDscAction'), param('ns3::IpcsClassifierRecord', 'classifier')])
## cs-parameters.h (module 'wimax'): ns3::CsParameters::Action ns3::CsParameters::GetClassifierDscAction() const [member function]
cls.add_method('GetClassifierDscAction',
'ns3::CsParameters::Action',
[],
is_const=True)
## cs-parameters.h (module 'wimax'): ns3::IpcsClassifierRecord ns3::CsParameters::GetPacketClassifierRule() const [member function]
cls.add_method('GetPacketClassifierRule',
'ns3::IpcsClassifierRecord',
[],
is_const=True)
## cs-parameters.h (module 'wimax'): void ns3::CsParameters::SetClassifierDscAction(ns3::CsParameters::Action action) [member function]
cls.add_method('SetClassifierDscAction',
'void',
[param('ns3::CsParameters::Action', 'action')])
## cs-parameters.h (module 'wimax'): void ns3::CsParameters::SetPacketClassifierRule(ns3::IpcsClassifierRecord packetClassifierRule) [member function]
cls.add_method('SetPacketClassifierRule',
'void',
[param('ns3::IpcsClassifierRecord', 'packetClassifierRule')])
## cs-parameters.h (module 'wimax'): ns3::Tlv ns3::CsParameters::ToTlv() const [member function]
cls.add_method('ToTlv',
'ns3::Tlv',
[],
is_const=True)
return
def register_Ns3DcdChannelEncodings_methods(root_module, cls):
## dl-mac-messages.h (module 'wimax'): ns3::DcdChannelEncodings::DcdChannelEncodings(ns3::DcdChannelEncodings const & arg0) [constructor]
cls.add_constructor([param('ns3::DcdChannelEncodings const &', 'arg0')])
## dl-mac-messages.h (module 'wimax'): ns3::DcdChannelEncodings::DcdChannelEncodings() [constructor]
cls.add_constructor([])
## dl-mac-messages.h (module 'wimax'): uint16_t ns3::DcdChannelEncodings::GetBsEirp() const [member function]
cls.add_method('GetBsEirp',
'uint16_t',
[],
is_const=True)
## dl-mac-messages.h (module 'wimax'): uint16_t ns3::DcdChannelEncodings::GetEirxPIrMax() const [member function]
cls.add_method('GetEirxPIrMax',
'uint16_t',
[],
is_const=True)
## dl-mac-messages.h (module 'wimax'): uint32_t ns3::DcdChannelEncodings::GetFrequency() const [member function]
cls.add_method('GetFrequency',
'uint32_t',
[],
is_const=True)
## dl-mac-messages.h (module 'wimax'): uint16_t ns3::DcdChannelEncodings::GetSize() const [member function]
cls.add_method('GetSize',
'uint16_t',
[],
is_const=True)
## dl-mac-messages.h (module 'wimax'): ns3::Buffer::Iterator ns3::DcdChannelEncodings::Read(ns3::Buffer::Iterator start) [member function]
cls.add_method('Read',
'ns3::Buffer::Iterator',
[param('ns3::Buffer::Iterator', 'start')])
## dl-mac-messages.h (module 'wimax'): void ns3::DcdChannelEncodings::SetBsEirp(uint16_t bs_eirp) [member function]
cls.add_method('SetBsEirp',
'void',
[param('uint16_t', 'bs_eirp')])
## dl-mac-messages.h (module 'wimax'): void ns3::DcdChannelEncodings::SetEirxPIrMax(uint16_t rss_ir_max) [member function]
cls.add_method('SetEirxPIrMax',
'void',
[param('uint16_t', 'rss_ir_max')])
## dl-mac-messages.h (module 'wimax'): void ns3::DcdChannelEncodings::SetFrequency(uint32_t frequency) [member function]
cls.add_method('SetFrequency',
'void',
[param('uint32_t', 'frequency')])
## dl-mac-messages.h (module 'wimax'): ns3::Buffer::Iterator ns3::DcdChannelEncodings::Write(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Write',
'ns3::Buffer::Iterator',
[param('ns3::Buffer::Iterator', 'start')],
is_const=True)
## dl-mac-messages.h (module 'wimax'): ns3::Buffer::Iterator ns3::DcdChannelEncodings::DoRead(ns3::Buffer::Iterator start) [member function]
cls.add_method('DoRead',
'ns3::Buffer::Iterator',
[param('ns3::Buffer::Iterator', 'start')],
is_virtual=True, is_pure_virtual=True, visibility='private')
## dl-mac-messages.h (module 'wimax'): ns3::Buffer::Iterator ns3::DcdChannelEncodings::DoWrite(ns3::Buffer::Iterator start) const [member function]
cls.add_method('DoWrite',
'ns3::Buffer::Iterator',
[param('ns3::Buffer::Iterator', 'start')],
is_const=True, is_virtual=True, is_pure_virtual=True, visibility='private')
return
def register_Ns3DefaultDeleter__Ns3AttributeAccessor_methods(root_module, cls):
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeAccessor>::DefaultDeleter() [constructor]
cls.add_constructor([])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeAccessor>::DefaultDeleter(ns3::DefaultDeleter<ns3::AttributeAccessor> const & arg0) [constructor]
cls.add_constructor([param('ns3::DefaultDeleter< ns3::AttributeAccessor > const &', 'arg0')])
## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::AttributeAccessor>::Delete(ns3::AttributeAccessor * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::AttributeAccessor *', 'object')],
is_static=True)
return
def register_Ns3DefaultDeleter__Ns3AttributeChecker_methods(root_module, cls):
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeChecker>::DefaultDeleter() [constructor]
cls.add_constructor([])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeChecker>::DefaultDeleter(ns3::DefaultDeleter<ns3::AttributeChecker> const & arg0) [constructor]
cls.add_constructor([param('ns3::DefaultDeleter< ns3::AttributeChecker > const &', 'arg0')])
## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::AttributeChecker>::Delete(ns3::AttributeChecker * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::AttributeChecker *', 'object')],
is_static=True)
return
def register_Ns3DefaultDeleter__Ns3AttributeValue_methods(root_module, cls):
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeValue>::DefaultDeleter() [constructor]
cls.add_constructor([])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeValue>::DefaultDeleter(ns3::DefaultDeleter<ns3::AttributeValue> const & arg0) [constructor]
cls.add_constructor([param('ns3::DefaultDeleter< ns3::AttributeValue > const &', 'arg0')])
## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::AttributeValue>::Delete(ns3::AttributeValue * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::AttributeValue *', 'object')],
is_static=True)
return
def register_Ns3DefaultDeleter__Ns3CallbackImplBase_methods(root_module, cls):
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::CallbackImplBase>::DefaultDeleter() [constructor]
cls.add_constructor([])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::CallbackImplBase>::DefaultDeleter(ns3::DefaultDeleter<ns3::CallbackImplBase> const & arg0) [constructor]
cls.add_constructor([param('ns3::DefaultDeleter< ns3::CallbackImplBase > const &', 'arg0')])
## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::CallbackImplBase>::Delete(ns3::CallbackImplBase * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::CallbackImplBase *', 'object')],
is_static=True)
return
def register_Ns3DefaultDeleter__Ns3EventImpl_methods(root_module, cls):
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::EventImpl>::DefaultDeleter() [constructor]
cls.add_constructor([])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::EventImpl>::DefaultDeleter(ns3::DefaultDeleter<ns3::EventImpl> const & arg0) [constructor]
cls.add_constructor([param('ns3::DefaultDeleter< ns3::EventImpl > const &', 'arg0')])
## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::EventImpl>::Delete(ns3::EventImpl * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::EventImpl *', 'object')],
is_static=True)
return
def register_Ns3DefaultDeleter__Ns3HashImplementation_methods(root_module, cls):
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::Hash::Implementation>::DefaultDeleter() [constructor]
cls.add_constructor([])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::Hash::Implementation>::DefaultDeleter(ns3::DefaultDeleter<ns3::Hash::Implementation> const & arg0) [constructor]
cls.add_constructor([param('ns3::DefaultDeleter< ns3::Hash::Implementation > const &', 'arg0')])
## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::Hash::Implementation>::Delete(ns3::Hash::Implementation * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::Hash::Implementation *', 'object')],
is_static=True)
return
def register_Ns3DefaultDeleter__Ns3NixVector_methods(root_module, cls):
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::NixVector>::DefaultDeleter() [constructor]
cls.add_constructor([])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::NixVector>::DefaultDeleter(ns3::DefaultDeleter<ns3::NixVector> const & arg0) [constructor]
cls.add_constructor([param('ns3::DefaultDeleter< ns3::NixVector > const &', 'arg0')])
## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::NixVector>::Delete(ns3::NixVector * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::NixVector *', 'object')],
is_static=True)
return
def register_Ns3DefaultDeleter__Ns3OutputStreamWrapper_methods(root_module, cls):
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::OutputStreamWrapper>::DefaultDeleter() [constructor]
cls.add_constructor([])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::OutputStreamWrapper>::DefaultDeleter(ns3::DefaultDeleter<ns3::OutputStreamWrapper> const & arg0) [constructor]
cls.add_constructor([param('ns3::DefaultDeleter< ns3::OutputStreamWrapper > const &', 'arg0')])
## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::OutputStreamWrapper>::Delete(ns3::OutputStreamWrapper * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::OutputStreamWrapper *', 'object')],
is_static=True)
return
def register_Ns3DefaultDeleter__Ns3Packet_methods(root_module, cls):
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::Packet>::DefaultDeleter() [constructor]
cls.add_constructor([])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::Packet>::DefaultDeleter(ns3::DefaultDeleter<ns3::Packet> const & arg0) [constructor]
cls.add_constructor([param('ns3::DefaultDeleter< ns3::Packet > const &', 'arg0')])
## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::Packet>::Delete(ns3::Packet * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::Packet *', 'object')],
is_static=True)
return
def register_Ns3DefaultDeleter__Ns3TraceSourceAccessor_methods(root_module, cls):
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::TraceSourceAccessor>::DefaultDeleter() [constructor]
cls.add_constructor([])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::TraceSourceAccessor>::DefaultDeleter(ns3::DefaultDeleter<ns3::TraceSourceAccessor> const & arg0) [constructor]
cls.add_constructor([param('ns3::DefaultDeleter< ns3::TraceSourceAccessor > const &', 'arg0')])
## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::TraceSourceAccessor>::Delete(ns3::TraceSourceAccessor * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::TraceSourceAccessor *', 'object')],
is_static=True)
return
def register_Ns3DlFramePrefixIe_methods(root_module, cls):
## ofdm-downlink-frame-prefix.h (module 'wimax'): ns3::DlFramePrefixIe::DlFramePrefixIe(ns3::DlFramePrefixIe const & arg0) [constructor]
cls.add_constructor([param('ns3::DlFramePrefixIe const &', 'arg0')])
## ofdm-downlink-frame-prefix.h (module 'wimax'): ns3::DlFramePrefixIe::DlFramePrefixIe() [constructor]
cls.add_constructor([])
## ofdm-downlink-frame-prefix.h (module 'wimax'): uint8_t ns3::DlFramePrefixIe::GetDiuc() const [member function]
cls.add_method('GetDiuc',
'uint8_t',
[],
is_const=True)
## ofdm-downlink-frame-prefix.h (module 'wimax'): uint16_t ns3::DlFramePrefixIe::GetLength() const [member function]
cls.add_method('GetLength',
'uint16_t',
[],
is_const=True)
## ofdm-downlink-frame-prefix.h (module 'wimax'): uint8_t ns3::DlFramePrefixIe::GetPreamblePresent() const [member function]
cls.add_method('GetPreamblePresent',
'uint8_t',
[],
is_const=True)
## ofdm-downlink-frame-prefix.h (module 'wimax'): uint8_t ns3::DlFramePrefixIe::GetRateId() const [member function]
cls.add_method('GetRateId',
'uint8_t',
[],
is_const=True)
## ofdm-downlink-frame-prefix.h (module 'wimax'): uint16_t ns3::DlFramePrefixIe::GetSize() const [member function]
cls.add_method('GetSize',
'uint16_t',
[],
is_const=True)
## ofdm-downlink-frame-prefix.h (module 'wimax'): uint16_t ns3::DlFramePrefixIe::GetStartTime() const [member function]
cls.add_method('GetStartTime',
'uint16_t',
[],
is_const=True)
## ofdm-downlink-frame-prefix.h (module 'wimax'): ns3::Buffer::Iterator ns3::DlFramePrefixIe::Read(ns3::Buffer::Iterator start) [member function]
cls.add_method('Read',
'ns3::Buffer::Iterator',
[param('ns3::Buffer::Iterator', 'start')])
## ofdm-downlink-frame-prefix.h (module 'wimax'): void ns3::DlFramePrefixIe::SetDiuc(uint8_t diuc) [member function]
cls.add_method('SetDiuc',
'void',
[param('uint8_t', 'diuc')])
## ofdm-downlink-frame-prefix.h (module 'wimax'): void ns3::DlFramePrefixIe::SetLength(uint16_t length) [member function]
cls.add_method('SetLength',
'void',
[param('uint16_t', 'length')])
## ofdm-downlink-frame-prefix.h (module 'wimax'): void ns3::DlFramePrefixIe::SetPreamblePresent(uint8_t preamblePresent) [member function]
cls.add_method('SetPreamblePresent',
'void',
[param('uint8_t', 'preamblePresent')])
## ofdm-downlink-frame-prefix.h (module 'wimax'): void ns3::DlFramePrefixIe::SetRateId(uint8_t rateId) [member function]
cls.add_method('SetRateId',
'void',
[param('uint8_t', 'rateId')])
## ofdm-downlink-frame-prefix.h (module 'wimax'): void ns3::DlFramePrefixIe::SetStartTime(uint16_t startTime) [member function]
cls.add_method('SetStartTime',
'void',
[param('uint16_t', 'startTime')])
## ofdm-downlink-frame-prefix.h (module 'wimax'): ns3::Buffer::Iterator ns3::DlFramePrefixIe::Write(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Write',
'ns3::Buffer::Iterator',
[param('ns3::Buffer::Iterator', 'start')],
is_const=True)
return
def register_Ns3EventId_methods(root_module, cls):
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=')
cls.add_binary_comparison_operator('<')
## event-id.h (module 'core'): ns3::EventId::EventId(ns3::EventId const & arg0) [constructor]
cls.add_constructor([param('ns3::EventId const &', 'arg0')])
## event-id.h (module 'core'): ns3::EventId::EventId() [constructor]
cls.add_constructor([])
## event-id.h (module 'core'): ns3::EventId::EventId(ns3::Ptr<ns3::EventImpl> const & impl, uint64_t ts, uint32_t context, uint32_t uid) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::EventImpl > const &', 'impl'), param('uint64_t', 'ts'), param('uint32_t', 'context'), param('uint32_t', 'uid')])
## event-id.h (module 'core'): void ns3::EventId::Cancel() [member function]
cls.add_method('Cancel',
'void',
[])
## event-id.h (module 'core'): uint32_t ns3::EventId::GetContext() const [member function]
cls.add_method('GetContext',
'uint32_t',
[],
is_const=True)
## event-id.h (module 'core'): uint64_t ns3::EventId::GetTs() const [member function]
cls.add_method('GetTs',
'uint64_t',
[],
is_const=True)
## event-id.h (module 'core'): uint32_t ns3::EventId::GetUid() const [member function]
cls.add_method('GetUid',
'uint32_t',
[],
is_const=True)
## event-id.h (module 'core'): bool ns3::EventId::IsExpired() const [member function]
cls.add_method('IsExpired',
'bool',
[],
is_const=True)
## event-id.h (module 'core'): bool ns3::EventId::IsRunning() const [member function]
cls.add_method('IsRunning',
'bool',
[],
is_const=True)
## event-id.h (module 'core'): ns3::EventImpl * ns3::EventId::PeekEventImpl() const [member function]
cls.add_method('PeekEventImpl',
'ns3::EventImpl *',
[],
is_const=True)
return
def register_Ns3Hasher_methods(root_module, cls):
## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Hasher const & arg0) [constructor]
cls.add_constructor([param('ns3::Hasher const &', 'arg0')])
## hash.h (module 'core'): ns3::Hasher::Hasher() [constructor]
cls.add_constructor([])
## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Ptr<ns3::Hash::Implementation> hp) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::Hash::Implementation >', 'hp')])
## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(char const * buffer, std::size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('std::size_t const', 'size')])
## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(std::string const s) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('std::string const', 's')])
## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(char const * buffer, std::size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('std::size_t const', 'size')])
## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(std::string const s) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('std::string const', 's')])
## hash.h (module 'core'): ns3::Hasher & ns3::Hasher::clear() [member function]
cls.add_method('clear',
'ns3::Hasher &',
[])
return
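# Usage sketch (comment only, 'ns.core' import path assumed):
#
#   import ns.core
#   h = ns.core.Hasher()
#   h.GetHash32("node-0/device-1")         # 32-bit hash of the string
#   h.clear()                              # restore the initial state before hashing new data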
def register_Ns3IpcsClassifierRecord_methods(root_module, cls):
## ipcs-classifier-record.h (module 'wimax'): ns3::IpcsClassifierRecord::IpcsClassifierRecord(ns3::IpcsClassifierRecord const & arg0) [constructor]
cls.add_constructor([param('ns3::IpcsClassifierRecord const &', 'arg0')])
## ipcs-classifier-record.h (module 'wimax'): ns3::IpcsClassifierRecord::IpcsClassifierRecord() [constructor]
cls.add_constructor([])
## ipcs-classifier-record.h (module 'wimax'): ns3::IpcsClassifierRecord::IpcsClassifierRecord(ns3::Ipv4Address srcAddress, ns3::Ipv4Mask srcMask, ns3::Ipv4Address dstAddress, ns3::Ipv4Mask dstMask, uint16_t srcPortLow, uint16_t srcPortHigh, uint16_t dstPortLow, uint16_t dstPortHigh, uint8_t protocol, uint8_t priority) [constructor]
cls.add_constructor([param('ns3::Ipv4Address', 'srcAddress'), param('ns3::Ipv4Mask', 'srcMask'), param('ns3::Ipv4Address', 'dstAddress'), param('ns3::Ipv4Mask', 'dstMask'), param('uint16_t', 'srcPortLow'), param('uint16_t', 'srcPortHigh'), param('uint16_t', 'dstPortLow'), param('uint16_t', 'dstPortHigh'), param('uint8_t', 'protocol'), param('uint8_t', 'priority')])
## ipcs-classifier-record.h (module 'wimax'): ns3::IpcsClassifierRecord::IpcsClassifierRecord(ns3::Tlv tlv) [constructor]
cls.add_constructor([param('ns3::Tlv', 'tlv')])
## ipcs-classifier-record.h (module 'wimax'): void ns3::IpcsClassifierRecord::AddDstAddr(ns3::Ipv4Address dstAddress, ns3::Ipv4Mask dstMask) [member function]
cls.add_method('AddDstAddr',
'void',
[param('ns3::Ipv4Address', 'dstAddress'), param('ns3::Ipv4Mask', 'dstMask')])
## ipcs-classifier-record.h (module 'wimax'): void ns3::IpcsClassifierRecord::AddDstPortRange(uint16_t dstPortLow, uint16_t dstPortHigh) [member function]
cls.add_method('AddDstPortRange',
'void',
[param('uint16_t', 'dstPortLow'), param('uint16_t', 'dstPortHigh')])
## ipcs-classifier-record.h (module 'wimax'): void ns3::IpcsClassifierRecord::AddProtocol(uint8_t proto) [member function]
cls.add_method('AddProtocol',
'void',
[param('uint8_t', 'proto')])
## ipcs-classifier-record.h (module 'wimax'): void ns3::IpcsClassifierRecord::AddSrcAddr(ns3::Ipv4Address srcAddress, ns3::Ipv4Mask srcMask) [member function]
cls.add_method('AddSrcAddr',
'void',
[param('ns3::Ipv4Address', 'srcAddress'), param('ns3::Ipv4Mask', 'srcMask')])
## ipcs-classifier-record.h (module 'wimax'): void ns3::IpcsClassifierRecord::AddSrcPortRange(uint16_t srcPortLow, uint16_t srcPortHigh) [member function]
cls.add_method('AddSrcPortRange',
'void',
[param('uint16_t', 'srcPortLow'), param('uint16_t', 'srcPortHigh')])
## ipcs-classifier-record.h (module 'wimax'): bool ns3::IpcsClassifierRecord::CheckMatch(ns3::Ipv4Address srcAddress, ns3::Ipv4Address dstAddress, uint16_t srcPort, uint16_t dstPort, uint8_t proto) const [member function]
cls.add_method('CheckMatch',
'bool',
[param('ns3::Ipv4Address', 'srcAddress'), param('ns3::Ipv4Address', 'dstAddress'), param('uint16_t', 'srcPort'), param('uint16_t', 'dstPort'), param('uint8_t', 'proto')],
is_const=True)
## ipcs-classifier-record.h (module 'wimax'): uint16_t ns3::IpcsClassifierRecord::GetCid() const [member function]
cls.add_method('GetCid',
'uint16_t',
[],
is_const=True)
## ipcs-classifier-record.h (module 'wimax'): uint16_t ns3::IpcsClassifierRecord::GetIndex() const [member function]
cls.add_method('GetIndex',
'uint16_t',
[],
is_const=True)
## ipcs-classifier-record.h (module 'wimax'): uint8_t ns3::IpcsClassifierRecord::GetPriority() const [member function]
cls.add_method('GetPriority',
'uint8_t',
[],
is_const=True)
## ipcs-classifier-record.h (module 'wimax'): void ns3::IpcsClassifierRecord::SetCid(uint16_t cid) [member function]
cls.add_method('SetCid',
'void',
[param('uint16_t', 'cid')])
## ipcs-classifier-record.h (module 'wimax'): void ns3::IpcsClassifierRecord::SetIndex(uint16_t index) [member function]
cls.add_method('SetIndex',
'void',
[param('uint16_t', 'index')])
## ipcs-classifier-record.h (module 'wimax'): void ns3::IpcsClassifierRecord::SetPriority(uint8_t prio) [member function]
cls.add_method('SetPriority',
'void',
[param('uint8_t', 'prio')])
## ipcs-classifier-record.h (module 'wimax'): ns3::Tlv ns3::IpcsClassifierRecord::ToTlv() const [member function]
cls.add_method('ToTlv',
'ns3::Tlv',
[],
is_const=True)
return
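# Usage sketch (comment only): the long constructor registered above is the
# usual way a WiMAX service-flow classifier rule is built; addresses, masks,
# port ranges, protocol and priority values here are illustrative.
#
#   rec = ns.wimax.IpcsClassifierRecord(
#       ns.network.Ipv4Address("10.1.1.0"), ns.network.Ipv4Mask("255.255.255.0"),
#       ns.network.Ipv4Address("10.1.2.0"), ns.network.Ipv4Mask("255.255.255.0"),
#       0, 65535, 100, 100, 17, 1)         # UDP (protocol 17), priority 1
#   rec.CheckMatch(ns.network.Ipv4Address("10.1.1.7"),
#                  ns.network.Ipv4Address("10.1.2.9"), 5000, 100, 17)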
def register_Ns3Ipv4Address_methods(root_module, cls):
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=')
cls.add_binary_comparison_operator('<')
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(ns3::Ipv4Address const & arg0) [constructor]
cls.add_constructor([param('ns3::Ipv4Address const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(uint32_t address) [constructor]
cls.add_constructor([param('uint32_t', 'address')])
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(char const * address) [constructor]
cls.add_constructor([param('char const *', 'address')])
## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::CombineMask(ns3::Ipv4Mask const & mask) const [member function]
cls.add_method('CombineMask',
'ns3::Ipv4Address',
[param('ns3::Ipv4Mask const &', 'mask')],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::ConvertFrom(ns3::Address const & address) [member function]
cls.add_method('ConvertFrom',
'ns3::Ipv4Address',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::Deserialize(uint8_t const * buf) [member function]
cls.add_method('Deserialize',
'ns3::Ipv4Address',
[param('uint8_t const *', 'buf')],
is_static=True)
## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Address::Get() const [member function]
cls.add_method('Get',
'uint32_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetAny() [member function]
cls.add_method('GetAny',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetBroadcast() [member function]
cls.add_method('GetBroadcast',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::GetSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function]
cls.add_method('GetSubnetDirectedBroadcast',
'ns3::Ipv4Address',
[param('ns3::Ipv4Mask const &', 'mask')],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsAny() const [member function]
cls.add_method('IsAny',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsBroadcast() const [member function]
cls.add_method('IsBroadcast',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsEqual(ns3::Ipv4Address const & other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv4Address const &', 'other')],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsLocalMulticast() const [member function]
cls.add_method('IsLocalMulticast',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsLocalhost() const [member function]
cls.add_method('IsLocalhost',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): static bool ns3::Ipv4Address::IsMatchingType(ns3::Address const & address) [member function]
cls.add_method('IsMatchingType',
'bool',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function]
cls.add_method('IsSubnetDirectedBroadcast',
'bool',
[param('ns3::Ipv4Mask const &', 'mask')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Serialize(uint8_t * buf) const [member function]
cls.add_method('Serialize',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(uint32_t address) [member function]
cls.add_method('Set',
'void',
[param('uint32_t', 'address')])
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(char const * address) [member function]
cls.add_method('Set',
'void',
[param('char const *', 'address')])
return
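# --- Illustrative sketch (not produced by the API scanner) ---------------------
# A minimal example of what the Ipv4Address binding registered above allows from
# Python, assuming the compiled modules are importable as ns.network (module
# layout can differ between ns-3 builds). Illustration only; this file is
# normally regenerated, so helpers like this would not be added here in practice.
def _example_ipv4_address_usage():
    import ns.network
    addr = ns.network.Ipv4Address("10.1.1.7")
    mask = ns.network.Ipv4Mask("255.255.255.0")
    network = addr.CombineMask(mask)                  # -> 10.1.1.0
    bcast = addr.GetSubnetDirectedBroadcast(mask)     # -> 10.1.1.255
    return network, bcast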
def register_Ns3Ipv4Mask_methods(root_module, cls):
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=')
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(ns3::Ipv4Mask const & arg0) [constructor]
cls.add_constructor([param('ns3::Ipv4Mask const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(uint32_t mask) [constructor]
cls.add_constructor([param('uint32_t', 'mask')])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(char const * mask) [constructor]
cls.add_constructor([param('char const *', 'mask')])
## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::Get() const [member function]
cls.add_method('Get',
'uint32_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::GetInverse() const [member function]
cls.add_method('GetInverse',
'uint32_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv4Mask',
[],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetOnes() [member function]
cls.add_method('GetOnes',
'ns3::Ipv4Mask',
[],
is_static=True)
## ipv4-address.h (module 'network'): uint16_t ns3::Ipv4Mask::GetPrefixLength() const [member function]
cls.add_method('GetPrefixLength',
'uint16_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv4Mask',
[],
is_static=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsEqual(ns3::Ipv4Mask other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv4Mask', 'other')],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsMatch(ns3::Ipv4Address a, ns3::Ipv4Address b) const [member function]
cls.add_method('IsMatch',
'bool',
[param('ns3::Ipv4Address', 'a'), param('ns3::Ipv4Address', 'b')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Set(uint32_t mask) [member function]
cls.add_method('Set',
'void',
[param('uint32_t', 'mask')])
return
def register_Ns3Ipv6Address_methods(root_module, cls):
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=')
cls.add_binary_comparison_operator('<')
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(char const * address) [constructor]
cls.add_constructor([param('char const *', 'address')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(uint8_t * address) [constructor]
cls.add_constructor([param('uint8_t *', 'address')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const & addr) [constructor]
cls.add_constructor([param('ns3::Ipv6Address const &', 'addr')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const * addr) [constructor]
cls.add_constructor([param('ns3::Ipv6Address const *', 'addr')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6Address::CombinePrefix(ns3::Ipv6Prefix const & prefix) [member function]
cls.add_method('CombinePrefix',
'ns3::Ipv6Address',
[param('ns3::Ipv6Prefix const &', 'prefix')])
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::ConvertFrom(ns3::Address const & address) [member function]
cls.add_method('ConvertFrom',
'ns3::Ipv6Address',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::Deserialize(uint8_t const * buf) [member function]
cls.add_method('Deserialize',
'ns3::Ipv6Address',
[param('uint8_t const *', 'buf')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllHostsMulticast() [member function]
cls.add_method('GetAllHostsMulticast',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllNodesMulticast() [member function]
cls.add_method('GetAllNodesMulticast',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllRoutersMulticast() [member function]
cls.add_method('GetAllRoutersMulticast',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAny() [member function]
cls.add_method('GetAny',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::GetBytes(uint8_t * buf) const [member function]
cls.add_method('GetBytes',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv6-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv6Address::GetIpv4MappedAddress() const [member function]
cls.add_method('GetIpv4MappedAddress',
'ns3::Ipv4Address',
[],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetOnes() [member function]
cls.add_method('GetOnes',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllHostsMulticast() const [member function]
cls.add_method('IsAllHostsMulticast',
'bool',
[],
is_const=True, deprecated=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllNodesMulticast() const [member function]
cls.add_method('IsAllNodesMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllRoutersMulticast() const [member function]
cls.add_method('IsAllRoutersMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAny() const [member function]
cls.add_method('IsAny',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsDocumentation() const [member function]
cls.add_method('IsDocumentation',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsEqual(ns3::Ipv6Address const & other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv6Address const &', 'other')],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsIpv4MappedAddress() const [member function]
cls.add_method('IsIpv4MappedAddress',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocal() const [member function]
cls.add_method('IsLinkLocal',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocalMulticast() const [member function]
cls.add_method('IsLinkLocalMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLocalhost() const [member function]
cls.add_method('IsLocalhost',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): static bool ns3::Ipv6Address::IsMatchingType(ns3::Address const & address) [member function]
cls.add_method('IsMatchingType',
'bool',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsSolicitedMulticast() const [member function]
cls.add_method('IsSolicitedMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac16Address addr, ns3::Ipv6Address prefix) [member function]
cls.add_method('MakeAutoconfiguredAddress',
'ns3::Ipv6Address',
[param('ns3::Mac16Address', 'addr'), param('ns3::Ipv6Address', 'prefix')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac48Address addr, ns3::Ipv6Address prefix) [member function]
cls.add_method('MakeAutoconfiguredAddress',
'ns3::Ipv6Address',
[param('ns3::Mac48Address', 'addr'), param('ns3::Ipv6Address', 'prefix')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac64Address addr, ns3::Ipv6Address prefix) [member function]
cls.add_method('MakeAutoconfiguredAddress',
'ns3::Ipv6Address',
[param('ns3::Mac64Address', 'addr'), param('ns3::Ipv6Address', 'prefix')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac8Address addr, ns3::Ipv6Address prefix) [member function]
cls.add_method('MakeAutoconfiguredAddress',
'ns3::Ipv6Address',
[param('ns3::Mac8Address', 'addr'), param('ns3::Ipv6Address', 'prefix')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac16Address mac) [member function]
cls.add_method('MakeAutoconfiguredLinkLocalAddress',
'ns3::Ipv6Address',
[param('ns3::Mac16Address', 'mac')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac48Address mac) [member function]
cls.add_method('MakeAutoconfiguredLinkLocalAddress',
'ns3::Ipv6Address',
[param('ns3::Mac48Address', 'mac')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac64Address mac) [member function]
cls.add_method('MakeAutoconfiguredLinkLocalAddress',
'ns3::Ipv6Address',
[param('ns3::Mac64Address', 'mac')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac8Address mac) [member function]
cls.add_method('MakeAutoconfiguredLinkLocalAddress',
'ns3::Ipv6Address',
[param('ns3::Mac8Address', 'mac')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeIpv4MappedAddress(ns3::Ipv4Address addr) [member function]
cls.add_method('MakeIpv4MappedAddress',
'ns3::Ipv6Address',
[param('ns3::Ipv4Address', 'addr')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeSolicitedAddress(ns3::Ipv6Address addr) [member function]
cls.add_method('MakeSolicitedAddress',
'ns3::Ipv6Address',
[param('ns3::Ipv6Address', 'addr')],
is_static=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Serialize(uint8_t * buf) const [member function]
cls.add_method('Serialize',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(char const * address) [member function]
cls.add_method('Set',
'void',
[param('char const *', 'address')])
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(uint8_t * address) [member function]
cls.add_method('Set',
'void',
[param('uint8_t *', 'address')])
return
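# Illustrative sketch only: deriving an autoconfigured link-local IPv6 address
# from a MAC address via the static helpers bound above. Assumes ns.network is
# the importable module name for these bindings; adjust to your build.
def _example_ipv6_autoconfig():
    import ns.network
    mac = ns.network.Mac48Address("00:00:00:00:00:01")
    lla = ns.network.Ipv6Address.MakeAutoconfiguredLinkLocalAddress(mac)
    return lla.IsLinkLocal()      # expected True for fe80::/64 addresses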
def register_Ns3Ipv6Prefix_methods(root_module, cls):
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=')
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t * prefix) [constructor]
cls.add_constructor([param('uint8_t *', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(char const * prefix) [constructor]
cls.add_constructor([param('char const *', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t prefix) [constructor]
cls.add_constructor([param('uint8_t', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const & prefix) [constructor]
cls.add_constructor([param('ns3::Ipv6Prefix const &', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const * prefix) [constructor]
cls.add_constructor([param('ns3::Ipv6Prefix const *', 'prefix')])
## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::GetBytes(uint8_t * buf) const [member function]
cls.add_method('GetBytes',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv6Prefix',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetOnes() [member function]
cls.add_method('GetOnes',
'ns3::Ipv6Prefix',
[],
is_static=True)
## ipv6-address.h (module 'network'): uint8_t ns3::Ipv6Prefix::GetPrefixLength() const [member function]
cls.add_method('GetPrefixLength',
'uint8_t',
[],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv6Prefix',
[],
is_static=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsEqual(ns3::Ipv6Prefix const & other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv6Prefix const &', 'other')],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsMatch(ns3::Ipv6Address a, ns3::Ipv6Address b) const [member function]
cls.add_method('IsMatch',
'bool',
[param('ns3::Ipv6Address', 'a'), param('ns3::Ipv6Address', 'b')],
is_const=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
return
def register_Ns3LogComponent_methods(root_module, cls):
## log.h (module 'core'): ns3::LogComponent::LogComponent(ns3::LogComponent const & arg0) [constructor]
cls.add_constructor([param('ns3::LogComponent const &', 'arg0')])
## log.h (module 'core'): ns3::LogComponent::LogComponent(std::string const & name, std::string const & file, ns3::LogLevel const mask=::ns3::LogLevel::LOG_NONE) [constructor]
cls.add_constructor([param('std::string const &', 'name'), param('std::string const &', 'file'), param('ns3::LogLevel const', 'mask', default_value='::ns3::LogLevel::LOG_NONE')])
## log.h (module 'core'): void ns3::LogComponent::Disable(ns3::LogLevel const level) [member function]
cls.add_method('Disable',
'void',
[param('ns3::LogLevel const', 'level')])
## log.h (module 'core'): void ns3::LogComponent::Enable(ns3::LogLevel const level) [member function]
cls.add_method('Enable',
'void',
[param('ns3::LogLevel const', 'level')])
## log.h (module 'core'): std::string ns3::LogComponent::File() const [member function]
cls.add_method('File',
'std::string',
[],
is_const=True)
## log.h (module 'core'): static ns3::LogComponent::ComponentList * ns3::LogComponent::GetComponentList() [member function]
cls.add_method('GetComponentList',
'ns3::LogComponent::ComponentList *',
[],
is_static=True)
## log.h (module 'core'): static std::string ns3::LogComponent::GetLevelLabel(ns3::LogLevel const level) [member function]
cls.add_method('GetLevelLabel',
'std::string',
[param('ns3::LogLevel const', 'level')],
is_static=True)
## log.h (module 'core'): bool ns3::LogComponent::IsEnabled(ns3::LogLevel const level) const [member function]
cls.add_method('IsEnabled',
'bool',
[param('ns3::LogLevel const', 'level')],
is_const=True)
## log.h (module 'core'): bool ns3::LogComponent::IsNoneEnabled() const [member function]
cls.add_method('IsNoneEnabled',
'bool',
[],
is_const=True)
## log.h (module 'core'): char const * ns3::LogComponent::Name() const [member function]
cls.add_method('Name',
'char const *',
[],
is_const=True)
## log.h (module 'core'): void ns3::LogComponent::SetMask(ns3::LogLevel const level) [member function]
cls.add_method('SetMask',
'void',
[param('ns3::LogLevel const', 'level')])
return
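# Illustrative sketch only: toggling a LogComponent from Python through the
# methods bound above. The component/file names are hypothetical, and the
# LOG_LEVEL_INFO / LOG_INFO enum values are assumed to be exported by the
# ns.core bindings (they are not registered in this file).
def _example_log_component():
    import ns.core
    comp = ns.core.LogComponent("MyExampleComponent", "example.cc")  # hypothetical names
    comp.Enable(ns.core.LOG_LEVEL_INFO)
    return comp.IsEnabled(ns.core.LOG_INFO)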
def register_Ns3Mac48Address_methods(root_module, cls):
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=')
cls.add_binary_comparison_operator('<')
cls.add_output_stream_operator()
## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address(ns3::Mac48Address const & arg0) [constructor]
cls.add_constructor([param('ns3::Mac48Address const &', 'arg0')])
## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address() [constructor]
cls.add_constructor([])
## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address(char const * str) [constructor]
cls.add_constructor([param('char const *', 'str')])
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::Allocate() [member function]
cls.add_method('Allocate',
'ns3::Mac48Address',
[],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::ConvertFrom(ns3::Address const & address) [member function]
cls.add_method('ConvertFrom',
'ns3::Mac48Address',
[param('ns3::Address const &', 'address')],
is_static=True)
## mac48-address.h (module 'network'): void ns3::Mac48Address::CopyFrom(uint8_t const * buffer) [member function]
cls.add_method('CopyFrom',
'void',
[param('uint8_t const *', 'buffer')])
## mac48-address.h (module 'network'): void ns3::Mac48Address::CopyTo(uint8_t * buffer) const [member function]
cls.add_method('CopyTo',
'void',
[param('uint8_t *', 'buffer')],
is_const=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetBroadcast() [member function]
cls.add_method('GetBroadcast',
'ns3::Mac48Address',
[],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast(ns3::Ipv4Address address) [member function]
cls.add_method('GetMulticast',
'ns3::Mac48Address',
[param('ns3::Ipv4Address', 'address')],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast(ns3::Ipv6Address address) [member function]
cls.add_method('GetMulticast',
'ns3::Mac48Address',
[param('ns3::Ipv6Address', 'address')],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast6Prefix() [member function]
cls.add_method('GetMulticast6Prefix',
'ns3::Mac48Address',
[],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticastPrefix() [member function]
cls.add_method('GetMulticastPrefix',
'ns3::Mac48Address',
[],
is_static=True)
## mac48-address.h (module 'network'): bool ns3::Mac48Address::IsBroadcast() const [member function]
cls.add_method('IsBroadcast',
'bool',
[],
is_const=True)
## mac48-address.h (module 'network'): bool ns3::Mac48Address::IsGroup() const [member function]
cls.add_method('IsGroup',
'bool',
[],
is_const=True)
## mac48-address.h (module 'network'): static bool ns3::Mac48Address::IsMatchingType(ns3::Address const & address) [member function]
cls.add_method('IsMatchingType',
'bool',
[param('ns3::Address const &', 'address')],
is_static=True)
return
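# Illustrative sketch only: mapping an IPv4 multicast group to its Ethernet
# multicast MAC address with the static helper bound above (assumes ns.network
# is the importable module name).
def _example_mac48_multicast():
    import ns.network
    group = ns.network.Ipv4Address("224.0.0.251")
    return ns.network.Mac48Address.GetMulticast(group)   # 01:00:5e:xx:xx:xx form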
def register_Ns3Mac8Address_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
## mac8-address.h (module 'network'): ns3::Mac8Address::Mac8Address(ns3::Mac8Address const & arg0) [constructor]
cls.add_constructor([param('ns3::Mac8Address const &', 'arg0')])
## mac8-address.h (module 'network'): ns3::Mac8Address::Mac8Address() [constructor]
cls.add_constructor([])
## mac8-address.h (module 'network'): ns3::Mac8Address::Mac8Address(uint8_t addr) [constructor]
cls.add_constructor([param('uint8_t', 'addr')])
## mac8-address.h (module 'network'): static ns3::Mac8Address ns3::Mac8Address::Allocate() [member function]
cls.add_method('Allocate',
'ns3::Mac8Address',
[],
is_static=True)
## mac8-address.h (module 'network'): static ns3::Mac8Address ns3::Mac8Address::ConvertFrom(ns3::Address const & address) [member function]
cls.add_method('ConvertFrom',
'ns3::Mac8Address',
[param('ns3::Address const &', 'address')],
is_static=True)
## mac8-address.h (module 'network'): void ns3::Mac8Address::CopyFrom(uint8_t const * pBuffer) [member function]
cls.add_method('CopyFrom',
'void',
[param('uint8_t const *', 'pBuffer')])
## mac8-address.h (module 'network'): void ns3::Mac8Address::CopyTo(uint8_t * pBuffer) const [member function]
cls.add_method('CopyTo',
'void',
[param('uint8_t *', 'pBuffer')],
is_const=True)
## mac8-address.h (module 'network'): static ns3::Mac8Address ns3::Mac8Address::GetBroadcast() [member function]
cls.add_method('GetBroadcast',
'ns3::Mac8Address',
[],
is_static=True)
## mac8-address.h (module 'network'): static bool ns3::Mac8Address::IsMatchingType(ns3::Address const & address) [member function]
cls.add_method('IsMatchingType',
'bool',
[param('ns3::Address const &', 'address')],
is_static=True)
return
def register_Ns3NetDeviceContainer_methods(root_module, cls):
## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(ns3::NetDeviceContainer const & arg0) [constructor]
cls.add_constructor([param('ns3::NetDeviceContainer const &', 'arg0')])
## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer() [constructor]
cls.add_constructor([])
## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(ns3::Ptr<ns3::NetDevice> dev) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::NetDevice >', 'dev')])
## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(std::string devName) [constructor]
cls.add_constructor([param('std::string', 'devName')])
## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(ns3::NetDeviceContainer const & a, ns3::NetDeviceContainer const & b) [constructor]
cls.add_constructor([param('ns3::NetDeviceContainer const &', 'a'), param('ns3::NetDeviceContainer const &', 'b')])
## net-device-container.h (module 'network'): void ns3::NetDeviceContainer::Add(ns3::NetDeviceContainer other) [member function]
cls.add_method('Add',
'void',
[param('ns3::NetDeviceContainer', 'other')])
## net-device-container.h (module 'network'): void ns3::NetDeviceContainer::Add(ns3::Ptr<ns3::NetDevice> device) [member function]
cls.add_method('Add',
'void',
[param('ns3::Ptr< ns3::NetDevice >', 'device')])
## net-device-container.h (module 'network'): void ns3::NetDeviceContainer::Add(std::string deviceName) [member function]
cls.add_method('Add',
'void',
[param('std::string', 'deviceName')])
## net-device-container.h (module 'network'): ns3::NetDeviceContainer::Iterator ns3::NetDeviceContainer::Begin() const [member function]
cls.add_method('Begin',
'ns3::NetDeviceContainer::Iterator',
[],
is_const=True)
## net-device-container.h (module 'network'): ns3::NetDeviceContainer::Iterator ns3::NetDeviceContainer::End() const [member function]
cls.add_method('End',
'ns3::NetDeviceContainer::Iterator',
[],
is_const=True)
## net-device-container.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::NetDeviceContainer::Get(uint32_t i) const [member function]
cls.add_method('Get',
'ns3::Ptr< ns3::NetDevice >',
[param('uint32_t', 'i')],
is_const=True)
## net-device-container.h (module 'network'): uint32_t ns3::NetDeviceContainer::GetN() const [member function]
cls.add_method('GetN',
'uint32_t',
[],
is_const=True)
return
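# Illustrative sketch only: NetDeviceContainer instances bound above are usually
# produced by device helpers; merging and indexing them from Python looks like
# this (devices_a and devices_b are hypothetical containers created elsewhere).
def _example_net_device_container(devices_a, devices_b):
    import ns.network
    devices = ns.network.NetDeviceContainer(devices_a, devices_b)
    return [devices.Get(i) for i in range(devices.GetN())]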
def register_Ns3NodeContainer_methods(root_module, cls):
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & arg0) [constructor]
cls.add_constructor([param('ns3::NodeContainer const &', 'arg0')])
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer() [constructor]
cls.add_constructor([])
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::Ptr<ns3::Node> node) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::Node >', 'node')])
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(std::string nodeName) [constructor]
cls.add_constructor([param('std::string', 'nodeName')])
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b) [constructor]
cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b')])
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c) [constructor]
cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c')])
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c, ns3::NodeContainer const & d) [constructor]
cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c'), param('ns3::NodeContainer const &', 'd')])
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c, ns3::NodeContainer const & d, ns3::NodeContainer const & e) [constructor]
cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c'), param('ns3::NodeContainer const &', 'd'), param('ns3::NodeContainer const &', 'e')])
## node-container.h (module 'network'): void ns3::NodeContainer::Add(ns3::NodeContainer other) [member function]
cls.add_method('Add',
'void',
[param('ns3::NodeContainer', 'other')])
## node-container.h (module 'network'): void ns3::NodeContainer::Add(ns3::Ptr<ns3::Node> node) [member function]
cls.add_method('Add',
'void',
[param('ns3::Ptr< ns3::Node >', 'node')])
## node-container.h (module 'network'): void ns3::NodeContainer::Add(std::string nodeName) [member function]
cls.add_method('Add',
'void',
[param('std::string', 'nodeName')])
## node-container.h (module 'network'): ns3::NodeContainer::Iterator ns3::NodeContainer::Begin() const [member function]
cls.add_method('Begin',
'ns3::NodeContainer::Iterator',
[],
is_const=True)
## node-container.h (module 'network'): bool ns3::NodeContainer::Contains(uint32_t id) const [member function]
cls.add_method('Contains',
'bool',
[param('uint32_t', 'id')],
is_const=True)
## node-container.h (module 'network'): void ns3::NodeContainer::Create(uint32_t n) [member function]
cls.add_method('Create',
'void',
[param('uint32_t', 'n')])
## node-container.h (module 'network'): void ns3::NodeContainer::Create(uint32_t n, uint32_t systemId) [member function]
cls.add_method('Create',
'void',
[param('uint32_t', 'n'), param('uint32_t', 'systemId')])
## node-container.h (module 'network'): ns3::NodeContainer::Iterator ns3::NodeContainer::End() const [member function]
cls.add_method('End',
'ns3::NodeContainer::Iterator',
[],
is_const=True)
## node-container.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NodeContainer::Get(uint32_t i) const [member function]
cls.add_method('Get',
'ns3::Ptr< ns3::Node >',
[param('uint32_t', 'i')],
is_const=True)
## node-container.h (module 'network'): static ns3::NodeContainer ns3::NodeContainer::GetGlobal() [member function]
cls.add_method('GetGlobal',
'ns3::NodeContainer',
[],
is_static=True)
## node-container.h (module 'network'): uint32_t ns3::NodeContainer::GetN() const [member function]
cls.add_method('GetN',
'uint32_t',
[],
is_const=True)
return
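# Illustrative sketch only: creating and indexing nodes through the
# NodeContainer binding registered above (assumes ns.network import name).
def _example_node_container():
    import ns.network
    nodes = ns.network.NodeContainer()
    nodes.Create(3)                                      # three new nodes
    first = nodes.Get(0)
    everything = ns.network.NodeContainer.GetGlobal()    # all nodes in the process
    return nodes.GetN(), first, everything.GetN()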
def register_Ns3ObjectBase_methods(root_module, cls):
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor]
cls.add_constructor([])
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [constructor]
cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function]
cls.add_method('GetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
is_const=True)
## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & value) const [member function]
cls.add_method('GetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
is_const=True)
## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True, is_pure_virtual=True)
## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function]
cls.add_method('ConstructSelf',
'void',
[param('ns3::AttributeConstructionList const &', 'attributes')],
visibility='protected')
## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function]
cls.add_method('NotifyConstructionCompleted',
'void',
[],
is_virtual=True, visibility='protected')
return
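# Note on the flags used above: is_pure_virtual=True marks GetInstanceTypeId as
# abstract (so pybindgen will not instantiate ObjectBase directly), while
# visibility='protected' on ConstructSelf/NotifyConstructionCompleted keeps those
# members reachable only from the generated helper subclasses. A hand-written
# registration for a hypothetical protected virtual hook would follow the same
# pattern (kept as a comment because this file is regenerated by the scanner):
#
#   cls.add_method('DoInitializeExample',   # hypothetical method name
#                  'void',
#                  [],
#                  is_virtual=True, visibility='protected')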
def register_Ns3ObjectDeleter_methods(root_module, cls):
## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter() [constructor]
cls.add_constructor([])
## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter(ns3::ObjectDeleter const & arg0) [constructor]
cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')])
## object.h (module 'core'): static void ns3::ObjectDeleter::Delete(ns3::Object * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::Object *', 'object')],
is_static=True)
return
def register_Ns3ObjectFactory_methods(root_module, cls):
cls.add_output_stream_operator()
## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(ns3::ObjectFactory const & arg0) [constructor]
cls.add_constructor([param('ns3::ObjectFactory const &', 'arg0')])
## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory() [constructor]
cls.add_constructor([])
## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(std::string typeId) [constructor]
cls.add_constructor([param('std::string', 'typeId')])
## object-factory.h (module 'core'): ns3::Ptr<ns3::Object> ns3::ObjectFactory::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::Object >',
[],
is_const=True)
## object-factory.h (module 'core'): ns3::TypeId ns3::ObjectFactory::GetTypeId() const [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_const=True)
## object-factory.h (module 'core'): bool ns3::ObjectFactory::IsTypeIdSet() const [member function]
cls.add_method('IsTypeIdSet',
'bool',
[],
is_const=True)
## object-factory.h (module 'core'): void ns3::ObjectFactory::Set(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('Set',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(ns3::TypeId tid) [member function]
cls.add_method('SetTypeId',
'void',
[param('ns3::TypeId', 'tid')])
## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(char const * tid) [member function]
cls.add_method('SetTypeId',
'void',
[param('char const *', 'tid')])
## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(std::string tid) [member function]
cls.add_method('SetTypeId',
'void',
[param('std::string', 'tid')])
return
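# Illustrative sketch only: creating an object through the ObjectFactory binding
# above. The TypeId string is an assumption; substitute any TypeId available in
# your build.
def _example_object_factory():
    import ns.core
    factory = ns.core.ObjectFactory()
    factory.SetTypeId("ns3::ConstantPositionMobilityModel")   # hypothetical TypeId
    obj = factory.Create()
    return factory.IsTypeIdSet(), obj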
def register_Ns3OfdmDcdChannelEncodings_methods(root_module, cls):
## dl-mac-messages.h (module 'wimax'): ns3::OfdmDcdChannelEncodings::OfdmDcdChannelEncodings(ns3::OfdmDcdChannelEncodings const & arg0) [constructor]
cls.add_constructor([param('ns3::OfdmDcdChannelEncodings const &', 'arg0')])
## dl-mac-messages.h (module 'wimax'): ns3::OfdmDcdChannelEncodings::OfdmDcdChannelEncodings() [constructor]
cls.add_constructor([])
## dl-mac-messages.h (module 'wimax'): ns3::Mac48Address ns3::OfdmDcdChannelEncodings::GetBaseStationId() const [member function]
cls.add_method('GetBaseStationId',
'ns3::Mac48Address',
[],
is_const=True)
## dl-mac-messages.h (module 'wimax'): uint8_t ns3::OfdmDcdChannelEncodings::GetChannelNr() const [member function]
cls.add_method('GetChannelNr',
'uint8_t',
[],
is_const=True)
## dl-mac-messages.h (module 'wimax'): uint8_t ns3::OfdmDcdChannelEncodings::GetFrameDurationCode() const [member function]
cls.add_method('GetFrameDurationCode',
'uint8_t',
[],
is_const=True)
## dl-mac-messages.h (module 'wimax'): uint32_t ns3::OfdmDcdChannelEncodings::GetFrameNumber() const [member function]
cls.add_method('GetFrameNumber',
'uint32_t',
[],
is_const=True)
## dl-mac-messages.h (module 'wimax'): uint8_t ns3::OfdmDcdChannelEncodings::GetRtg() const [member function]
cls.add_method('GetRtg',
'uint8_t',
[],
is_const=True)
## dl-mac-messages.h (module 'wimax'): uint16_t ns3::OfdmDcdChannelEncodings::GetSize() const [member function]
cls.add_method('GetSize',
'uint16_t',
[],
is_const=True)
## dl-mac-messages.h (module 'wimax'): uint8_t ns3::OfdmDcdChannelEncodings::GetTtg() const [member function]
cls.add_method('GetTtg',
'uint8_t',
[],
is_const=True)
## dl-mac-messages.h (module 'wimax'): void ns3::OfdmDcdChannelEncodings::SetBaseStationId(ns3::Mac48Address baseStationId) [member function]
cls.add_method('SetBaseStationId',
'void',
[param('ns3::Mac48Address', 'baseStationId')])
## dl-mac-messages.h (module 'wimax'): void ns3::OfdmDcdChannelEncodings::SetChannelNr(uint8_t channelNr) [member function]
cls.add_method('SetChannelNr',
'void',
[param('uint8_t', 'channelNr')])
## dl-mac-messages.h (module 'wimax'): void ns3::OfdmDcdChannelEncodings::SetFrameDurationCode(uint8_t frameDurationCode) [member function]
cls.add_method('SetFrameDurationCode',
'void',
[param('uint8_t', 'frameDurationCode')])
## dl-mac-messages.h (module 'wimax'): void ns3::OfdmDcdChannelEncodings::SetFrameNumber(uint32_t frameNumber) [member function]
cls.add_method('SetFrameNumber',
'void',
[param('uint32_t', 'frameNumber')])
## dl-mac-messages.h (module 'wimax'): void ns3::OfdmDcdChannelEncodings::SetRtg(uint8_t rtg) [member function]
cls.add_method('SetRtg',
'void',
[param('uint8_t', 'rtg')])
## dl-mac-messages.h (module 'wimax'): void ns3::OfdmDcdChannelEncodings::SetTtg(uint8_t ttg) [member function]
cls.add_method('SetTtg',
'void',
[param('uint8_t', 'ttg')])
## dl-mac-messages.h (module 'wimax'): ns3::Buffer::Iterator ns3::OfdmDcdChannelEncodings::DoRead(ns3::Buffer::Iterator start) [member function]
cls.add_method('DoRead',
'ns3::Buffer::Iterator',
[param('ns3::Buffer::Iterator', 'start')],
is_virtual=True, visibility='private')
## dl-mac-messages.h (module 'wimax'): ns3::Buffer::Iterator ns3::OfdmDcdChannelEncodings::DoWrite(ns3::Buffer::Iterator start) const [member function]
cls.add_method('DoWrite',
'ns3::Buffer::Iterator',
[param('ns3::Buffer::Iterator', 'start')],
is_const=True, is_virtual=True, visibility='private')
return
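# Note on DoRead/DoWrite above: they are registered with visibility='private'
# and is_virtual=True, mirroring the C++ class where serialization is reached
# through the public base-class interface rather than called directly. A hedged
# sketch of the public setters (assumes the wimax bindings import as ns.wimax;
# illustration only):
def _example_ofdm_dcd_channel_encodings():
    import ns.wimax
    import ns.network
    enc = ns.wimax.OfdmDcdChannelEncodings()
    enc.SetBaseStationId(ns.network.Mac48Address("00:00:00:00:00:02"))
    enc.SetTtg(10)
    enc.SetRtg(10)
    return enc.GetSize()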
def register_Ns3OfdmDlBurstProfile_methods(root_module, cls):
## dl-mac-messages.h (module 'wimax'): ns3::OfdmDlBurstProfile::OfdmDlBurstProfile(ns3::OfdmDlBurstProfile const & arg0) [constructor]
cls.add_constructor([param('ns3::OfdmDlBurstProfile const &', 'arg0')])
## dl-mac-messages.h (module 'wimax'): ns3::OfdmDlBurstProfile::OfdmDlBurstProfile() [constructor]
cls.add_constructor([])
## dl-mac-messages.h (module 'wimax'): uint8_t ns3::OfdmDlBurstProfile::GetDiuc() const [member function]
cls.add_method('GetDiuc',
'uint8_t',
[],
is_const=True)
## dl-mac-messages.h (module 'wimax'): uint8_t ns3::OfdmDlBurstProfile::GetFecCodeType() const [member function]
cls.add_method('GetFecCodeType',
'uint8_t',
[],
is_const=True)
## dl-mac-messages.h (module 'wimax'): uint8_t ns3::OfdmDlBurstProfile::GetLength() const [member function]
cls.add_method('GetLength',
'uint8_t',
[],
is_const=True)
## dl-mac-messages.h (module 'wimax'): uint16_t ns3::OfdmDlBurstProfile::GetSize() const [member function]
cls.add_method('GetSize',
'uint16_t',
[],
is_const=True)
## dl-mac-messages.h (module 'wimax'): uint8_t ns3::OfdmDlBurstProfile::GetType() const [member function]
cls.add_method('GetType',
'uint8_t',
[],
is_const=True)
## dl-mac-messages.h (module 'wimax'): ns3::Buffer::Iterator ns3::OfdmDlBurstProfile::Read(ns3::Buffer::Iterator start) [member function]
cls.add_method('Read',
'ns3::Buffer::Iterator',
[param('ns3::Buffer::Iterator', 'start')])
## dl-mac-messages.h (module 'wimax'): void ns3::OfdmDlBurstProfile::SetDiuc(uint8_t diuc) [member function]
cls.add_method('SetDiuc',
'void',
[param('uint8_t', 'diuc')])
## dl-mac-messages.h (module 'wimax'): void ns3::OfdmDlBurstProfile::SetFecCodeType(uint8_t fecCodeType) [member function]
cls.add_method('SetFecCodeType',
'void',
[param('uint8_t', 'fecCodeType')])
## dl-mac-messages.h (module 'wimax'): void ns3::OfdmDlBurstProfile::SetLength(uint8_t length) [member function]
cls.add_method('SetLength',
'void',
[param('uint8_t', 'length')])
## dl-mac-messages.h (module 'wimax'): void ns3::OfdmDlBurstProfile::SetType(uint8_t type) [member function]
cls.add_method('SetType',
'void',
[param('uint8_t', 'type')])
## dl-mac-messages.h (module 'wimax'): ns3::Buffer::Iterator ns3::OfdmDlBurstProfile::Write(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Write',
'ns3::Buffer::Iterator',
[param('ns3::Buffer::Iterator', 'start')],
is_const=True)
return
def register_Ns3OfdmDlMapIe_methods(root_module, cls):
## dl-mac-messages.h (module 'wimax'): ns3::OfdmDlMapIe::OfdmDlMapIe(ns3::OfdmDlMapIe const & arg0) [constructor]
cls.add_constructor([param('ns3::OfdmDlMapIe const &', 'arg0')])
## dl-mac-messages.h (module 'wimax'): ns3::OfdmDlMapIe::OfdmDlMapIe() [constructor]
cls.add_constructor([])
## dl-mac-messages.h (module 'wimax'): ns3::Cid ns3::OfdmDlMapIe::GetCid() const [member function]
cls.add_method('GetCid',
'ns3::Cid',
[],
is_const=True)
## dl-mac-messages.h (module 'wimax'): uint8_t ns3::OfdmDlMapIe::GetDiuc() const [member function]
cls.add_method('GetDiuc',
'uint8_t',
[],
is_const=True)
## dl-mac-messages.h (module 'wimax'): uint8_t ns3::OfdmDlMapIe::GetPreamblePresent() const [member function]
cls.add_method('GetPreamblePresent',
'uint8_t',
[],
is_const=True)
## dl-mac-messages.h (module 'wimax'): uint16_t ns3::OfdmDlMapIe::GetSize() const [member function]
cls.add_method('GetSize',
'uint16_t',
[],
is_const=True)
## dl-mac-messages.h (module 'wimax'): uint16_t ns3::OfdmDlMapIe::GetStartTime() const [member function]
cls.add_method('GetStartTime',
'uint16_t',
[],
is_const=True)
## dl-mac-messages.h (module 'wimax'): ns3::Buffer::Iterator ns3::OfdmDlMapIe::Read(ns3::Buffer::Iterator start) [member function]
cls.add_method('Read',
'ns3::Buffer::Iterator',
[param('ns3::Buffer::Iterator', 'start')])
## dl-mac-messages.h (module 'wimax'): void ns3::OfdmDlMapIe::SetCid(ns3::Cid cid) [member function]
cls.add_method('SetCid',
'void',
[param('ns3::Cid', 'cid')])
## dl-mac-messages.h (module 'wimax'): void ns3::OfdmDlMapIe::SetDiuc(uint8_t diuc) [member function]
cls.add_method('SetDiuc',
'void',
[param('uint8_t', 'diuc')])
## dl-mac-messages.h (module 'wimax'): void ns3::OfdmDlMapIe::SetPreamblePresent(uint8_t preamblePresent) [member function]
cls.add_method('SetPreamblePresent',
'void',
[param('uint8_t', 'preamblePresent')])
## dl-mac-messages.h (module 'wimax'): void ns3::OfdmDlMapIe::SetStartTime(uint16_t startTime) [member function]
cls.add_method('SetStartTime',
'void',
[param('uint16_t', 'startTime')])
## dl-mac-messages.h (module 'wimax'): ns3::Buffer::Iterator ns3::OfdmDlMapIe::Write(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Write',
'ns3::Buffer::Iterator',
[param('ns3::Buffer::Iterator', 'start')],
is_const=True)
return
def register_Ns3OfdmUlBurstProfile_methods(root_module, cls):
## ul-mac-messages.h (module 'wimax'): ns3::OfdmUlBurstProfile::OfdmUlBurstProfile(ns3::OfdmUlBurstProfile const & arg0) [constructor]
cls.add_constructor([param('ns3::OfdmUlBurstProfile const &', 'arg0')])
## ul-mac-messages.h (module 'wimax'): ns3::OfdmUlBurstProfile::OfdmUlBurstProfile() [constructor]
cls.add_constructor([])
## ul-mac-messages.h (module 'wimax'): uint8_t ns3::OfdmUlBurstProfile::GetFecCodeType() const [member function]
cls.add_method('GetFecCodeType',
'uint8_t',
[],
is_const=True)
## ul-mac-messages.h (module 'wimax'): uint8_t ns3::OfdmUlBurstProfile::GetLength() const [member function]
cls.add_method('GetLength',
'uint8_t',
[],
is_const=True)
## ul-mac-messages.h (module 'wimax'): uint16_t ns3::OfdmUlBurstProfile::GetSize() const [member function]
cls.add_method('GetSize',
'uint16_t',
[],
is_const=True)
## ul-mac-messages.h (module 'wimax'): uint8_t ns3::OfdmUlBurstProfile::GetType() const [member function]
cls.add_method('GetType',
'uint8_t',
[],
is_const=True)
## ul-mac-messages.h (module 'wimax'): uint8_t ns3::OfdmUlBurstProfile::GetUiuc() const [member function]
cls.add_method('GetUiuc',
'uint8_t',
[],
is_const=True)
## ul-mac-messages.h (module 'wimax'): ns3::Buffer::Iterator ns3::OfdmUlBurstProfile::Read(ns3::Buffer::Iterator start) [member function]
cls.add_method('Read',
'ns3::Buffer::Iterator',
[param('ns3::Buffer::Iterator', 'start')])
## ul-mac-messages.h (module 'wimax'): void ns3::OfdmUlBurstProfile::SetFecCodeType(uint8_t fecCodeType) [member function]
cls.add_method('SetFecCodeType',
'void',
[param('uint8_t', 'fecCodeType')])
## ul-mac-messages.h (module 'wimax'): void ns3::OfdmUlBurstProfile::SetLength(uint8_t length) [member function]
cls.add_method('SetLength',
'void',
[param('uint8_t', 'length')])
## ul-mac-messages.h (module 'wimax'): void ns3::OfdmUlBurstProfile::SetType(uint8_t type) [member function]
cls.add_method('SetType',
'void',
[param('uint8_t', 'type')])
## ul-mac-messages.h (module 'wimax'): void ns3::OfdmUlBurstProfile::SetUiuc(uint8_t uiuc) [member function]
cls.add_method('SetUiuc',
'void',
[param('uint8_t', 'uiuc')])
## ul-mac-messages.h (module 'wimax'): ns3::Buffer::Iterator ns3::OfdmUlBurstProfile::Write(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Write',
'ns3::Buffer::Iterator',
[param('ns3::Buffer::Iterator', 'start')],
is_const=True)
return
def register_Ns3OfdmUlMapIe_methods(root_module, cls):
## ul-mac-messages.h (module 'wimax'): ns3::OfdmUlMapIe::OfdmUlMapIe(ns3::OfdmUlMapIe const & arg0) [constructor]
cls.add_constructor([param('ns3::OfdmUlMapIe const &', 'arg0')])
## ul-mac-messages.h (module 'wimax'): ns3::OfdmUlMapIe::OfdmUlMapIe() [constructor]
cls.add_constructor([])
## ul-mac-messages.h (module 'wimax'): ns3::Cid ns3::OfdmUlMapIe::GetCid() const [member function]
cls.add_method('GetCid',
'ns3::Cid',
[],
is_const=True)
## ul-mac-messages.h (module 'wimax'): uint16_t ns3::OfdmUlMapIe::GetDuration() const [member function]
cls.add_method('GetDuration',
'uint16_t',
[],
is_const=True)
## ul-mac-messages.h (module 'wimax'): uint8_t ns3::OfdmUlMapIe::GetMidambleRepetitionInterval() const [member function]
cls.add_method('GetMidambleRepetitionInterval',
'uint8_t',
[],
is_const=True)
## ul-mac-messages.h (module 'wimax'): uint16_t ns3::OfdmUlMapIe::GetSize() const [member function]
cls.add_method('GetSize',
'uint16_t',
[],
is_const=True)
## ul-mac-messages.h (module 'wimax'): uint16_t ns3::OfdmUlMapIe::GetStartTime() const [member function]
cls.add_method('GetStartTime',
'uint16_t',
[],
is_const=True)
## ul-mac-messages.h (module 'wimax'): uint8_t ns3::OfdmUlMapIe::GetSubchannelIndex() const [member function]
cls.add_method('GetSubchannelIndex',
'uint8_t',
[],
is_const=True)
## ul-mac-messages.h (module 'wimax'): uint8_t ns3::OfdmUlMapIe::GetUiuc() const [member function]
cls.add_method('GetUiuc',
'uint8_t',
[],
is_const=True)
## ul-mac-messages.h (module 'wimax'): ns3::Buffer::Iterator ns3::OfdmUlMapIe::Read(ns3::Buffer::Iterator start) [member function]
cls.add_method('Read',
'ns3::Buffer::Iterator',
[param('ns3::Buffer::Iterator', 'start')])
## ul-mac-messages.h (module 'wimax'): void ns3::OfdmUlMapIe::SetCid(ns3::Cid const & cid) [member function]
cls.add_method('SetCid',
'void',
[param('ns3::Cid const &', 'cid')])
## ul-mac-messages.h (module 'wimax'): void ns3::OfdmUlMapIe::SetDuration(uint16_t duration) [member function]
cls.add_method('SetDuration',
'void',
[param('uint16_t', 'duration')])
## ul-mac-messages.h (module 'wimax'): void ns3::OfdmUlMapIe::SetMidambleRepetitionInterval(uint8_t midambleRepetitionInterval) [member function]
cls.add_method('SetMidambleRepetitionInterval',
'void',
[param('uint8_t', 'midambleRepetitionInterval')])
## ul-mac-messages.h (module 'wimax'): void ns3::OfdmUlMapIe::SetStartTime(uint16_t startTime) [member function]
cls.add_method('SetStartTime',
'void',
[param('uint16_t', 'startTime')])
## ul-mac-messages.h (module 'wimax'): void ns3::OfdmUlMapIe::SetSubchannelIndex(uint8_t subchannelIndex) [member function]
cls.add_method('SetSubchannelIndex',
'void',
[param('uint8_t', 'subchannelIndex')])
## ul-mac-messages.h (module 'wimax'): void ns3::OfdmUlMapIe::SetUiuc(uint8_t uiuc) [member function]
cls.add_method('SetUiuc',
'void',
[param('uint8_t', 'uiuc')])
## ul-mac-messages.h (module 'wimax'): ns3::Buffer::Iterator ns3::OfdmUlMapIe::Write(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Write',
'ns3::Buffer::Iterator',
[param('ns3::Buffer::Iterator', 'start')],
is_const=True)
return
def register_Ns3PacketMetadata_methods(root_module, cls):
## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(uint64_t uid, uint32_t size) [constructor]
cls.add_constructor([param('uint64_t', 'uid'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(ns3::PacketMetadata const & o) [constructor]
cls.add_constructor([param('ns3::PacketMetadata const &', 'o')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddAtEnd(ns3::PacketMetadata const & o) [member function]
cls.add_method('AddAtEnd',
'void',
[param('ns3::PacketMetadata const &', 'o')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddHeader(ns3::Header const & header, uint32_t size) [member function]
cls.add_method('AddHeader',
'void',
[param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddPaddingAtEnd(uint32_t end) [member function]
cls.add_method('AddPaddingAtEnd',
'void',
[param('uint32_t', 'end')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
cls.add_method('AddTrailer',
'void',
[param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::PacketMetadata::BeginItem(ns3::Buffer buffer) const [member function]
cls.add_method('BeginItem',
'ns3::PacketMetadata::ItemIterator',
[param('ns3::Buffer', 'buffer')],
is_const=True)
## packet-metadata.h (module 'network'): ns3::PacketMetadata ns3::PacketMetadata::CreateFragment(uint32_t start, uint32_t end) const [member function]
cls.add_method('CreateFragment',
'ns3::PacketMetadata',
[param('uint32_t', 'start'), param('uint32_t', 'end')],
is_const=True)
## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::Enable() [member function]
cls.add_method('Enable',
'void',
[],
is_static=True)
## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::EnableChecking() [member function]
cls.add_method('EnableChecking',
'void',
[],
is_static=True)
## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## packet-metadata.h (module 'network'): uint64_t ns3::PacketMetadata::GetUid() const [member function]
cls.add_method('GetUid',
'uint64_t',
[],
is_const=True)
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtEnd(uint32_t end) [member function]
cls.add_method('RemoveAtEnd',
'void',
[param('uint32_t', 'end')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtStart(uint32_t start) [member function]
cls.add_method('RemoveAtStart',
'void',
[param('uint32_t', 'start')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveHeader(ns3::Header const & header, uint32_t size) [member function]
cls.add_method('RemoveHeader',
'void',
[param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
cls.add_method('RemoveTrailer',
'void',
[param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
return
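# Illustrative sketch only: packet metadata is off by default in ns-3; the
# static switches bound above turn it on process-wide and are typically called
# early, before packets are created (assumes ns.network import name).
def _example_enable_packet_metadata():
    import ns.network
    ns.network.PacketMetadata.Enable()
    ns.network.PacketMetadata.EnableChecking()   # also enables consistency checks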
def register_Ns3PacketMetadataItem_methods(root_module, cls):
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item() [constructor]
cls.add_constructor([])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item(ns3::PacketMetadata::Item const & arg0) [constructor]
cls.add_constructor([param('ns3::PacketMetadata::Item const &', 'arg0')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::current [variable]
cls.add_instance_attribute('current', 'ns3::Buffer::Iterator', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentSize [variable]
cls.add_instance_attribute('currentSize', 'uint32_t', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromEnd [variable]
cls.add_instance_attribute('currentTrimedFromEnd', 'uint32_t', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromStart [variable]
cls.add_instance_attribute('currentTrimedFromStart', 'uint32_t', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::isFragment [variable]
cls.add_instance_attribute('isFragment', 'bool', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::tid [variable]
cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::type [variable]
cls.add_instance_attribute('type', 'ns3::PacketMetadata::Item::ItemType', is_const=False)
return
def register_Ns3PacketMetadataItemIterator_methods(root_module, cls):
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata::ItemIterator const & arg0) [constructor]
cls.add_constructor([param('ns3::PacketMetadata::ItemIterator const &', 'arg0')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata const * metadata, ns3::Buffer buffer) [constructor]
cls.add_constructor([param('ns3::PacketMetadata const *', 'metadata'), param('ns3::Buffer', 'buffer')])
## packet-metadata.h (module 'network'): bool ns3::PacketMetadata::ItemIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item ns3::PacketMetadata::ItemIterator::Next() [member function]
cls.add_method('Next',
'ns3::PacketMetadata::Item',
[])
return
def register_Ns3PacketTagIterator_methods(root_module, cls):
## packet.h (module 'network'): ns3::PacketTagIterator::PacketTagIterator(ns3::PacketTagIterator const & arg0) [constructor]
cls.add_constructor([param('ns3::PacketTagIterator const &', 'arg0')])
## packet.h (module 'network'): bool ns3::PacketTagIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## packet.h (module 'network'): ns3::PacketTagIterator::Item ns3::PacketTagIterator::Next() [member function]
cls.add_method('Next',
'ns3::PacketTagIterator::Item',
[])
return
def register_Ns3PacketTagIteratorItem_methods(root_module, cls):
## packet.h (module 'network'): ns3::PacketTagIterator::Item::Item(ns3::PacketTagIterator::Item const & arg0) [constructor]
cls.add_constructor([param('ns3::PacketTagIterator::Item const &', 'arg0')])
## packet.h (module 'network'): void ns3::PacketTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
cls.add_method('GetTag',
'void',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): ns3::TypeId ns3::PacketTagIterator::Item::GetTypeId() const [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_const=True)
return
def register_Ns3PacketTagList_methods(root_module, cls):
## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList() [constructor]
cls.add_constructor([])
## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList(ns3::PacketTagList const & o) [constructor]
cls.add_constructor([param('ns3::PacketTagList const &', 'o')])
## packet-tag-list.h (module 'network'): void ns3::PacketTagList::Add(ns3::Tag const & tag) const [member function]
cls.add_method('Add',
'void',
[param('ns3::Tag const &', 'tag')],
is_const=True)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData const * ns3::PacketTagList::Head() const [member function]
cls.add_method('Head',
'ns3::PacketTagList::TagData const *',
[],
is_const=True)
## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Peek(ns3::Tag & tag) const [member function]
cls.add_method('Peek',
'bool',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Remove(ns3::Tag & tag) [member function]
cls.add_method('Remove',
'bool',
[param('ns3::Tag &', 'tag')])
## packet-tag-list.h (module 'network'): void ns3::PacketTagList::RemoveAll() [member function]
cls.add_method('RemoveAll',
'void',
[])
## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Replace(ns3::Tag & tag) [member function]
cls.add_method('Replace',
'bool',
[param('ns3::Tag &', 'tag')])
return
def register_Ns3PacketTagListTagData_methods(root_module, cls):
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData() [constructor]
cls.add_constructor([])
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData(ns3::PacketTagList::TagData const & arg0) [constructor]
cls.add_constructor([param('ns3::PacketTagList::TagData const &', 'arg0')])
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::count [variable]
cls.add_instance_attribute('count', 'uint32_t', is_const=False)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::data [variable]
cls.add_instance_attribute('data', 'uint8_t [ 1 ]', is_const=False)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::next [variable]
cls.add_instance_attribute('next', 'ns3::PacketTagList::TagData *', is_const=False)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::size [variable]
cls.add_instance_attribute('size', 'uint32_t', is_const=False)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::tid [variable]
cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
return
def register_Ns3ParameterLogger_methods(root_module, cls):
## log.h (module 'core'): ns3::ParameterLogger::ParameterLogger(ns3::ParameterLogger const & arg0) [constructor]
cls.add_constructor([param('ns3::ParameterLogger const &', 'arg0')])
## log.h (module 'core'): ns3::ParameterLogger::ParameterLogger(std::ostream & os) [constructor]
cls.add_constructor([param('std::ostream &', 'os')])
return
def register_Ns3PcapFile_methods(root_module, cls):
## pcap-file.h (module 'network'): ns3::PcapFile::PcapFile() [constructor]
cls.add_constructor([])
## pcap-file.h (module 'network'): void ns3::PcapFile::Clear() [member function]
cls.add_method('Clear',
'void',
[])
## pcap-file.h (module 'network'): void ns3::PcapFile::Close() [member function]
cls.add_method('Close',
'void',
[])
## pcap-file.h (module 'network'): static bool ns3::PcapFile::Diff(std::string const & f1, std::string const & f2, uint32_t & sec, uint32_t & usec, uint32_t & packets, uint32_t snapLen=ns3::PcapFile::SNAPLEN_DEFAULT) [member function]
cls.add_method('Diff',
'bool',
[param('std::string const &', 'f1'), param('std::string const &', 'f2'), param('uint32_t &', 'sec'), param('uint32_t &', 'usec'), param('uint32_t &', 'packets'), param('uint32_t', 'snapLen', default_value='ns3::PcapFile::SNAPLEN_DEFAULT')],
is_static=True)
## pcap-file.h (module 'network'): bool ns3::PcapFile::Eof() const [member function]
cls.add_method('Eof',
'bool',
[],
is_const=True)
## pcap-file.h (module 'network'): bool ns3::PcapFile::Fail() const [member function]
cls.add_method('Fail',
'bool',
[],
is_const=True)
## pcap-file.h (module 'network'): uint32_t ns3::PcapFile::GetDataLinkType() [member function]
cls.add_method('GetDataLinkType',
'uint32_t',
[])
## pcap-file.h (module 'network'): uint32_t ns3::PcapFile::GetMagic() [member function]
cls.add_method('GetMagic',
'uint32_t',
[])
## pcap-file.h (module 'network'): uint32_t ns3::PcapFile::GetSigFigs() [member function]
cls.add_method('GetSigFigs',
'uint32_t',
[])
## pcap-file.h (module 'network'): uint32_t ns3::PcapFile::GetSnapLen() [member function]
cls.add_method('GetSnapLen',
'uint32_t',
[])
## pcap-file.h (module 'network'): bool ns3::PcapFile::GetSwapMode() [member function]
cls.add_method('GetSwapMode',
'bool',
[])
## pcap-file.h (module 'network'): int32_t ns3::PcapFile::GetTimeZoneOffset() [member function]
cls.add_method('GetTimeZoneOffset',
'int32_t',
[])
## pcap-file.h (module 'network'): uint16_t ns3::PcapFile::GetVersionMajor() [member function]
cls.add_method('GetVersionMajor',
'uint16_t',
[])
## pcap-file.h (module 'network'): uint16_t ns3::PcapFile::GetVersionMinor() [member function]
cls.add_method('GetVersionMinor',
'uint16_t',
[])
## pcap-file.h (module 'network'): void ns3::PcapFile::Init(uint32_t dataLinkType, uint32_t snapLen=ns3::PcapFile::SNAPLEN_DEFAULT, int32_t timeZoneCorrection=ns3::PcapFile::ZONE_DEFAULT, bool swapMode=false, bool nanosecMode=false) [member function]
cls.add_method('Init',
'void',
[param('uint32_t', 'dataLinkType'), param('uint32_t', 'snapLen', default_value='ns3::PcapFile::SNAPLEN_DEFAULT'), param('int32_t', 'timeZoneCorrection', default_value='ns3::PcapFile::ZONE_DEFAULT'), param('bool', 'swapMode', default_value='false'), param('bool', 'nanosecMode', default_value='false')])
## pcap-file.h (module 'network'): bool ns3::PcapFile::IsNanoSecMode() [member function]
cls.add_method('IsNanoSecMode',
'bool',
[])
## pcap-file.h (module 'network'): void ns3::PcapFile::Open(std::string const & filename, std::ios_base::openmode mode) [member function]
cls.add_method('Open',
'void',
[param('std::string const &', 'filename'), param('std::ios_base::openmode', 'mode')])
## pcap-file.h (module 'network'): void ns3::PcapFile::Read(uint8_t * const data, uint32_t maxBytes, uint32_t & tsSec, uint32_t & tsUsec, uint32_t & inclLen, uint32_t & origLen, uint32_t & readLen) [member function]
cls.add_method('Read',
'void',
[param('uint8_t * const', 'data'), param('uint32_t', 'maxBytes'), param('uint32_t &', 'tsSec'), param('uint32_t &', 'tsUsec'), param('uint32_t &', 'inclLen'), param('uint32_t &', 'origLen'), param('uint32_t &', 'readLen')])
## pcap-file.h (module 'network'): void ns3::PcapFile::Write(uint32_t tsSec, uint32_t tsUsec, uint8_t const * const data, uint32_t totalLen) [member function]
cls.add_method('Write',
'void',
[param('uint32_t', 'tsSec'), param('uint32_t', 'tsUsec'), param('uint8_t const * const', 'data'), param('uint32_t', 'totalLen')])
## pcap-file.h (module 'network'): void ns3::PcapFile::Write(uint32_t tsSec, uint32_t tsUsec, ns3::Ptr<const ns3::Packet> p) [member function]
cls.add_method('Write',
'void',
[param('uint32_t', 'tsSec'), param('uint32_t', 'tsUsec'), param('ns3::Ptr< ns3::Packet const >', 'p')])
## pcap-file.h (module 'network'): void ns3::PcapFile::Write(uint32_t tsSec, uint32_t tsUsec, ns3::Header const & header, ns3::Ptr<const ns3::Packet> p) [member function]
cls.add_method('Write',
'void',
[param('uint32_t', 'tsSec'), param('uint32_t', 'tsUsec'), param('ns3::Header const &', 'header'), param('ns3::Ptr< ns3::Packet const >', 'p')])
## pcap-file.h (module 'network'): ns3::PcapFile::SNAPLEN_DEFAULT [variable]
cls.add_static_attribute('SNAPLEN_DEFAULT', 'uint32_t const', is_const=True)
## pcap-file.h (module 'network'): ns3::PcapFile::ZONE_DEFAULT [variable]
cls.add_static_attribute('ZONE_DEFAULT', 'int32_t const', is_const=True)
return
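# Illustrative sketch only (kept as a comment so this generated module is not
# altered): once the bindings are built, the ns3::PcapFile API registered
# above could be driven from Python roughly as below. The 'ns.network' import
# path and the way std::ios_base::openmode is exposed depend on the local
# bindings layout and are assumptions, not something this file defines.
#
#     import ns.network
#     f = ns.network.PcapFile()
#     f.Open('trace.pcap', mode)   # 'mode' must map to a std::ios_base::openmode value
#     f.Init(1)                    # dataLinkType=1; snapLen/timezone/swap/nanosec keep the defaults above
#     print(f.GetDataLinkType(), f.GetSnapLen())
#     f.Close()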
def register_Ns3PcapHelper_methods(root_module, cls):
## trace-helper.h (module 'network'): ns3::PcapHelper::PcapHelper(ns3::PcapHelper const & arg0) [constructor]
cls.add_constructor([param('ns3::PcapHelper const &', 'arg0')])
## trace-helper.h (module 'network'): ns3::PcapHelper::PcapHelper() [constructor]
cls.add_constructor([])
## trace-helper.h (module 'network'): ns3::Ptr<ns3::PcapFileWrapper> ns3::PcapHelper::CreateFile(std::string filename, std::ios_base::openmode filemode, ns3::PcapHelper::DataLinkType dataLinkType, uint32_t snapLen=std::numeric_limits<unsigned int>::max(), int32_t tzCorrection=0) [member function]
cls.add_method('CreateFile',
'ns3::Ptr< ns3::PcapFileWrapper >',
[param('std::string', 'filename'), param('std::ios_base::openmode', 'filemode'), param('ns3::PcapHelper::DataLinkType', 'dataLinkType'), param('uint32_t', 'snapLen', default_value='std::numeric_limits<unsigned int>::max()'), param('int32_t', 'tzCorrection', default_value='0')])
## trace-helper.h (module 'network'): std::string ns3::PcapHelper::GetFilenameFromDevice(std::string prefix, ns3::Ptr<ns3::NetDevice> device, bool useObjectNames=true) [member function]
cls.add_method('GetFilenameFromDevice',
'std::string',
[param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'device'), param('bool', 'useObjectNames', default_value='true')])
## trace-helper.h (module 'network'): std::string ns3::PcapHelper::GetFilenameFromInterfacePair(std::string prefix, ns3::Ptr<ns3::Object> object, uint32_t interface, bool useObjectNames=true) [member function]
cls.add_method('GetFilenameFromInterfacePair',
'std::string',
[param('std::string', 'prefix'), param('ns3::Ptr< ns3::Object >', 'object'), param('uint32_t', 'interface'), param('bool', 'useObjectNames', default_value='true')])
return
def register_Ns3PcapHelperForDevice_methods(root_module, cls):
## trace-helper.h (module 'network'): ns3::PcapHelperForDevice::PcapHelperForDevice(ns3::PcapHelperForDevice const & arg0) [constructor]
cls.add_constructor([param('ns3::PcapHelperForDevice const &', 'arg0')])
## trace-helper.h (module 'network'): ns3::PcapHelperForDevice::PcapHelperForDevice() [constructor]
cls.add_constructor([])
## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcap(std::string prefix, ns3::Ptr<ns3::NetDevice> nd, bool promiscuous=false, bool explicitFilename=false) [member function]
cls.add_method('EnablePcap',
'void',
[param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'promiscuous', default_value='false'), param('bool', 'explicitFilename', default_value='false')])
## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcap(std::string prefix, std::string ndName, bool promiscuous=false, bool explicitFilename=false) [member function]
cls.add_method('EnablePcap',
'void',
[param('std::string', 'prefix'), param('std::string', 'ndName'), param('bool', 'promiscuous', default_value='false'), param('bool', 'explicitFilename', default_value='false')])
## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcap(std::string prefix, ns3::NetDeviceContainer d, bool promiscuous=false) [member function]
cls.add_method('EnablePcap',
'void',
[param('std::string', 'prefix'), param('ns3::NetDeviceContainer', 'd'), param('bool', 'promiscuous', default_value='false')])
## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcap(std::string prefix, ns3::NodeContainer n, bool promiscuous=false) [member function]
cls.add_method('EnablePcap',
'void',
[param('std::string', 'prefix'), param('ns3::NodeContainer', 'n'), param('bool', 'promiscuous', default_value='false')])
## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcap(std::string prefix, uint32_t nodeid, uint32_t deviceid, bool promiscuous=false) [member function]
cls.add_method('EnablePcap',
'void',
[param('std::string', 'prefix'), param('uint32_t', 'nodeid'), param('uint32_t', 'deviceid'), param('bool', 'promiscuous', default_value='false')])
## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcapAll(std::string prefix, bool promiscuous=false) [member function]
cls.add_method('EnablePcapAll',
'void',
[param('std::string', 'prefix'), param('bool', 'promiscuous', default_value='false')])
## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcapInternal(std::string prefix, ns3::Ptr<ns3::NetDevice> nd, bool promiscuous, bool explicitFilename) [member function]
cls.add_method('EnablePcapInternal',
'void',
[param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'promiscuous'), param('bool', 'explicitFilename')],
is_virtual=True, is_pure_virtual=True)
return
def register_Ns3SNRToBlockErrorRateManager_methods(root_module, cls):
## snr-to-block-error-rate-manager.h (module 'wimax'): ns3::SNRToBlockErrorRateManager::SNRToBlockErrorRateManager(ns3::SNRToBlockErrorRateManager const & arg0) [constructor]
cls.add_constructor([param('ns3::SNRToBlockErrorRateManager const &', 'arg0')])
## snr-to-block-error-rate-manager.h (module 'wimax'): ns3::SNRToBlockErrorRateManager::SNRToBlockErrorRateManager() [constructor]
cls.add_constructor([])
## snr-to-block-error-rate-manager.h (module 'wimax'): void ns3::SNRToBlockErrorRateManager::ActivateLoss(bool loss) [member function]
cls.add_method('ActivateLoss',
'void',
[param('bool', 'loss')])
## snr-to-block-error-rate-manager.h (module 'wimax'): double ns3::SNRToBlockErrorRateManager::GetBlockErrorRate(double SNR, uint8_t modulation) [member function]
cls.add_method('GetBlockErrorRate',
'double',
[param('double', 'SNR'), param('uint8_t', 'modulation')])
## snr-to-block-error-rate-manager.h (module 'wimax'): ns3::SNRToBlockErrorRateRecord * ns3::SNRToBlockErrorRateManager::GetSNRToBlockErrorRateRecord(double SNR, uint8_t modulation) [member function]
cls.add_method('GetSNRToBlockErrorRateRecord',
'ns3::SNRToBlockErrorRateRecord *',
[param('double', 'SNR'), param('uint8_t', 'modulation')])
## snr-to-block-error-rate-manager.h (module 'wimax'): std::string ns3::SNRToBlockErrorRateManager::GetTraceFilePath() [member function]
cls.add_method('GetTraceFilePath',
'std::string',
[])
## snr-to-block-error-rate-manager.h (module 'wimax'): void ns3::SNRToBlockErrorRateManager::LoadDefaultTraces() [member function]
cls.add_method('LoadDefaultTraces',
'void',
[])
## snr-to-block-error-rate-manager.h (module 'wimax'): void ns3::SNRToBlockErrorRateManager::LoadTraces() [member function]
cls.add_method('LoadTraces',
'void',
[])
## snr-to-block-error-rate-manager.h (module 'wimax'): void ns3::SNRToBlockErrorRateManager::ReLoadTraces() [member function]
cls.add_method('ReLoadTraces',
'void',
[])
## snr-to-block-error-rate-manager.h (module 'wimax'): void ns3::SNRToBlockErrorRateManager::SetTraceFilePath(char * traceFilePath) [member function]
cls.add_method('SetTraceFilePath',
'void',
[param('char *', 'traceFilePath')])
return
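# Illustrative sketch only (comment, so the generated module is unchanged):
# the SNR-to-block-error-rate lookup registered above would typically be used
# as below. The 'ns.wimax' import path and the modulation index are
# placeholder assumptions.
#
#     import ns.wimax
#     mgr = ns.wimax.SNRToBlockErrorRateManager()
#     mgr.LoadDefaultTraces()
#     bler = mgr.GetBlockErrorRate(15.0, 2)   # SNR in dB, modulation index (placeholder value)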
def register_Ns3SNRToBlockErrorRateRecord_methods(root_module, cls):
## snr-to-block-error-rate-record.h (module 'wimax'): ns3::SNRToBlockErrorRateRecord::SNRToBlockErrorRateRecord(ns3::SNRToBlockErrorRateRecord const & arg0) [constructor]
cls.add_constructor([param('ns3::SNRToBlockErrorRateRecord const &', 'arg0')])
## snr-to-block-error-rate-record.h (module 'wimax'): ns3::SNRToBlockErrorRateRecord::SNRToBlockErrorRateRecord(double snrValue, double bitErrorRate, double BlockErrorRate, double sigma2, double I1, double I2) [constructor]
cls.add_constructor([param('double', 'snrValue'), param('double', 'bitErrorRate'), param('double', 'BlockErrorRate'), param('double', 'sigma2'), param('double', 'I1'), param('double', 'I2')])
## snr-to-block-error-rate-record.h (module 'wimax'): ns3::SNRToBlockErrorRateRecord * ns3::SNRToBlockErrorRateRecord::Copy() [member function]
cls.add_method('Copy',
'ns3::SNRToBlockErrorRateRecord *',
[])
## snr-to-block-error-rate-record.h (module 'wimax'): double ns3::SNRToBlockErrorRateRecord::GetBitErrorRate() [member function]
cls.add_method('GetBitErrorRate',
'double',
[])
## snr-to-block-error-rate-record.h (module 'wimax'): double ns3::SNRToBlockErrorRateRecord::GetBlockErrorRate() [member function]
cls.add_method('GetBlockErrorRate',
'double',
[])
## snr-to-block-error-rate-record.h (module 'wimax'): double ns3::SNRToBlockErrorRateRecord::GetI1() [member function]
cls.add_method('GetI1',
'double',
[])
## snr-to-block-error-rate-record.h (module 'wimax'): double ns3::SNRToBlockErrorRateRecord::GetI2() [member function]
cls.add_method('GetI2',
'double',
[])
## snr-to-block-error-rate-record.h (module 'wimax'): double ns3::SNRToBlockErrorRateRecord::GetSNRValue() [member function]
cls.add_method('GetSNRValue',
'double',
[])
## snr-to-block-error-rate-record.h (module 'wimax'): double ns3::SNRToBlockErrorRateRecord::GetSigma2() [member function]
cls.add_method('GetSigma2',
'double',
[])
## snr-to-block-error-rate-record.h (module 'wimax'): void ns3::SNRToBlockErrorRateRecord::SetBitErrorRate(double arg0) [member function]
cls.add_method('SetBitErrorRate',
'void',
[param('double', 'arg0')])
## snr-to-block-error-rate-record.h (module 'wimax'): void ns3::SNRToBlockErrorRateRecord::SetBlockErrorRate(double arg0) [member function]
cls.add_method('SetBlockErrorRate',
'void',
[param('double', 'arg0')])
## snr-to-block-error-rate-record.h (module 'wimax'): void ns3::SNRToBlockErrorRateRecord::SetI1(double arg0) [member function]
cls.add_method('SetI1',
'void',
[param('double', 'arg0')])
## snr-to-block-error-rate-record.h (module 'wimax'): void ns3::SNRToBlockErrorRateRecord::SetI2(double arg0) [member function]
cls.add_method('SetI2',
'void',
[param('double', 'arg0')])
## snr-to-block-error-rate-record.h (module 'wimax'): void ns3::SNRToBlockErrorRateRecord::SetSNRValue(double arg0) [member function]
cls.add_method('SetSNRValue',
'void',
[param('double', 'arg0')])
return
def register_Ns3SSRecord_methods(root_module, cls):
## ss-record.h (module 'wimax'): ns3::SSRecord::SSRecord(ns3::SSRecord const & arg0) [constructor]
cls.add_constructor([param('ns3::SSRecord const &', 'arg0')])
## ss-record.h (module 'wimax'): ns3::SSRecord::SSRecord() [constructor]
cls.add_constructor([])
## ss-record.h (module 'wimax'): ns3::SSRecord::SSRecord(ns3::Mac48Address macAddress) [constructor]
cls.add_constructor([param('ns3::Mac48Address', 'macAddress')])
## ss-record.h (module 'wimax'): ns3::SSRecord::SSRecord(ns3::Mac48Address macAddress, ns3::Ipv4Address IPaddress) [constructor]
cls.add_constructor([param('ns3::Mac48Address', 'macAddress'), param('ns3::Ipv4Address', 'IPaddress')])
## ss-record.h (module 'wimax'): void ns3::SSRecord::AddServiceFlow(ns3::ServiceFlow * serviceFlow) [member function]
cls.add_method('AddServiceFlow',
'void',
[param('ns3::ServiceFlow *', 'serviceFlow')])
## ss-record.h (module 'wimax'): void ns3::SSRecord::DisablePollForRanging() [member function]
cls.add_method('DisablePollForRanging',
'void',
[])
## ss-record.h (module 'wimax'): void ns3::SSRecord::EnablePollForRanging() [member function]
cls.add_method('EnablePollForRanging',
'void',
[])
## ss-record.h (module 'wimax'): bool ns3::SSRecord::GetAreServiceFlowsAllocated() const [member function]
cls.add_method('GetAreServiceFlowsAllocated',
'bool',
[],
is_const=True)
## ss-record.h (module 'wimax'): ns3::Cid ns3::SSRecord::GetBasicCid() const [member function]
cls.add_method('GetBasicCid',
'ns3::Cid',
[],
is_const=True)
## ss-record.h (module 'wimax'): ns3::DsaRsp ns3::SSRecord::GetDsaRsp() const [member function]
cls.add_method('GetDsaRsp',
'ns3::DsaRsp',
[],
is_const=True)
## ss-record.h (module 'wimax'): uint8_t ns3::SSRecord::GetDsaRspRetries() const [member function]
cls.add_method('GetDsaRspRetries',
'uint8_t',
[],
is_const=True)
## ss-record.h (module 'wimax'): bool ns3::SSRecord::GetHasServiceFlowBe() const [member function]
cls.add_method('GetHasServiceFlowBe',
'bool',
[],
is_const=True)
## ss-record.h (module 'wimax'): bool ns3::SSRecord::GetHasServiceFlowNrtps() const [member function]
cls.add_method('GetHasServiceFlowNrtps',
'bool',
[],
is_const=True)
## ss-record.h (module 'wimax'): bool ns3::SSRecord::GetHasServiceFlowRtps() const [member function]
cls.add_method('GetHasServiceFlowRtps',
'bool',
[],
is_const=True)
## ss-record.h (module 'wimax'): bool ns3::SSRecord::GetHasServiceFlowUgs() const [member function]
cls.add_method('GetHasServiceFlowUgs',
'bool',
[],
is_const=True)
## ss-record.h (module 'wimax'): ns3::Ipv4Address ns3::SSRecord::GetIPAddress() [member function]
cls.add_method('GetIPAddress',
'ns3::Ipv4Address',
[])
## ss-record.h (module 'wimax'): uint8_t ns3::SSRecord::GetInvitedRangRetries() const [member function]
cls.add_method('GetInvitedRangRetries',
'uint8_t',
[],
is_const=True)
## ss-record.h (module 'wimax'): bool ns3::SSRecord::GetIsBroadcastSS() [member function]
cls.add_method('GetIsBroadcastSS',
'bool',
[])
## ss-record.h (module 'wimax'): ns3::Mac48Address ns3::SSRecord::GetMacAddress() const [member function]
cls.add_method('GetMacAddress',
'ns3::Mac48Address',
[],
is_const=True)
## ss-record.h (module 'wimax'): ns3::WimaxPhy::ModulationType ns3::SSRecord::GetModulationType() const [member function]
cls.add_method('GetModulationType',
'ns3::WimaxPhy::ModulationType',
[],
is_const=True)
## ss-record.h (module 'wimax'): bool ns3::SSRecord::GetPollForRanging() const [member function]
cls.add_method('GetPollForRanging',
'bool',
[],
is_const=True)
## ss-record.h (module 'wimax'): bool ns3::SSRecord::GetPollMeBit() const [member function]
cls.add_method('GetPollMeBit',
'bool',
[],
is_const=True)
## ss-record.h (module 'wimax'): ns3::Cid ns3::SSRecord::GetPrimaryCid() const [member function]
cls.add_method('GetPrimaryCid',
'ns3::Cid',
[],
is_const=True)
## ss-record.h (module 'wimax'): uint8_t ns3::SSRecord::GetRangingCorrectionRetries() const [member function]
cls.add_method('GetRangingCorrectionRetries',
'uint8_t',
[],
is_const=True)
## ss-record.h (module 'wimax'): ns3::WimaxNetDevice::RangingStatus ns3::SSRecord::GetRangingStatus() const [member function]
cls.add_method('GetRangingStatus',
'ns3::WimaxNetDevice::RangingStatus',
[],
is_const=True)
## ss-record.h (module 'wimax'): std::vector<ns3::ServiceFlow *, std::allocator<ns3::ServiceFlow *> > ns3::SSRecord::GetServiceFlows(ns3::ServiceFlow::SchedulingType schedulingType) const [member function]
cls.add_method('GetServiceFlows',
'std::vector< ns3::ServiceFlow * >',
[param('ns3::ServiceFlow::SchedulingType', 'schedulingType')],
is_const=True)
## ss-record.h (module 'wimax'): uint16_t ns3::SSRecord::GetSfTransactionId() const [member function]
cls.add_method('GetSfTransactionId',
'uint16_t',
[],
is_const=True)
## ss-record.h (module 'wimax'): void ns3::SSRecord::IncrementDsaRspRetries() [member function]
cls.add_method('IncrementDsaRspRetries',
'void',
[])
## ss-record.h (module 'wimax'): void ns3::SSRecord::IncrementInvitedRangingRetries() [member function]
cls.add_method('IncrementInvitedRangingRetries',
'void',
[])
## ss-record.h (module 'wimax'): void ns3::SSRecord::IncrementRangingCorrectionRetries() [member function]
cls.add_method('IncrementRangingCorrectionRetries',
'void',
[])
## ss-record.h (module 'wimax'): void ns3::SSRecord::ResetInvitedRangingRetries() [member function]
cls.add_method('ResetInvitedRangingRetries',
'void',
[])
## ss-record.h (module 'wimax'): void ns3::SSRecord::ResetRangingCorrectionRetries() [member function]
cls.add_method('ResetRangingCorrectionRetries',
'void',
[])
## ss-record.h (module 'wimax'): void ns3::SSRecord::SetAreServiceFlowsAllocated(bool val) [member function]
cls.add_method('SetAreServiceFlowsAllocated',
'void',
[param('bool', 'val')])
## ss-record.h (module 'wimax'): void ns3::SSRecord::SetBasicCid(ns3::Cid basicCid) [member function]
cls.add_method('SetBasicCid',
'void',
[param('ns3::Cid', 'basicCid')])
## ss-record.h (module 'wimax'): void ns3::SSRecord::SetDsaRsp(ns3::DsaRsp dsaRsp) [member function]
cls.add_method('SetDsaRsp',
'void',
[param('ns3::DsaRsp', 'dsaRsp')])
## ss-record.h (module 'wimax'): void ns3::SSRecord::SetDsaRspRetries(uint8_t dsaRspRetries) [member function]
cls.add_method('SetDsaRspRetries',
'void',
[param('uint8_t', 'dsaRspRetries')])
## ss-record.h (module 'wimax'): void ns3::SSRecord::SetIPAddress(ns3::Ipv4Address IPaddress) [member function]
cls.add_method('SetIPAddress',
'void',
[param('ns3::Ipv4Address', 'IPaddress')])
## ss-record.h (module 'wimax'): void ns3::SSRecord::SetIsBroadcastSS(bool broadcast_enable) [member function]
cls.add_method('SetIsBroadcastSS',
'void',
[param('bool', 'broadcast_enable')])
## ss-record.h (module 'wimax'): void ns3::SSRecord::SetMacAddress(ns3::Mac48Address macAddress) [member function]
cls.add_method('SetMacAddress',
'void',
[param('ns3::Mac48Address', 'macAddress')])
## ss-record.h (module 'wimax'): void ns3::SSRecord::SetModulationType(ns3::WimaxPhy::ModulationType modulationType) [member function]
cls.add_method('SetModulationType',
'void',
[param('ns3::WimaxPhy::ModulationType', 'modulationType')])
## ss-record.h (module 'wimax'): void ns3::SSRecord::SetPollMeBit(bool pollMeBit) [member function]
cls.add_method('SetPollMeBit',
'void',
[param('bool', 'pollMeBit')])
## ss-record.h (module 'wimax'): void ns3::SSRecord::SetPrimaryCid(ns3::Cid primaryCid) [member function]
cls.add_method('SetPrimaryCid',
'void',
[param('ns3::Cid', 'primaryCid')])
## ss-record.h (module 'wimax'): void ns3::SSRecord::SetRangingStatus(ns3::WimaxNetDevice::RangingStatus rangingStatus) [member function]
cls.add_method('SetRangingStatus',
'void',
[param('ns3::WimaxNetDevice::RangingStatus', 'rangingStatus')])
## ss-record.h (module 'wimax'): void ns3::SSRecord::SetSfTransactionId(uint16_t sfTransactionId) [member function]
cls.add_method('SetSfTransactionId',
'void',
[param('uint16_t', 'sfTransactionId')])
return
def register_Ns3SendParams_methods(root_module, cls):
## send-params.h (module 'wimax'): ns3::SendParams::SendParams(ns3::SendParams const & arg0) [constructor]
cls.add_constructor([param('ns3::SendParams const &', 'arg0')])
## send-params.h (module 'wimax'): ns3::SendParams::SendParams() [constructor]
cls.add_constructor([])
return
def register_Ns3ServiceFlow_methods(root_module, cls):
## service-flow.h (module 'wimax'): ns3::ServiceFlow::ServiceFlow(ns3::Tlv tlv) [constructor]
cls.add_constructor([param('ns3::Tlv', 'tlv')])
## service-flow.h (module 'wimax'): ns3::ServiceFlow::ServiceFlow() [constructor]
cls.add_constructor([])
## service-flow.h (module 'wimax'): ns3::ServiceFlow::ServiceFlow(ns3::ServiceFlow::Direction direction) [constructor]
cls.add_constructor([param('ns3::ServiceFlow::Direction', 'direction')])
## service-flow.h (module 'wimax'): ns3::ServiceFlow::ServiceFlow(ns3::ServiceFlow const & sf) [constructor]
cls.add_constructor([param('ns3::ServiceFlow const &', 'sf')])
## service-flow.h (module 'wimax'): ns3::ServiceFlow::ServiceFlow(uint32_t sfid, ns3::ServiceFlow::Direction direction, ns3::Ptr<ns3::WimaxConnection> connection) [constructor]
cls.add_constructor([param('uint32_t', 'sfid'), param('ns3::ServiceFlow::Direction', 'direction'), param('ns3::Ptr< ns3::WimaxConnection >', 'connection')])
## service-flow.h (module 'wimax'): bool ns3::ServiceFlow::CheckClassifierMatch(ns3::Ipv4Address srcAddress, ns3::Ipv4Address dstAddress, uint16_t srcPort, uint16_t dstPort, uint8_t proto) const [member function]
cls.add_method('CheckClassifierMatch',
'bool',
[param('ns3::Ipv4Address', 'srcAddress'), param('ns3::Ipv4Address', 'dstAddress'), param('uint16_t', 'srcPort'), param('uint16_t', 'dstPort'), param('uint8_t', 'proto')],
is_const=True)
## service-flow.h (module 'wimax'): void ns3::ServiceFlow::CleanUpQueue() [member function]
cls.add_method('CleanUpQueue',
'void',
[])
## service-flow.h (module 'wimax'): void ns3::ServiceFlow::CopyParametersFrom(ns3::ServiceFlow sf) [member function]
cls.add_method('CopyParametersFrom',
'void',
[param('ns3::ServiceFlow', 'sf')])
## service-flow.h (module 'wimax'): uint16_t ns3::ServiceFlow::GetArqBlockLifeTime() const [member function]
cls.add_method('GetArqBlockLifeTime',
'uint16_t',
[],
is_const=True)
## service-flow.h (module 'wimax'): uint16_t ns3::ServiceFlow::GetArqBlockSize() const [member function]
cls.add_method('GetArqBlockSize',
'uint16_t',
[],
is_const=True)
## service-flow.h (module 'wimax'): uint8_t ns3::ServiceFlow::GetArqDeliverInOrder() const [member function]
cls.add_method('GetArqDeliverInOrder',
'uint8_t',
[],
is_const=True)
## service-flow.h (module 'wimax'): uint8_t ns3::ServiceFlow::GetArqEnable() const [member function]
cls.add_method('GetArqEnable',
'uint8_t',
[],
is_const=True)
## service-flow.h (module 'wimax'): uint16_t ns3::ServiceFlow::GetArqPurgeTimeout() const [member function]
cls.add_method('GetArqPurgeTimeout',
'uint16_t',
[],
is_const=True)
## service-flow.h (module 'wimax'): uint16_t ns3::ServiceFlow::GetArqRetryTimeoutRx() const [member function]
cls.add_method('GetArqRetryTimeoutRx',
'uint16_t',
[],
is_const=True)
## service-flow.h (module 'wimax'): uint16_t ns3::ServiceFlow::GetArqRetryTimeoutTx() const [member function]
cls.add_method('GetArqRetryTimeoutTx',
'uint16_t',
[],
is_const=True)
## service-flow.h (module 'wimax'): uint16_t ns3::ServiceFlow::GetArqSyncLoss() const [member function]
cls.add_method('GetArqSyncLoss',
'uint16_t',
[],
is_const=True)
## service-flow.h (module 'wimax'): uint16_t ns3::ServiceFlow::GetArqWindowSize() const [member function]
cls.add_method('GetArqWindowSize',
'uint16_t',
[],
is_const=True)
## service-flow.h (module 'wimax'): uint16_t ns3::ServiceFlow::GetCid() const [member function]
cls.add_method('GetCid',
'uint16_t',
[],
is_const=True)
## service-flow.h (module 'wimax'): ns3::Ptr<ns3::WimaxConnection> ns3::ServiceFlow::GetConnection() const [member function]
cls.add_method('GetConnection',
'ns3::Ptr< ns3::WimaxConnection >',
[],
is_const=True)
## service-flow.h (module 'wimax'): ns3::CsParameters ns3::ServiceFlow::GetConvergenceSublayerParam() const [member function]
cls.add_method('GetConvergenceSublayerParam',
'ns3::CsParameters',
[],
is_const=True)
## service-flow.h (module 'wimax'): ns3::ServiceFlow::CsSpecification ns3::ServiceFlow::GetCsSpecification() const [member function]
cls.add_method('GetCsSpecification',
'ns3::ServiceFlow::CsSpecification',
[],
is_const=True)
## service-flow.h (module 'wimax'): ns3::ServiceFlow::Direction ns3::ServiceFlow::GetDirection() const [member function]
cls.add_method('GetDirection',
'ns3::ServiceFlow::Direction',
[],
is_const=True)
## service-flow.h (module 'wimax'): uint8_t ns3::ServiceFlow::GetFixedversusVariableSduIndicator() const [member function]
cls.add_method('GetFixedversusVariableSduIndicator',
'uint8_t',
[],
is_const=True)
## service-flow.h (module 'wimax'): bool ns3::ServiceFlow::GetIsEnabled() const [member function]
cls.add_method('GetIsEnabled',
'bool',
[],
is_const=True)
## service-flow.h (module 'wimax'): bool ns3::ServiceFlow::GetIsMulticast() const [member function]
cls.add_method('GetIsMulticast',
'bool',
[],
is_const=True)
## service-flow.h (module 'wimax'): uint32_t ns3::ServiceFlow::GetMaxSustainedTrafficRate() const [member function]
cls.add_method('GetMaxSustainedTrafficRate',
'uint32_t',
[],
is_const=True)
## service-flow.h (module 'wimax'): uint32_t ns3::ServiceFlow::GetMaxTrafficBurst() const [member function]
cls.add_method('GetMaxTrafficBurst',
'uint32_t',
[],
is_const=True)
## service-flow.h (module 'wimax'): uint32_t ns3::ServiceFlow::GetMaximumLatency() const [member function]
cls.add_method('GetMaximumLatency',
'uint32_t',
[],
is_const=True)
## service-flow.h (module 'wimax'): uint32_t ns3::ServiceFlow::GetMinReservedTrafficRate() const [member function]
cls.add_method('GetMinReservedTrafficRate',
'uint32_t',
[],
is_const=True)
## service-flow.h (module 'wimax'): uint32_t ns3::ServiceFlow::GetMinTolerableTrafficRate() const [member function]
cls.add_method('GetMinTolerableTrafficRate',
'uint32_t',
[],
is_const=True)
## service-flow.h (module 'wimax'): ns3::WimaxPhy::ModulationType ns3::ServiceFlow::GetModulation() const [member function]
cls.add_method('GetModulation',
'ns3::WimaxPhy::ModulationType',
[],
is_const=True)
## service-flow.h (module 'wimax'): uint8_t ns3::ServiceFlow::GetQosParamSetType() const [member function]
cls.add_method('GetQosParamSetType',
'uint8_t',
[],
is_const=True)
## service-flow.h (module 'wimax'): ns3::Ptr<ns3::WimaxMacQueue> ns3::ServiceFlow::GetQueue() const [member function]
cls.add_method('GetQueue',
'ns3::Ptr< ns3::WimaxMacQueue >',
[],
is_const=True)
## service-flow.h (module 'wimax'): ns3::ServiceFlowRecord * ns3::ServiceFlow::GetRecord() const [member function]
cls.add_method('GetRecord',
'ns3::ServiceFlowRecord *',
[],
is_const=True)
## service-flow.h (module 'wimax'): uint32_t ns3::ServiceFlow::GetRequestTransmissionPolicy() const [member function]
cls.add_method('GetRequestTransmissionPolicy',
'uint32_t',
[],
is_const=True)
## service-flow.h (module 'wimax'): ns3::ServiceFlow::SchedulingType ns3::ServiceFlow::GetSchedulingType() const [member function]
cls.add_method('GetSchedulingType',
'ns3::ServiceFlow::SchedulingType',
[],
is_const=True)
## service-flow.h (module 'wimax'): char * ns3::ServiceFlow::GetSchedulingTypeStr() const [member function]
cls.add_method('GetSchedulingTypeStr',
'char *',
[],
is_const=True)
## service-flow.h (module 'wimax'): uint8_t ns3::ServiceFlow::GetSduSize() const [member function]
cls.add_method('GetSduSize',
'uint8_t',
[],
is_const=True)
## service-flow.h (module 'wimax'): std::string ns3::ServiceFlow::GetServiceClassName() const [member function]
cls.add_method('GetServiceClassName',
'std::string',
[],
is_const=True)
## service-flow.h (module 'wimax'): ns3::ServiceFlow::SchedulingType ns3::ServiceFlow::GetServiceSchedulingType() const [member function]
cls.add_method('GetServiceSchedulingType',
'ns3::ServiceFlow::SchedulingType',
[],
is_const=True)
## service-flow.h (module 'wimax'): uint32_t ns3::ServiceFlow::GetSfid() const [member function]
cls.add_method('GetSfid',
'uint32_t',
[],
is_const=True)
## service-flow.h (module 'wimax'): uint16_t ns3::ServiceFlow::GetTargetSAID() const [member function]
cls.add_method('GetTargetSAID',
'uint16_t',
[],
is_const=True)
## service-flow.h (module 'wimax'): uint32_t ns3::ServiceFlow::GetToleratedJitter() const [member function]
cls.add_method('GetToleratedJitter',
'uint32_t',
[],
is_const=True)
## service-flow.h (module 'wimax'): uint8_t ns3::ServiceFlow::GetTrafficPriority() const [member function]
cls.add_method('GetTrafficPriority',
'uint8_t',
[],
is_const=True)
## service-flow.h (module 'wimax'): ns3::ServiceFlow::Type ns3::ServiceFlow::GetType() const [member function]
cls.add_method('GetType',
'ns3::ServiceFlow::Type',
[],
is_const=True)
## service-flow.h (module 'wimax'): uint16_t ns3::ServiceFlow::GetUnsolicitedGrantInterval() const [member function]
cls.add_method('GetUnsolicitedGrantInterval',
'uint16_t',
[],
is_const=True)
## service-flow.h (module 'wimax'): uint16_t ns3::ServiceFlow::GetUnsolicitedPollingInterval() const [member function]
cls.add_method('GetUnsolicitedPollingInterval',
'uint16_t',
[],
is_const=True)
## service-flow.h (module 'wimax'): bool ns3::ServiceFlow::HasPackets() const [member function]
cls.add_method('HasPackets',
'bool',
[],
is_const=True)
## service-flow.h (module 'wimax'): bool ns3::ServiceFlow::HasPackets(ns3::MacHeaderType::HeaderType packetType) const [member function]
cls.add_method('HasPackets',
'bool',
[param('ns3::MacHeaderType::HeaderType', 'packetType')],
is_const=True)
## service-flow.h (module 'wimax'): void ns3::ServiceFlow::InitValues() [member function]
cls.add_method('InitValues',
'void',
[])
## service-flow.h (module 'wimax'): void ns3::ServiceFlow::PrintQoSParameters() const [member function]
cls.add_method('PrintQoSParameters',
'void',
[],
is_const=True)
## service-flow.h (module 'wimax'): void ns3::ServiceFlow::SetArqBlockLifeTime(uint16_t lifeTime) [member function]
cls.add_method('SetArqBlockLifeTime',
'void',
[param('uint16_t', 'lifeTime')])
## service-flow.h (module 'wimax'): void ns3::ServiceFlow::SetArqBlockSize(uint16_t size) [member function]
cls.add_method('SetArqBlockSize',
'void',
[param('uint16_t', 'size')])
## service-flow.h (module 'wimax'): void ns3::ServiceFlow::SetArqDeliverInOrder(uint8_t inOrder) [member function]
cls.add_method('SetArqDeliverInOrder',
'void',
[param('uint8_t', 'inOrder')])
## service-flow.h (module 'wimax'): void ns3::ServiceFlow::SetArqEnable(uint8_t arqEnable) [member function]
cls.add_method('SetArqEnable',
'void',
[param('uint8_t', 'arqEnable')])
## service-flow.h (module 'wimax'): void ns3::ServiceFlow::SetArqPurgeTimeout(uint16_t timeout) [member function]
cls.add_method('SetArqPurgeTimeout',
'void',
[param('uint16_t', 'timeout')])
## service-flow.h (module 'wimax'): void ns3::ServiceFlow::SetArqRetryTimeoutRx(uint16_t timeout) [member function]
cls.add_method('SetArqRetryTimeoutRx',
'void',
[param('uint16_t', 'timeout')])
## service-flow.h (module 'wimax'): void ns3::ServiceFlow::SetArqRetryTimeoutTx(uint16_t timeout) [member function]
cls.add_method('SetArqRetryTimeoutTx',
'void',
[param('uint16_t', 'timeout')])
## service-flow.h (module 'wimax'): void ns3::ServiceFlow::SetArqSyncLoss(uint16_t syncLoss) [member function]
cls.add_method('SetArqSyncLoss',
'void',
[param('uint16_t', 'syncLoss')])
## service-flow.h (module 'wimax'): void ns3::ServiceFlow::SetArqWindowSize(uint16_t arqWindowSize) [member function]
cls.add_method('SetArqWindowSize',
'void',
[param('uint16_t', 'arqWindowSize')])
## service-flow.h (module 'wimax'): void ns3::ServiceFlow::SetConnection(ns3::Ptr<ns3::WimaxConnection> connection) [member function]
cls.add_method('SetConnection',
'void',
[param('ns3::Ptr< ns3::WimaxConnection >', 'connection')])
## service-flow.h (module 'wimax'): void ns3::ServiceFlow::SetConvergenceSublayerParam(ns3::CsParameters csparam) [member function]
cls.add_method('SetConvergenceSublayerParam',
'void',
[param('ns3::CsParameters', 'csparam')])
## service-flow.h (module 'wimax'): void ns3::ServiceFlow::SetCsSpecification(ns3::ServiceFlow::CsSpecification spec) [member function]
cls.add_method('SetCsSpecification',
'void',
[param('ns3::ServiceFlow::CsSpecification', 'spec')])
## service-flow.h (module 'wimax'): void ns3::ServiceFlow::SetDirection(ns3::ServiceFlow::Direction direction) [member function]
cls.add_method('SetDirection',
'void',
[param('ns3::ServiceFlow::Direction', 'direction')])
## service-flow.h (module 'wimax'): void ns3::ServiceFlow::SetFixedversusVariableSduIndicator(uint8_t sduIndicator) [member function]
cls.add_method('SetFixedversusVariableSduIndicator',
'void',
[param('uint8_t', 'sduIndicator')])
## service-flow.h (module 'wimax'): void ns3::ServiceFlow::SetIsEnabled(bool isEnabled) [member function]
cls.add_method('SetIsEnabled',
'void',
[param('bool', 'isEnabled')])
## service-flow.h (module 'wimax'): void ns3::ServiceFlow::SetIsMulticast(bool isMulticast) [member function]
cls.add_method('SetIsMulticast',
'void',
[param('bool', 'isMulticast')])
## service-flow.h (module 'wimax'): void ns3::ServiceFlow::SetMaxSustainedTrafficRate(uint32_t maxSustainedRate) [member function]
cls.add_method('SetMaxSustainedTrafficRate',
'void',
[param('uint32_t', 'maxSustainedRate')])
## service-flow.h (module 'wimax'): void ns3::ServiceFlow::SetMaxTrafficBurst(uint32_t maxTrafficBurst) [member function]
cls.add_method('SetMaxTrafficBurst',
'void',
[param('uint32_t', 'maxTrafficBurst')])
## service-flow.h (module 'wimax'): void ns3::ServiceFlow::SetMaximumLatency(uint32_t MaximumLatency) [member function]
cls.add_method('SetMaximumLatency',
'void',
[param('uint32_t', 'MaximumLatency')])
## service-flow.h (module 'wimax'): void ns3::ServiceFlow::SetMinReservedTrafficRate(uint32_t minResvRate) [member function]
cls.add_method('SetMinReservedTrafficRate',
'void',
[param('uint32_t', 'minResvRate')])
## service-flow.h (module 'wimax'): void ns3::ServiceFlow::SetMinTolerableTrafficRate(uint32_t minJitter) [member function]
cls.add_method('SetMinTolerableTrafficRate',
'void',
[param('uint32_t', 'minJitter')])
## service-flow.h (module 'wimax'): void ns3::ServiceFlow::SetModulation(ns3::WimaxPhy::ModulationType modulationType) [member function]
cls.add_method('SetModulation',
'void',
[param('ns3::WimaxPhy::ModulationType', 'modulationType')])
## service-flow.h (module 'wimax'): void ns3::ServiceFlow::SetQosParamSetType(uint8_t type) [member function]
cls.add_method('SetQosParamSetType',
'void',
[param('uint8_t', 'type')])
## service-flow.h (module 'wimax'): void ns3::ServiceFlow::SetRecord(ns3::ServiceFlowRecord * record) [member function]
cls.add_method('SetRecord',
'void',
[param('ns3::ServiceFlowRecord *', 'record')])
## service-flow.h (module 'wimax'): void ns3::ServiceFlow::SetRequestTransmissionPolicy(uint32_t policy) [member function]
cls.add_method('SetRequestTransmissionPolicy',
'void',
[param('uint32_t', 'policy')])
## service-flow.h (module 'wimax'): void ns3::ServiceFlow::SetSduSize(uint8_t sduSize) [member function]
cls.add_method('SetSduSize',
'void',
[param('uint8_t', 'sduSize')])
## service-flow.h (module 'wimax'): void ns3::ServiceFlow::SetServiceClassName(std::string name) [member function]
cls.add_method('SetServiceClassName',
'void',
[param('std::string', 'name')])
## service-flow.h (module 'wimax'): void ns3::ServiceFlow::SetServiceSchedulingType(ns3::ServiceFlow::SchedulingType schedType) [member function]
cls.add_method('SetServiceSchedulingType',
'void',
[param('ns3::ServiceFlow::SchedulingType', 'schedType')])
## service-flow.h (module 'wimax'): void ns3::ServiceFlow::SetSfid(uint32_t sfid) [member function]
cls.add_method('SetSfid',
'void',
[param('uint32_t', 'sfid')])
## service-flow.h (module 'wimax'): void ns3::ServiceFlow::SetTargetSAID(uint16_t targetSaid) [member function]
cls.add_method('SetTargetSAID',
'void',
[param('uint16_t', 'targetSaid')])
## service-flow.h (module 'wimax'): void ns3::ServiceFlow::SetToleratedJitter(uint32_t jitter) [member function]
cls.add_method('SetToleratedJitter',
'void',
[param('uint32_t', 'jitter')])
## service-flow.h (module 'wimax'): void ns3::ServiceFlow::SetTrafficPriority(uint8_t priority) [member function]
cls.add_method('SetTrafficPriority',
'void',
[param('uint8_t', 'priority')])
## service-flow.h (module 'wimax'): void ns3::ServiceFlow::SetType(ns3::ServiceFlow::Type type) [member function]
cls.add_method('SetType',
'void',
[param('ns3::ServiceFlow::Type', 'type')])
## service-flow.h (module 'wimax'): void ns3::ServiceFlow::SetUnsolicitedGrantInterval(uint16_t unsolicitedGrantInterval) [member function]
cls.add_method('SetUnsolicitedGrantInterval',
'void',
[param('uint16_t', 'unsolicitedGrantInterval')])
## service-flow.h (module 'wimax'): void ns3::ServiceFlow::SetUnsolicitedPollingInterval(uint16_t unsolicitedPollingInterval) [member function]
cls.add_method('SetUnsolicitedPollingInterval',
'void',
[param('uint16_t', 'unsolicitedPollingInterval')])
## service-flow.h (module 'wimax'): ns3::Tlv ns3::ServiceFlow::ToTlv() const [member function]
cls.add_method('ToTlv',
'ns3::Tlv',
[],
is_const=True)
return
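# Illustrative sketch only (comment, so the generated module is unchanged):
# a ServiceFlow created through these bindings is configured with the plain
# setters registered above. The 'ns.wimax' import path and the QoS numbers are
# placeholder assumptions used purely for illustration.
#
#     import ns.wimax
#     sf = ns.wimax.ServiceFlow()
#     sf.SetTrafficPriority(0)
#     sf.SetMaxSustainedTrafficRate(1000000)   # placeholder rate value
#     sf.SetMinReservedTrafficRate(500000)     # placeholder rate value
#     sf.SetIsEnabled(True)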
def register_Ns3ServiceFlowRecord_methods(root_module, cls):
## service-flow-record.h (module 'wimax'): ns3::ServiceFlowRecord::ServiceFlowRecord(ns3::ServiceFlowRecord const & arg0) [constructor]
cls.add_constructor([param('ns3::ServiceFlowRecord const &', 'arg0')])
## service-flow-record.h (module 'wimax'): ns3::ServiceFlowRecord::ServiceFlowRecord() [constructor]
cls.add_constructor([])
## service-flow-record.h (module 'wimax'): uint32_t ns3::ServiceFlowRecord::GetBacklogged() const [member function]
cls.add_method('GetBacklogged',
'uint32_t',
[],
is_const=True)
## service-flow-record.h (module 'wimax'): uint32_t ns3::ServiceFlowRecord::GetBackloggedTemp() const [member function]
cls.add_method('GetBackloggedTemp',
'uint32_t',
[],
is_const=True)
## service-flow-record.h (module 'wimax'): uint32_t ns3::ServiceFlowRecord::GetBwSinceLastExpiry() [member function]
cls.add_method('GetBwSinceLastExpiry',
'uint32_t',
[])
## service-flow-record.h (module 'wimax'): uint32_t ns3::ServiceFlowRecord::GetBytesRcvd() const [member function]
cls.add_method('GetBytesRcvd',
'uint32_t',
[],
is_const=True)
## service-flow-record.h (module 'wimax'): uint32_t ns3::ServiceFlowRecord::GetBytesSent() const [member function]
cls.add_method('GetBytesSent',
'uint32_t',
[],
is_const=True)
## service-flow-record.h (module 'wimax'): ns3::Time ns3::ServiceFlowRecord::GetDlTimeStamp() const [member function]
cls.add_method('GetDlTimeStamp',
'ns3::Time',
[],
is_const=True)
## service-flow-record.h (module 'wimax'): uint32_t ns3::ServiceFlowRecord::GetGrantSize() const [member function]
cls.add_method('GetGrantSize',
'uint32_t',
[],
is_const=True)
## service-flow-record.h (module 'wimax'): ns3::Time ns3::ServiceFlowRecord::GetGrantTimeStamp() const [member function]
cls.add_method('GetGrantTimeStamp',
'ns3::Time',
[],
is_const=True)
## service-flow-record.h (module 'wimax'): uint32_t ns3::ServiceFlowRecord::GetGrantedBandwidth() [member function]
cls.add_method('GetGrantedBandwidth',
'uint32_t',
[])
## service-flow-record.h (module 'wimax'): uint32_t ns3::ServiceFlowRecord::GetGrantedBandwidthTemp() [member function]
cls.add_method('GetGrantedBandwidthTemp',
'uint32_t',
[])
## service-flow-record.h (module 'wimax'): ns3::Time ns3::ServiceFlowRecord::GetLastGrantTime() const [member function]
cls.add_method('GetLastGrantTime',
'ns3::Time',
[],
is_const=True)
## service-flow-record.h (module 'wimax'): uint32_t ns3::ServiceFlowRecord::GetPktsRcvd() const [member function]
cls.add_method('GetPktsRcvd',
'uint32_t',
[],
is_const=True)
## service-flow-record.h (module 'wimax'): uint32_t ns3::ServiceFlowRecord::GetPktsSent() const [member function]
cls.add_method('GetPktsSent',
'uint32_t',
[],
is_const=True)
## service-flow-record.h (module 'wimax'): uint32_t ns3::ServiceFlowRecord::GetRequestedBandwidth() [member function]
cls.add_method('GetRequestedBandwidth',
'uint32_t',
[])
## service-flow-record.h (module 'wimax'): void ns3::ServiceFlowRecord::IncreaseBacklogged(uint32_t backlogged) [member function]
cls.add_method('IncreaseBacklogged',
'void',
[param('uint32_t', 'backlogged')])
## service-flow-record.h (module 'wimax'): void ns3::ServiceFlowRecord::IncreaseBackloggedTemp(uint32_t backloggedTemp) [member function]
cls.add_method('IncreaseBackloggedTemp',
'void',
[param('uint32_t', 'backloggedTemp')])
## service-flow-record.h (module 'wimax'): void ns3::ServiceFlowRecord::SetBacklogged(uint32_t backlogged) [member function]
cls.add_method('SetBacklogged',
'void',
[param('uint32_t', 'backlogged')])
## service-flow-record.h (module 'wimax'): void ns3::ServiceFlowRecord::SetBackloggedTemp(uint32_t backloggedTemp) [member function]
cls.add_method('SetBackloggedTemp',
'void',
[param('uint32_t', 'backloggedTemp')])
## service-flow-record.h (module 'wimax'): void ns3::ServiceFlowRecord::SetBwSinceLastExpiry(uint32_t bwSinceLastExpiry) [member function]
cls.add_method('SetBwSinceLastExpiry',
'void',
[param('uint32_t', 'bwSinceLastExpiry')])
## service-flow-record.h (module 'wimax'): void ns3::ServiceFlowRecord::SetBytesRcvd(uint32_t bytesRcvd) [member function]
cls.add_method('SetBytesRcvd',
'void',
[param('uint32_t', 'bytesRcvd')])
## service-flow-record.h (module 'wimax'): void ns3::ServiceFlowRecord::SetBytesSent(uint32_t bytesSent) [member function]
cls.add_method('SetBytesSent',
'void',
[param('uint32_t', 'bytesSent')])
## service-flow-record.h (module 'wimax'): void ns3::ServiceFlowRecord::SetDlTimeStamp(ns3::Time dlTimeStamp) [member function]
cls.add_method('SetDlTimeStamp',
'void',
[param('ns3::Time', 'dlTimeStamp')])
## service-flow-record.h (module 'wimax'): void ns3::ServiceFlowRecord::SetGrantSize(uint32_t grantSize) [member function]
cls.add_method('SetGrantSize',
'void',
[param('uint32_t', 'grantSize')])
## service-flow-record.h (module 'wimax'): void ns3::ServiceFlowRecord::SetGrantTimeStamp(ns3::Time grantTimeStamp) [member function]
cls.add_method('SetGrantTimeStamp',
'void',
[param('ns3::Time', 'grantTimeStamp')])
## service-flow-record.h (module 'wimax'): void ns3::ServiceFlowRecord::SetGrantedBandwidth(uint32_t grantedBandwidth) [member function]
cls.add_method('SetGrantedBandwidth',
'void',
[param('uint32_t', 'grantedBandwidth')])
## service-flow-record.h (module 'wimax'): void ns3::ServiceFlowRecord::SetGrantedBandwidthTemp(uint32_t grantedBandwidthTemp) [member function]
cls.add_method('SetGrantedBandwidthTemp',
'void',
[param('uint32_t', 'grantedBandwidthTemp')])
## service-flow-record.h (module 'wimax'): void ns3::ServiceFlowRecord::SetLastGrantTime(ns3::Time grantTime) [member function]
cls.add_method('SetLastGrantTime',
'void',
[param('ns3::Time', 'grantTime')])
## service-flow-record.h (module 'wimax'): void ns3::ServiceFlowRecord::SetPktsRcvd(uint32_t pktsRcvd) [member function]
cls.add_method('SetPktsRcvd',
'void',
[param('uint32_t', 'pktsRcvd')])
## service-flow-record.h (module 'wimax'): void ns3::ServiceFlowRecord::SetPktsSent(uint32_t pktsSent) [member function]
cls.add_method('SetPktsSent',
'void',
[param('uint32_t', 'pktsSent')])
## service-flow-record.h (module 'wimax'): void ns3::ServiceFlowRecord::SetRequestedBandwidth(uint32_t requestedBandwidth) [member function]
cls.add_method('SetRequestedBandwidth',
'void',
[param('uint32_t', 'requestedBandwidth')])
## service-flow-record.h (module 'wimax'): void ns3::ServiceFlowRecord::UpdateBwSinceLastExpiry(uint32_t bwSinceLastExpiry) [member function]
cls.add_method('UpdateBwSinceLastExpiry',
'void',
[param('uint32_t', 'bwSinceLastExpiry')])
## service-flow-record.h (module 'wimax'): void ns3::ServiceFlowRecord::UpdateBytesRcvd(uint32_t bytesRcvd) [member function]
cls.add_method('UpdateBytesRcvd',
'void',
[param('uint32_t', 'bytesRcvd')])
## service-flow-record.h (module 'wimax'): void ns3::ServiceFlowRecord::UpdateBytesSent(uint32_t bytesSent) [member function]
cls.add_method('UpdateBytesSent',
'void',
[param('uint32_t', 'bytesSent')])
## service-flow-record.h (module 'wimax'): void ns3::ServiceFlowRecord::UpdateGrantedBandwidth(uint32_t grantedBandwidth) [member function]
cls.add_method('UpdateGrantedBandwidth',
'void',
[param('uint32_t', 'grantedBandwidth')])
## service-flow-record.h (module 'wimax'): void ns3::ServiceFlowRecord::UpdateGrantedBandwidthTemp(uint32_t grantedBandwidthTemp) [member function]
cls.add_method('UpdateGrantedBandwidthTemp',
'void',
[param('uint32_t', 'grantedBandwidthTemp')])
## service-flow-record.h (module 'wimax'): void ns3::ServiceFlowRecord::UpdatePktsRcvd(uint32_t pktsRcvd) [member function]
cls.add_method('UpdatePktsRcvd',
'void',
[param('uint32_t', 'pktsRcvd')])
## service-flow-record.h (module 'wimax'): void ns3::ServiceFlowRecord::UpdatePktsSent(uint32_t pktsSent) [member function]
cls.add_method('UpdatePktsSent',
'void',
[param('uint32_t', 'pktsSent')])
## service-flow-record.h (module 'wimax'): void ns3::ServiceFlowRecord::UpdateRequestedBandwidth(uint32_t requestedBandwidth) [member function]
cls.add_method('UpdateRequestedBandwidth',
'void',
[param('uint32_t', 'requestedBandwidth')])
return
def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount(ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')])
return
def register_Ns3Simulator_methods(root_module, cls):
## simulator.h (module 'core'): ns3::Simulator::Simulator(ns3::Simulator const & arg0) [constructor]
cls.add_constructor([param('ns3::Simulator const &', 'arg0')])
## simulator.h (module 'core'): static void ns3::Simulator::Cancel(ns3::EventId const & id) [member function]
cls.add_method('Cancel',
'void',
[param('ns3::EventId const &', 'id')],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::Destroy() [member function]
cls.add_method('Destroy',
'void',
[],
is_static=True)
## simulator.h (module 'core'): static uint32_t ns3::Simulator::GetContext() [member function]
cls.add_method('GetContext',
'uint32_t',
[],
is_static=True)
## simulator.h (module 'core'): static ns3::Time ns3::Simulator::GetDelayLeft(ns3::EventId const & id) [member function]
cls.add_method('GetDelayLeft',
'ns3::Time',
[param('ns3::EventId const &', 'id')],
is_static=True)
## simulator.h (module 'core'): static uint64_t ns3::Simulator::GetEventCount() [member function]
cls.add_method('GetEventCount',
'uint64_t',
[],
is_static=True)
## simulator.h (module 'core'): static ns3::Ptr<ns3::SimulatorImpl> ns3::Simulator::GetImplementation() [member function]
cls.add_method('GetImplementation',
'ns3::Ptr< ns3::SimulatorImpl >',
[],
is_static=True)
## simulator.h (module 'core'): static ns3::Time ns3::Simulator::GetMaximumSimulationTime() [member function]
cls.add_method('GetMaximumSimulationTime',
'ns3::Time',
[],
is_static=True)
## simulator.h (module 'core'): static uint32_t ns3::Simulator::GetSystemId() [member function]
cls.add_method('GetSystemId',
'uint32_t',
[],
is_static=True)
## simulator.h (module 'core'): static bool ns3::Simulator::IsExpired(ns3::EventId const & id) [member function]
cls.add_method('IsExpired',
'bool',
[param('ns3::EventId const &', 'id')],
is_static=True)
## simulator.h (module 'core'): static bool ns3::Simulator::IsFinished() [member function]
cls.add_method('IsFinished',
'bool',
[],
is_static=True)
## simulator.h (module 'core'): static ns3::Time ns3::Simulator::Now() [member function]
cls.add_method('Now',
'ns3::Time',
[],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::Remove(ns3::EventId const & id) [member function]
cls.add_method('Remove',
'void',
[param('ns3::EventId const &', 'id')],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::SetImplementation(ns3::Ptr<ns3::SimulatorImpl> impl) [member function]
cls.add_method('SetImplementation',
'void',
[param('ns3::Ptr< ns3::SimulatorImpl >', 'impl')],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::SetScheduler(ns3::ObjectFactory schedulerFactory) [member function]
cls.add_method('SetScheduler',
'void',
[param('ns3::ObjectFactory', 'schedulerFactory')],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::Stop() [member function]
cls.add_method('Stop',
'void',
[],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::Stop(ns3::Time const & delay) [member function]
cls.add_method('Stop',
'void',
[param('ns3::Time const &', 'delay')],
is_static=True)
return
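# Illustrative sketch only (comment, so the generated module is unchanged):
# the static ns3::Simulator methods registered above are driven from Python as
# class-level calls. The 'ns.core' import path is an assumption about the
# local bindings layout.
#
#     import ns.core
#     ns.core.Simulator.Stop()        # request that the simulation stop
#     now = ns.core.Simulator.Now()   # current simulation time as an ns3::Time
#     ns.core.Simulator.Destroy()     # release simulator resources at the end of a run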
def register_Ns3Tag_methods(root_module, cls):
## tag.h (module 'network'): ns3::Tag::Tag() [constructor]
cls.add_constructor([])
## tag.h (module 'network'): ns3::Tag::Tag(ns3::Tag const & arg0) [constructor]
cls.add_constructor([param('ns3::Tag const &', 'arg0')])
## tag.h (module 'network'): void ns3::Tag::Deserialize(ns3::TagBuffer i) [member function]
cls.add_method('Deserialize',
'void',
[param('ns3::TagBuffer', 'i')],
is_virtual=True, is_pure_virtual=True)
## tag.h (module 'network'): uint32_t ns3::Tag::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True, is_virtual=True, is_pure_virtual=True)
## tag.h (module 'network'): static ns3::TypeId ns3::Tag::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## tag.h (module 'network'): void ns3::Tag::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_virtual=True, is_pure_virtual=True)
## tag.h (module 'network'): void ns3::Tag::Serialize(ns3::TagBuffer i) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::TagBuffer', 'i')],
is_const=True, is_virtual=True, is_pure_virtual=True)
return
def register_Ns3TagBuffer_methods(root_module, cls):
## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(ns3::TagBuffer const & arg0) [constructor]
cls.add_constructor([param('ns3::TagBuffer const &', 'arg0')])
## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(uint8_t * start, uint8_t * end) [constructor]
cls.add_constructor([param('uint8_t *', 'start'), param('uint8_t *', 'end')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::CopyFrom(ns3::TagBuffer o) [member function]
cls.add_method('CopyFrom',
'void',
[param('ns3::TagBuffer', 'o')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::Read(uint8_t * buffer, uint32_t size) [member function]
cls.add_method('Read',
'void',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
## tag-buffer.h (module 'network'): double ns3::TagBuffer::ReadDouble() [member function]
cls.add_method('ReadDouble',
'double',
[])
## tag-buffer.h (module 'network'): uint16_t ns3::TagBuffer::ReadU16() [member function]
cls.add_method('ReadU16',
'uint16_t',
[])
## tag-buffer.h (module 'network'): uint32_t ns3::TagBuffer::ReadU32() [member function]
cls.add_method('ReadU32',
'uint32_t',
[])
## tag-buffer.h (module 'network'): uint64_t ns3::TagBuffer::ReadU64() [member function]
cls.add_method('ReadU64',
'uint64_t',
[])
## tag-buffer.h (module 'network'): uint8_t ns3::TagBuffer::ReadU8() [member function]
cls.add_method('ReadU8',
'uint8_t',
[])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::TrimAtEnd(uint32_t trim) [member function]
cls.add_method('TrimAtEnd',
'void',
[param('uint32_t', 'trim')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::Write(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Write',
'void',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteDouble(double v) [member function]
cls.add_method('WriteDouble',
'void',
[param('double', 'v')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU16(uint16_t v) [member function]
cls.add_method('WriteU16',
'void',
[param('uint16_t', 'v')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU32(uint32_t v) [member function]
cls.add_method('WriteU32',
'void',
[param('uint32_t', 'v')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU64(uint64_t v) [member function]
cls.add_method('WriteU64',
'void',
[param('uint64_t', 'v')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU8(uint8_t v) [member function]
cls.add_method('WriteU8',
'void',
[param('uint8_t', 'v')])
return
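# Note (editorial): TagBuffer is the raw (de)serialization cursor handed to Tag
# subclasses, so the Read*/Write* calls registered above must be issued in the
# same order and with the same widths on both sides. A minimal, hedged sketch of
# a Python-side Tag using it is below; it assumes the generated bindings are
# importable as ns.network and that Python subclassing of Tag is enabled by the
# generated virtual-method proxies -- the MyTag name is purely illustrative:
#
#   class MyTag(ns.network.Tag):
#       def GetSerializedSize(self):
#           return 4                      # one uint32_t payload
#       def Serialize(self, buf):
#           buf.WriteU32(self.value)      # must mirror Deserialize below
#       def Deserialize(self, buf):
#           self.value = buf.ReadU32()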
def register_Ns3TimeWithUnit_methods(root_module, cls):
cls.add_output_stream_operator()
## nstime.h (module 'core'): ns3::TimeWithUnit::TimeWithUnit(ns3::TimeWithUnit const & arg0) [constructor]
cls.add_constructor([param('ns3::TimeWithUnit const &', 'arg0')])
## nstime.h (module 'core'): ns3::TimeWithUnit::TimeWithUnit(ns3::Time const time, ns3::Time::Unit const unit) [constructor]
cls.add_constructor([param('ns3::Time const', 'time'), param('ns3::Time::Unit const', 'unit')])
return
def register_Ns3TlvValue_methods(root_module, cls):
## wimax-tlv.h (module 'wimax'): ns3::TlvValue::TlvValue() [constructor]
cls.add_constructor([])
## wimax-tlv.h (module 'wimax'): ns3::TlvValue::TlvValue(ns3::TlvValue const & arg0) [constructor]
cls.add_constructor([param('ns3::TlvValue const &', 'arg0')])
## wimax-tlv.h (module 'wimax'): ns3::TlvValue * ns3::TlvValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::TlvValue *',
[],
is_const=True, is_virtual=True, is_pure_virtual=True)
## wimax-tlv.h (module 'wimax'): uint32_t ns3::TlvValue::Deserialize(ns3::Buffer::Iterator start, uint64_t valueLen) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start'), param('uint64_t', 'valueLen')],
is_virtual=True, is_pure_virtual=True)
## wimax-tlv.h (module 'wimax'): uint32_t ns3::TlvValue::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True, is_virtual=True, is_pure_virtual=True)
## wimax-tlv.h (module 'wimax'): void ns3::TlvValue::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_const=True, is_virtual=True, is_pure_virtual=True)
return
def register_Ns3TosTlvValue_methods(root_module, cls):
## wimax-tlv.h (module 'wimax'): ns3::TosTlvValue::TosTlvValue(ns3::TosTlvValue const & arg0) [constructor]
cls.add_constructor([param('ns3::TosTlvValue const &', 'arg0')])
## wimax-tlv.h (module 'wimax'): ns3::TosTlvValue::TosTlvValue() [constructor]
cls.add_constructor([])
## wimax-tlv.h (module 'wimax'): ns3::TosTlvValue::TosTlvValue(uint8_t low, uint8_t high, uint8_t mask) [constructor]
cls.add_constructor([param('uint8_t', 'low'), param('uint8_t', 'high'), param('uint8_t', 'mask')])
## wimax-tlv.h (module 'wimax'): ns3::TosTlvValue * ns3::TosTlvValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::TosTlvValue *',
[],
is_const=True, is_virtual=True)
## wimax-tlv.h (module 'wimax'): uint32_t ns3::TosTlvValue::Deserialize(ns3::Buffer::Iterator start, uint64_t valueLength) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start'), param('uint64_t', 'valueLength')],
is_virtual=True)
## wimax-tlv.h (module 'wimax'): uint8_t ns3::TosTlvValue::GetHigh() const [member function]
cls.add_method('GetHigh',
'uint8_t',
[],
is_const=True)
## wimax-tlv.h (module 'wimax'): uint8_t ns3::TosTlvValue::GetLow() const [member function]
cls.add_method('GetLow',
'uint8_t',
[],
is_const=True)
## wimax-tlv.h (module 'wimax'): uint8_t ns3::TosTlvValue::GetMask() const [member function]
cls.add_method('GetMask',
'uint8_t',
[],
is_const=True)
## wimax-tlv.h (module 'wimax'): uint32_t ns3::TosTlvValue::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True, is_virtual=True)
## wimax-tlv.h (module 'wimax'): void ns3::TosTlvValue::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_const=True, is_virtual=True)
return
def register_Ns3TypeId_methods(root_module, cls):
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('<')
## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor]
cls.add_constructor([param('char const *', 'name')])
## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [constructor]
cls.add_constructor([param('ns3::TypeId const &', 'o')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<const ns3::AttributeAccessor> accessor, ns3::Ptr<const ns3::AttributeChecker> checker, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SupportLevel::SUPPORTED, std::string const & supportMsg="") [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SupportLevel::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<const ns3::AttributeAccessor> accessor, ns3::Ptr<const ns3::AttributeChecker> checker, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SupportLevel::SUPPORTED, std::string const & supportMsg="") [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SupportLevel::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<const ns3::TraceSourceAccessor> accessor) [member function]
cls.add_method('AddTraceSource',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')],
deprecated=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<const ns3::TraceSourceAccessor> accessor, std::string callback, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SupportLevel::SUPPORTED, std::string const & supportMsg="") [member function]
cls.add_method('AddTraceSource',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor'), param('std::string', 'callback'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SupportLevel::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(std::size_t i) const [member function]
cls.add_method('GetAttribute',
'ns3::TypeId::AttributeInformation',
[param('std::size_t', 'i')],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(std::size_t i) const [member function]
cls.add_method('GetAttributeFullName',
'std::string',
[param('std::size_t', 'i')],
is_const=True)
## type-id.h (module 'core'): std::size_t ns3::TypeId::GetAttributeN() const [member function]
cls.add_method('GetAttributeN',
'std::size_t',
[],
is_const=True)
## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> ns3::TypeId::GetConstructor() const [member function]
cls.add_method('GetConstructor',
'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function]
cls.add_method('GetGroupName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId::hash_t ns3::TypeId::GetHash() const [member function]
cls.add_method('GetHash',
'ns3::TypeId::hash_t',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function]
cls.add_method('GetName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function]
cls.add_method('GetParent',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint16_t i) [member function]
cls.add_method('GetRegistered',
'ns3::TypeId',
[param('uint16_t', 'i')],
is_static=True)
## type-id.h (module 'core'): static uint16_t ns3::TypeId::GetRegisteredN() [member function]
cls.add_method('GetRegisteredN',
'uint16_t',
[],
is_static=True)
## type-id.h (module 'core'): std::size_t ns3::TypeId::GetSize() const [member function]
cls.add_method('GetSize',
'std::size_t',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(std::size_t i) const [member function]
cls.add_method('GetTraceSource',
'ns3::TypeId::TraceSourceInformation',
[param('std::size_t', 'i')],
is_const=True)
## type-id.h (module 'core'): std::size_t ns3::TypeId::GetTraceSourceN() const [member function]
cls.add_method('GetTraceSourceN',
'std::size_t',
[],
is_const=True)
## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function]
cls.add_method('GetUid',
'uint16_t',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function]
cls.add_method('HasConstructor',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function]
cls.add_method('HasParent',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function]
cls.add_method('HideFromDocumentation',
'ns3::TypeId',
[])
## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function]
cls.add_method('IsChildOf',
'bool',
[param('ns3::TypeId', 'other')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function]
cls.add_method('LookupAttributeByName',
'bool',
[param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByHash(ns3::TypeId::hash_t hash) [member function]
cls.add_method('LookupByHash',
'ns3::TypeId',
[param('uint32_t', 'hash')],
is_static=True)
## type-id.h (module 'core'): static bool ns3::TypeId::LookupByHashFailSafe(ns3::TypeId::hash_t hash, ns3::TypeId * tid) [member function]
cls.add_method('LookupByHashFailSafe',
'bool',
[param('uint32_t', 'hash'), param('ns3::TypeId *', 'tid')],
is_static=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function]
cls.add_method('LookupByName',
'ns3::TypeId',
[param('std::string', 'name')],
is_static=True)
## type-id.h (module 'core'): ns3::Ptr<const ns3::TraceSourceAccessor> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function]
cls.add_method('LookupTraceSourceByName',
'ns3::Ptr< ns3::TraceSourceAccessor const >',
[param('std::string', 'name')],
is_const=True)
## type-id.h (module 'core'): ns3::Ptr<const ns3::TraceSourceAccessor> ns3::TypeId::LookupTraceSourceByName(std::string name, ns3::TypeId::TraceSourceInformation * info) const [member function]
cls.add_method('LookupTraceSourceByName',
'ns3::Ptr< ns3::TraceSourceAccessor const >',
[param('std::string', 'name'), param('ns3::TypeId::TraceSourceInformation *', 'info')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function]
cls.add_method('MustHideFromDocumentation',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(std::size_t i, ns3::Ptr<const ns3::AttributeValue> initialValue) [member function]
cls.add_method('SetAttributeInitialValue',
'bool',
[param('std::size_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function]
cls.add_method('SetGroupName',
'ns3::TypeId',
[param('std::string', 'groupName')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function]
cls.add_method('SetParent',
'ns3::TypeId',
[param('ns3::TypeId', 'tid')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetSize(std::size_t size) [member function]
cls.add_method('SetSize',
'ns3::TypeId',
[param('std::size_t', 'size')])
## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t uid) [member function]
cls.add_method('SetUid',
'void',
[param('uint16_t', 'uid')])
return
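# Note (editorial): the TypeId methods registered above expose ns-3's run-time
# type and attribute metadata to Python. A minimal usage sketch, assuming the
# pybindgen-built bindings are importable as ns.core (the concrete TypeId name
# is illustrative only); it only uses methods registered in this function:
#
#   import ns.core
#   tid = ns.core.TypeId.LookupByName("ns3::Object")
#   for i in range(tid.GetAttributeN()):
#       info = tid.GetAttribute(i)                     # AttributeInformation
#       print(tid.GetAttributeFullName(i), info.help)  # e.g. "ns3::Object::..."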
def register_Ns3TypeIdAttributeInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [constructor]
cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable]
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::flags [variable]
    cls.add_instance_attribute('flags', 'uint32_t', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable]
cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue [variable]
cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::supportLevel [variable]
cls.add_instance_attribute('supportLevel', 'ns3::TypeId::SupportLevel', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::supportMsg [variable]
cls.add_instance_attribute('supportMsg', 'std::string', is_const=False)
return
def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & arg0) [constructor]
cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::callback [variable]
cls.add_instance_attribute('callback', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::supportLevel [variable]
cls.add_instance_attribute('supportLevel', 'ns3::TypeId::SupportLevel', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::supportMsg [variable]
cls.add_instance_attribute('supportMsg', 'std::string', is_const=False)
return
def register_Ns3U16TlvValue_methods(root_module, cls):
## wimax-tlv.h (module 'wimax'): ns3::U16TlvValue::U16TlvValue(ns3::U16TlvValue const & arg0) [constructor]
cls.add_constructor([param('ns3::U16TlvValue const &', 'arg0')])
## wimax-tlv.h (module 'wimax'): ns3::U16TlvValue::U16TlvValue(uint16_t value) [constructor]
cls.add_constructor([param('uint16_t', 'value')])
## wimax-tlv.h (module 'wimax'): ns3::U16TlvValue::U16TlvValue() [constructor]
cls.add_constructor([])
## wimax-tlv.h (module 'wimax'): ns3::U16TlvValue * ns3::U16TlvValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::U16TlvValue *',
[],
is_const=True, is_virtual=True)
## wimax-tlv.h (module 'wimax'): uint32_t ns3::U16TlvValue::Deserialize(ns3::Buffer::Iterator start, uint64_t valueLen) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start'), param('uint64_t', 'valueLen')],
is_virtual=True)
## wimax-tlv.h (module 'wimax'): uint32_t ns3::U16TlvValue::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')])
## wimax-tlv.h (module 'wimax'): uint32_t ns3::U16TlvValue::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True, is_virtual=True)
## wimax-tlv.h (module 'wimax'): uint16_t ns3::U16TlvValue::GetValue() const [member function]
cls.add_method('GetValue',
'uint16_t',
[],
is_const=True)
## wimax-tlv.h (module 'wimax'): void ns3::U16TlvValue::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_const=True, is_virtual=True)
return
def register_Ns3U32TlvValue_methods(root_module, cls):
## wimax-tlv.h (module 'wimax'): ns3::U32TlvValue::U32TlvValue(ns3::U32TlvValue const & arg0) [constructor]
cls.add_constructor([param('ns3::U32TlvValue const &', 'arg0')])
## wimax-tlv.h (module 'wimax'): ns3::U32TlvValue::U32TlvValue(uint32_t value) [constructor]
cls.add_constructor([param('uint32_t', 'value')])
## wimax-tlv.h (module 'wimax'): ns3::U32TlvValue::U32TlvValue() [constructor]
cls.add_constructor([])
## wimax-tlv.h (module 'wimax'): ns3::U32TlvValue * ns3::U32TlvValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::U32TlvValue *',
[],
is_const=True, is_virtual=True)
## wimax-tlv.h (module 'wimax'): uint32_t ns3::U32TlvValue::Deserialize(ns3::Buffer::Iterator start, uint64_t valueLen) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start'), param('uint64_t', 'valueLen')],
is_virtual=True)
## wimax-tlv.h (module 'wimax'): uint32_t ns3::U32TlvValue::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')])
## wimax-tlv.h (module 'wimax'): uint32_t ns3::U32TlvValue::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True, is_virtual=True)
## wimax-tlv.h (module 'wimax'): uint32_t ns3::U32TlvValue::GetValue() const [member function]
cls.add_method('GetValue',
'uint32_t',
[],
is_const=True)
## wimax-tlv.h (module 'wimax'): void ns3::U32TlvValue::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_const=True, is_virtual=True)
return
def register_Ns3U8TlvValue_methods(root_module, cls):
## wimax-tlv.h (module 'wimax'): ns3::U8TlvValue::U8TlvValue(ns3::U8TlvValue const & arg0) [constructor]
cls.add_constructor([param('ns3::U8TlvValue const &', 'arg0')])
## wimax-tlv.h (module 'wimax'): ns3::U8TlvValue::U8TlvValue(uint8_t value) [constructor]
cls.add_constructor([param('uint8_t', 'value')])
## wimax-tlv.h (module 'wimax'): ns3::U8TlvValue::U8TlvValue() [constructor]
cls.add_constructor([])
## wimax-tlv.h (module 'wimax'): ns3::U8TlvValue * ns3::U8TlvValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::U8TlvValue *',
[],
is_const=True, is_virtual=True)
## wimax-tlv.h (module 'wimax'): uint32_t ns3::U8TlvValue::Deserialize(ns3::Buffer::Iterator start, uint64_t valueLen) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start'), param('uint64_t', 'valueLen')],
is_virtual=True)
## wimax-tlv.h (module 'wimax'): uint32_t ns3::U8TlvValue::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')])
## wimax-tlv.h (module 'wimax'): uint32_t ns3::U8TlvValue::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True, is_virtual=True)
## wimax-tlv.h (module 'wimax'): uint8_t ns3::U8TlvValue::GetValue() const [member function]
cls.add_method('GetValue',
'uint8_t',
[],
is_const=True)
## wimax-tlv.h (module 'wimax'): void ns3::U8TlvValue::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_const=True, is_virtual=True)
return
def register_Ns3UcdChannelEncodings_methods(root_module, cls):
## ul-mac-messages.h (module 'wimax'): ns3::UcdChannelEncodings::UcdChannelEncodings(ns3::UcdChannelEncodings const & arg0) [constructor]
cls.add_constructor([param('ns3::UcdChannelEncodings const &', 'arg0')])
## ul-mac-messages.h (module 'wimax'): ns3::UcdChannelEncodings::UcdChannelEncodings() [constructor]
cls.add_constructor([])
## ul-mac-messages.h (module 'wimax'): uint16_t ns3::UcdChannelEncodings::GetBwReqOppSize() const [member function]
cls.add_method('GetBwReqOppSize',
'uint16_t',
[],
is_const=True)
## ul-mac-messages.h (module 'wimax'): uint32_t ns3::UcdChannelEncodings::GetFrequency() const [member function]
cls.add_method('GetFrequency',
'uint32_t',
[],
is_const=True)
## ul-mac-messages.h (module 'wimax'): uint16_t ns3::UcdChannelEncodings::GetRangReqOppSize() const [member function]
cls.add_method('GetRangReqOppSize',
'uint16_t',
[],
is_const=True)
## ul-mac-messages.h (module 'wimax'): uint16_t ns3::UcdChannelEncodings::GetSize() const [member function]
cls.add_method('GetSize',
'uint16_t',
[],
is_const=True)
## ul-mac-messages.h (module 'wimax'): ns3::Buffer::Iterator ns3::UcdChannelEncodings::Read(ns3::Buffer::Iterator start) [member function]
cls.add_method('Read',
'ns3::Buffer::Iterator',
[param('ns3::Buffer::Iterator', 'start')])
## ul-mac-messages.h (module 'wimax'): void ns3::UcdChannelEncodings::SetBwReqOppSize(uint16_t bwReqOppSize) [member function]
cls.add_method('SetBwReqOppSize',
'void',
[param('uint16_t', 'bwReqOppSize')])
## ul-mac-messages.h (module 'wimax'): void ns3::UcdChannelEncodings::SetFrequency(uint32_t frequency) [member function]
cls.add_method('SetFrequency',
'void',
[param('uint32_t', 'frequency')])
## ul-mac-messages.h (module 'wimax'): void ns3::UcdChannelEncodings::SetRangReqOppSize(uint16_t rangReqOppSize) [member function]
cls.add_method('SetRangReqOppSize',
'void',
[param('uint16_t', 'rangReqOppSize')])
## ul-mac-messages.h (module 'wimax'): ns3::Buffer::Iterator ns3::UcdChannelEncodings::Write(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Write',
'ns3::Buffer::Iterator',
[param('ns3::Buffer::Iterator', 'start')],
is_const=True)
## ul-mac-messages.h (module 'wimax'): ns3::Buffer::Iterator ns3::UcdChannelEncodings::DoRead(ns3::Buffer::Iterator start) [member function]
cls.add_method('DoRead',
'ns3::Buffer::Iterator',
[param('ns3::Buffer::Iterator', 'start')],
is_virtual=True, is_pure_virtual=True, visibility='private')
## ul-mac-messages.h (module 'wimax'): ns3::Buffer::Iterator ns3::UcdChannelEncodings::DoWrite(ns3::Buffer::Iterator start) const [member function]
cls.add_method('DoWrite',
'ns3::Buffer::Iterator',
[param('ns3::Buffer::Iterator', 'start')],
is_const=True, is_virtual=True, is_pure_virtual=True, visibility='private')
return
def register_Ns3VectorTlvValue_methods(root_module, cls):
## wimax-tlv.h (module 'wimax'): ns3::VectorTlvValue::VectorTlvValue(ns3::VectorTlvValue const & arg0) [constructor]
cls.add_constructor([param('ns3::VectorTlvValue const &', 'arg0')])
## wimax-tlv.h (module 'wimax'): ns3::VectorTlvValue::VectorTlvValue() [constructor]
cls.add_constructor([])
## wimax-tlv.h (module 'wimax'): void ns3::VectorTlvValue::Add(ns3::Tlv const & val) [member function]
cls.add_method('Add',
'void',
[param('ns3::Tlv const &', 'val')])
## wimax-tlv.h (module 'wimax'): ns3::VectorTlvValue::Iterator ns3::VectorTlvValue::Begin() const [member function]
cls.add_method('Begin',
'ns3::VectorTlvValue::Iterator',
[],
is_const=True)
## wimax-tlv.h (module 'wimax'): ns3::VectorTlvValue * ns3::VectorTlvValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::VectorTlvValue *',
[],
is_const=True, is_virtual=True, is_pure_virtual=True)
## wimax-tlv.h (module 'wimax'): uint32_t ns3::VectorTlvValue::Deserialize(ns3::Buffer::Iterator start, uint64_t valueLength) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start'), param('uint64_t', 'valueLength')],
is_virtual=True, is_pure_virtual=True)
## wimax-tlv.h (module 'wimax'): ns3::VectorTlvValue::Iterator ns3::VectorTlvValue::End() const [member function]
cls.add_method('End',
'ns3::VectorTlvValue::Iterator',
[],
is_const=True)
## wimax-tlv.h (module 'wimax'): uint32_t ns3::VectorTlvValue::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True, is_virtual=True)
## wimax-tlv.h (module 'wimax'): void ns3::VectorTlvValue::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_const=True, is_virtual=True)
return
def register_Ns3WimaxHelper_methods(root_module, cls):
## wimax-helper.h (module 'wimax'): ns3::WimaxHelper::WimaxHelper(ns3::WimaxHelper const & arg0) [constructor]
cls.add_constructor([param('ns3::WimaxHelper const &', 'arg0')])
## wimax-helper.h (module 'wimax'): ns3::WimaxHelper::WimaxHelper() [constructor]
cls.add_constructor([])
## wimax-helper.h (module 'wimax'): int64_t ns3::WimaxHelper::AssignStreams(int64_t stream) [member function]
cls.add_method('AssignStreams',
'int64_t',
[param('int64_t', 'stream')])
## wimax-helper.h (module 'wimax'): int64_t ns3::WimaxHelper::AssignStreams(ns3::NetDeviceContainer c, int64_t stream) [member function]
cls.add_method('AssignStreams',
'int64_t',
[param('ns3::NetDeviceContainer', 'c'), param('int64_t', 'stream')])
## wimax-helper.h (module 'wimax'): ns3::Ptr<ns3::BSScheduler> ns3::WimaxHelper::CreateBSScheduler(ns3::WimaxHelper::SchedulerType schedulerType) [member function]
cls.add_method('CreateBSScheduler',
'ns3::Ptr< ns3::BSScheduler >',
[param('ns3::WimaxHelper::SchedulerType', 'schedulerType')])
## wimax-helper.h (module 'wimax'): ns3::Ptr<ns3::WimaxPhy> ns3::WimaxHelper::CreatePhy(ns3::WimaxHelper::PhyType phyType) [member function]
cls.add_method('CreatePhy',
'ns3::Ptr< ns3::WimaxPhy >',
[param('ns3::WimaxHelper::PhyType', 'phyType')])
## wimax-helper.h (module 'wimax'): ns3::Ptr<ns3::WimaxPhy> ns3::WimaxHelper::CreatePhy(ns3::WimaxHelper::PhyType phyType, char * SNRTraceFilePath, bool activateLoss) [member function]
cls.add_method('CreatePhy',
'ns3::Ptr< ns3::WimaxPhy >',
[param('ns3::WimaxHelper::PhyType', 'phyType'), param('char *', 'SNRTraceFilePath'), param('bool', 'activateLoss')])
## wimax-helper.h (module 'wimax'): ns3::Ptr<ns3::WimaxPhy> ns3::WimaxHelper::CreatePhyWithoutChannel(ns3::WimaxHelper::PhyType phyType) [member function]
cls.add_method('CreatePhyWithoutChannel',
'ns3::Ptr< ns3::WimaxPhy >',
[param('ns3::WimaxHelper::PhyType', 'phyType')])
## wimax-helper.h (module 'wimax'): ns3::Ptr<ns3::WimaxPhy> ns3::WimaxHelper::CreatePhyWithoutChannel(ns3::WimaxHelper::PhyType phyType, char * SNRTraceFilePath, bool activateLoss) [member function]
cls.add_method('CreatePhyWithoutChannel',
'ns3::Ptr< ns3::WimaxPhy >',
[param('ns3::WimaxHelper::PhyType', 'phyType'), param('char *', 'SNRTraceFilePath'), param('bool', 'activateLoss')])
## wimax-helper.h (module 'wimax'): ns3::ServiceFlow ns3::WimaxHelper::CreateServiceFlow(ns3::ServiceFlow::Direction direction, ns3::ServiceFlow::SchedulingType schedulinType, ns3::IpcsClassifierRecord classifier) [member function]
cls.add_method('CreateServiceFlow',
'ns3::ServiceFlow',
[param('ns3::ServiceFlow::Direction', 'direction'), param('ns3::ServiceFlow::SchedulingType', 'schedulinType'), param('ns3::IpcsClassifierRecord', 'classifier')])
## wimax-helper.h (module 'wimax'): ns3::Ptr<ns3::UplinkScheduler> ns3::WimaxHelper::CreateUplinkScheduler(ns3::WimaxHelper::SchedulerType schedulerType) [member function]
cls.add_method('CreateUplinkScheduler',
'ns3::Ptr< ns3::UplinkScheduler >',
[param('ns3::WimaxHelper::SchedulerType', 'schedulerType')])
## wimax-helper.h (module 'wimax'): static void ns3::WimaxHelper::EnableAsciiForConnection(ns3::Ptr<ns3::OutputStreamWrapper> oss, uint32_t nodeid, uint32_t deviceid, char * netdevice, char * connection) [member function]
cls.add_method('EnableAsciiForConnection',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'oss'), param('uint32_t', 'nodeid'), param('uint32_t', 'deviceid'), param('char *', 'netdevice'), param('char *', 'connection')],
is_static=True)
## wimax-helper.h (module 'wimax'): static void ns3::WimaxHelper::EnableLogComponents() [member function]
cls.add_method('EnableLogComponents',
'void',
[],
is_static=True)
## wimax-helper.h (module 'wimax'): ns3::NetDeviceContainer ns3::WimaxHelper::Install(ns3::NodeContainer c, ns3::WimaxHelper::NetDeviceType type, ns3::WimaxHelper::PhyType phyType, ns3::WimaxHelper::SchedulerType schedulerType) [member function]
cls.add_method('Install',
'ns3::NetDeviceContainer',
[param('ns3::NodeContainer', 'c'), param('ns3::WimaxHelper::NetDeviceType', 'type'), param('ns3::WimaxHelper::PhyType', 'phyType'), param('ns3::WimaxHelper::SchedulerType', 'schedulerType')])
## wimax-helper.h (module 'wimax'): ns3::NetDeviceContainer ns3::WimaxHelper::Install(ns3::NodeContainer c, ns3::WimaxHelper::NetDeviceType deviceType, ns3::WimaxHelper::PhyType phyType, ns3::Ptr<ns3::WimaxChannel> channel, ns3::WimaxHelper::SchedulerType schedulerType) [member function]
cls.add_method('Install',
'ns3::NetDeviceContainer',
[param('ns3::NodeContainer', 'c'), param('ns3::WimaxHelper::NetDeviceType', 'deviceType'), param('ns3::WimaxHelper::PhyType', 'phyType'), param('ns3::Ptr< ns3::WimaxChannel >', 'channel'), param('ns3::WimaxHelper::SchedulerType', 'schedulerType')])
## wimax-helper.h (module 'wimax'): ns3::NetDeviceContainer ns3::WimaxHelper::Install(ns3::NodeContainer c, ns3::WimaxHelper::NetDeviceType deviceType, ns3::WimaxHelper::PhyType phyType, ns3::WimaxHelper::SchedulerType schedulerType, double frameDuration) [member function]
cls.add_method('Install',
'ns3::NetDeviceContainer',
[param('ns3::NodeContainer', 'c'), param('ns3::WimaxHelper::NetDeviceType', 'deviceType'), param('ns3::WimaxHelper::PhyType', 'phyType'), param('ns3::WimaxHelper::SchedulerType', 'schedulerType'), param('double', 'frameDuration')])
## wimax-helper.h (module 'wimax'): ns3::Ptr<ns3::WimaxNetDevice> ns3::WimaxHelper::Install(ns3::Ptr<ns3::Node> node, ns3::WimaxHelper::NetDeviceType deviceType, ns3::WimaxHelper::PhyType phyType, ns3::Ptr<ns3::WimaxChannel> channel, ns3::WimaxHelper::SchedulerType schedulerType) [member function]
cls.add_method('Install',
'ns3::Ptr< ns3::WimaxNetDevice >',
[param('ns3::Ptr< ns3::Node >', 'node'), param('ns3::WimaxHelper::NetDeviceType', 'deviceType'), param('ns3::WimaxHelper::PhyType', 'phyType'), param('ns3::Ptr< ns3::WimaxChannel >', 'channel'), param('ns3::WimaxHelper::SchedulerType', 'schedulerType')])
## wimax-helper.h (module 'wimax'): void ns3::WimaxHelper::SetPropagationLossModel(ns3::SimpleOfdmWimaxChannel::PropModel propagationModel) [member function]
cls.add_method('SetPropagationLossModel',
'void',
[param('ns3::SimpleOfdmWimaxChannel::PropModel', 'propagationModel')])
## wimax-helper.h (module 'wimax'): void ns3::WimaxHelper::EnableAsciiInternal(ns3::Ptr<ns3::OutputStreamWrapper> stream, std::string prefix, ns3::Ptr<ns3::NetDevice> nd, bool explicitFilename) [member function]
cls.add_method('EnableAsciiInternal',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'explicitFilename')],
is_virtual=True, visibility='private')
## wimax-helper.h (module 'wimax'): void ns3::WimaxHelper::EnablePcapInternal(std::string prefix, ns3::Ptr<ns3::NetDevice> nd, bool explicitFilename, bool promiscuous) [member function]
cls.add_method('EnablePcapInternal',
'void',
[param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'explicitFilename'), param('bool', 'promiscuous')],
is_virtual=True, visibility='private')
return
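# Note (editorial): a hedged sketch of driving the WimaxHelper API registered
# above from Python once the bindings are built. The Install/AssignStreams
# signatures come from the registrations in this function; the module names
# (ns.network, ns.wimax) and the enum values (DEVICE_TYPE_*, SIMPLE_PHY_TYPE_*,
# SCHED_TYPE_*) are assumed from ns-3's wimax-helper.h and are not registered
# here:
#
#   import ns.network
#   import ns.wimax
#   nodes = ns.network.NodeContainer()
#   nodes.Create(2)
#   wimax = ns.wimax.WimaxHelper()
#   devices = wimax.Install(nodes,
#                           ns.wimax.WimaxHelper.DEVICE_TYPE_SUBSCRIBER_STATION,
#                           ns.wimax.WimaxHelper.SIMPLE_PHY_TYPE_OFDM,
#                           ns.wimax.WimaxHelper.SCHED_TYPE_SIMPLE)
#   wimax.AssignStreams(devices, 0)   # fix RNG streams for reproducibility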
def register_Ns3Empty_methods(root_module, cls):
## empty.h (module 'core'): ns3::empty::empty() [constructor]
cls.add_constructor([])
## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [constructor]
cls.add_constructor([param('ns3::empty const &', 'arg0')])
return
def register_Ns3Int64x64_t_methods(root_module, cls):
cls.add_binary_numeric_operator('*', root_module['ns3::Time'], root_module['ns3::int64x64_t'], param('ns3::Time const &', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
cls.add_binary_comparison_operator('!=')
cls.add_binary_comparison_operator('<=')
cls.add_binary_comparison_operator('>=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('>')
cls.add_inplace_numeric_operator('+=', param('ns3::int64x64_t const &', 'right'))
cls.add_inplace_numeric_operator('-=', param('ns3::int64x64_t const &', 'right'))
cls.add_inplace_numeric_operator('*=', param('ns3::int64x64_t const &', 'right'))
cls.add_inplace_numeric_operator('/=', param('ns3::int64x64_t const &', 'right'))
cls.add_unary_numeric_operator('-')
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t() [constructor]
cls.add_constructor([])
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(double const value) [constructor]
cls.add_constructor([param('double const', 'value')])
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(long double const value) [constructor]
cls.add_constructor([param('long double const', 'value')])
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(int const v) [constructor]
cls.add_constructor([param('int const', 'v')])
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(long int const v) [constructor]
cls.add_constructor([param('long int const', 'v')])
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(long long int const v) [constructor]
cls.add_constructor([param('long long int const', 'v')])
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(unsigned int const v) [constructor]
cls.add_constructor([param('unsigned int const', 'v')])
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(long unsigned int const v) [constructor]
cls.add_constructor([param('long unsigned int const', 'v')])
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(long long unsigned int const v) [constructor]
cls.add_constructor([param('long long unsigned int const', 'v')])
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(int64_t const hi, uint64_t const lo) [constructor]
cls.add_constructor([param('int64_t const', 'hi'), param('uint64_t const', 'lo')])
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(ns3::int64x64_t const & o) [constructor]
cls.add_constructor([param('ns3::int64x64_t const &', 'o')])
## int64x64-128.h (module 'core'): double ns3::int64x64_t::GetDouble() const [member function]
cls.add_method('GetDouble',
'double',
[],
is_const=True)
## int64x64-128.h (module 'core'): int64_t ns3::int64x64_t::GetHigh() const [member function]
cls.add_method('GetHigh',
'int64_t',
[],
is_const=True)
## int64x64-128.h (module 'core'): uint64_t ns3::int64x64_t::GetLow() const [member function]
cls.add_method('GetLow',
'uint64_t',
[],
is_const=True)
## int64x64-128.h (module 'core'): static ns3::int64x64_t ns3::int64x64_t::Invert(uint64_t const v) [member function]
cls.add_method('Invert',
'ns3::int64x64_t',
[param('uint64_t const', 'v')],
is_static=True)
## int64x64-128.h (module 'core'): void ns3::int64x64_t::MulByInvert(ns3::int64x64_t const & o) [member function]
cls.add_method('MulByInvert',
'void',
[param('ns3::int64x64_t const &', 'o')])
## int64x64-128.h (module 'core'): ns3::int64x64_t::implementation [variable]
cls.add_static_attribute('implementation', 'ns3::int64x64_t::impl_type const', is_const=True)
return
def register_Ns3SimpleOfdmSendParam_methods(root_module, cls):
## simple-ofdm-send-param.h (module 'wimax'): ns3::simpleOfdmSendParam::simpleOfdmSendParam(ns3::simpleOfdmSendParam const & arg0) [constructor]
cls.add_constructor([param('ns3::simpleOfdmSendParam const &', 'arg0')])
## simple-ofdm-send-param.h (module 'wimax'): ns3::simpleOfdmSendParam::simpleOfdmSendParam() [constructor]
cls.add_constructor([])
## simple-ofdm-send-param.h (module 'wimax'): ns3::simpleOfdmSendParam::simpleOfdmSendParam(ns3::bvec const & fecBlock, uint32_t burstSize, bool isFirstBlock, uint64_t Frequency, ns3::WimaxPhy::ModulationType modulationType, uint8_t direction, double rxPowerDbm) [constructor]
cls.add_constructor([param('ns3::bvec const &', 'fecBlock'), param('uint32_t', 'burstSize'), param('bool', 'isFirstBlock'), param('uint64_t', 'Frequency'), param('ns3::WimaxPhy::ModulationType', 'modulationType'), param('uint8_t', 'direction'), param('double', 'rxPowerDbm')])
## simple-ofdm-send-param.h (module 'wimax'): ns3::simpleOfdmSendParam::simpleOfdmSendParam(uint32_t burstSize, bool isFirstBlock, uint64_t Frequency, ns3::WimaxPhy::ModulationType modulationType, uint8_t direction, double rxPowerDbm, ns3::Ptr<ns3::PacketBurst> burst) [constructor]
cls.add_constructor([param('uint32_t', 'burstSize'), param('bool', 'isFirstBlock'), param('uint64_t', 'Frequency'), param('ns3::WimaxPhy::ModulationType', 'modulationType'), param('uint8_t', 'direction'), param('double', 'rxPowerDbm'), param('ns3::Ptr< ns3::PacketBurst >', 'burst')])
## simple-ofdm-send-param.h (module 'wimax'): ns3::Ptr<ns3::PacketBurst> ns3::simpleOfdmSendParam::GetBurst() [member function]
cls.add_method('GetBurst',
'ns3::Ptr< ns3::PacketBurst >',
[])
## simple-ofdm-send-param.h (module 'wimax'): uint32_t ns3::simpleOfdmSendParam::GetBurstSize() [member function]
cls.add_method('GetBurstSize',
'uint32_t',
[])
## simple-ofdm-send-param.h (module 'wimax'): uint8_t ns3::simpleOfdmSendParam::GetDirection() [member function]
cls.add_method('GetDirection',
'uint8_t',
[])
## simple-ofdm-send-param.h (module 'wimax'): ns3::bvec ns3::simpleOfdmSendParam::GetFecBlock() [member function]
cls.add_method('GetFecBlock',
'ns3::bvec',
[])
## simple-ofdm-send-param.h (module 'wimax'): uint64_t ns3::simpleOfdmSendParam::GetFrequency() [member function]
cls.add_method('GetFrequency',
'uint64_t',
[])
## simple-ofdm-send-param.h (module 'wimax'): bool ns3::simpleOfdmSendParam::GetIsFirstBlock() [member function]
cls.add_method('GetIsFirstBlock',
'bool',
[])
## simple-ofdm-send-param.h (module 'wimax'): ns3::WimaxPhy::ModulationType ns3::simpleOfdmSendParam::GetModulationType() [member function]
cls.add_method('GetModulationType',
'ns3::WimaxPhy::ModulationType',
[])
## simple-ofdm-send-param.h (module 'wimax'): double ns3::simpleOfdmSendParam::GetRxPowerDbm() [member function]
cls.add_method('GetRxPowerDbm',
'double',
[])
## simple-ofdm-send-param.h (module 'wimax'): void ns3::simpleOfdmSendParam::SetBurstSize(uint32_t burstSize) [member function]
cls.add_method('SetBurstSize',
'void',
[param('uint32_t', 'burstSize')])
## simple-ofdm-send-param.h (module 'wimax'): void ns3::simpleOfdmSendParam::SetDirection(uint8_t direction) [member function]
cls.add_method('SetDirection',
'void',
[param('uint8_t', 'direction')])
## simple-ofdm-send-param.h (module 'wimax'): void ns3::simpleOfdmSendParam::SetFecBlock(ns3::bvec const & fecBlock) [member function]
cls.add_method('SetFecBlock',
'void',
[param('ns3::bvec const &', 'fecBlock')])
## simple-ofdm-send-param.h (module 'wimax'): void ns3::simpleOfdmSendParam::SetFrequency(uint64_t Frequency) [member function]
cls.add_method('SetFrequency',
'void',
[param('uint64_t', 'Frequency')])
## simple-ofdm-send-param.h (module 'wimax'): void ns3::simpleOfdmSendParam::SetIsFirstBlock(bool isFirstBlock) [member function]
cls.add_method('SetIsFirstBlock',
'void',
[param('bool', 'isFirstBlock')])
## simple-ofdm-send-param.h (module 'wimax'): void ns3::simpleOfdmSendParam::SetModulationType(ns3::WimaxPhy::ModulationType modulationType) [member function]
cls.add_method('SetModulationType',
'void',
[param('ns3::WimaxPhy::ModulationType', 'modulationType')])
## simple-ofdm-send-param.h (module 'wimax'): void ns3::simpleOfdmSendParam::SetRxPowerDbm(double rxPowerDbm) [member function]
cls.add_method('SetRxPowerDbm',
'void',
[param('double', 'rxPowerDbm')])
return
def register_Ns3Chunk_methods(root_module, cls):
## chunk.h (module 'network'): ns3::Chunk::Chunk() [constructor]
cls.add_constructor([])
## chunk.h (module 'network'): ns3::Chunk::Chunk(ns3::Chunk const & arg0) [constructor]
cls.add_constructor([param('ns3::Chunk const &', 'arg0')])
## chunk.h (module 'network'): uint32_t ns3::Chunk::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_virtual=True, is_pure_virtual=True)
## chunk.h (module 'network'): uint32_t ns3::Chunk::Deserialize(ns3::Buffer::Iterator start, ns3::Buffer::Iterator end) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start'), param('ns3::Buffer::Iterator', 'end')],
is_virtual=True)
## chunk.h (module 'network'): static ns3::TypeId ns3::Chunk::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## chunk.h (module 'network'): void ns3::Chunk::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_virtual=True, is_pure_virtual=True)
return
def register_Ns3ClassificationRuleVectorTlvValue_methods(root_module, cls):
## wimax-tlv.h (module 'wimax'): ns3::ClassificationRuleVectorTlvValue::ClassificationRuleVectorTlvValue(ns3::ClassificationRuleVectorTlvValue const & arg0) [constructor]
cls.add_constructor([param('ns3::ClassificationRuleVectorTlvValue const &', 'arg0')])
## wimax-tlv.h (module 'wimax'): ns3::ClassificationRuleVectorTlvValue::ClassificationRuleVectorTlvValue() [constructor]
cls.add_constructor([])
## wimax-tlv.h (module 'wimax'): ns3::ClassificationRuleVectorTlvValue * ns3::ClassificationRuleVectorTlvValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::ClassificationRuleVectorTlvValue *',
[],
is_const=True, is_virtual=True)
## wimax-tlv.h (module 'wimax'): uint32_t ns3::ClassificationRuleVectorTlvValue::Deserialize(ns3::Buffer::Iterator start, uint64_t valueLength) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start'), param('uint64_t', 'valueLength')],
is_virtual=True)
return
def register_Ns3CsParamVectorTlvValue_methods(root_module, cls):
## wimax-tlv.h (module 'wimax'): ns3::CsParamVectorTlvValue::CsParamVectorTlvValue(ns3::CsParamVectorTlvValue const & arg0) [constructor]
cls.add_constructor([param('ns3::CsParamVectorTlvValue const &', 'arg0')])
## wimax-tlv.h (module 'wimax'): ns3::CsParamVectorTlvValue::CsParamVectorTlvValue() [constructor]
cls.add_constructor([])
## wimax-tlv.h (module 'wimax'): ns3::CsParamVectorTlvValue * ns3::CsParamVectorTlvValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::CsParamVectorTlvValue *',
[],
is_const=True, is_virtual=True)
## wimax-tlv.h (module 'wimax'): uint32_t ns3::CsParamVectorTlvValue::Deserialize(ns3::Buffer::Iterator start, uint64_t valueLength) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start'), param('uint64_t', 'valueLength')],
is_virtual=True)
return
def register_Ns3Header_methods(root_module, cls):
cls.add_output_stream_operator()
## header.h (module 'network'): ns3::Header::Header() [constructor]
cls.add_constructor([])
## header.h (module 'network'): ns3::Header::Header(ns3::Header const & arg0) [constructor]
cls.add_constructor([param('ns3::Header const &', 'arg0')])
## header.h (module 'network'): uint32_t ns3::Header::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_virtual=True, is_pure_virtual=True)
## header.h (module 'network'): uint32_t ns3::Header::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True, is_virtual=True, is_pure_virtual=True)
## header.h (module 'network'): static ns3::TypeId ns3::Header::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## header.h (module 'network'): void ns3::Header::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_virtual=True, is_pure_virtual=True)
## header.h (module 'network'): void ns3::Header::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_const=True, is_virtual=True, is_pure_virtual=True)
return
def register_Ns3Ipv4AddressTlvValue_methods(root_module, cls):
## wimax-tlv.h (module 'wimax'): ns3::Ipv4AddressTlvValue::Ipv4AddressTlvValue(ns3::Ipv4AddressTlvValue const & arg0) [constructor]
cls.add_constructor([param('ns3::Ipv4AddressTlvValue const &', 'arg0')])
## wimax-tlv.h (module 'wimax'): ns3::Ipv4AddressTlvValue::Ipv4AddressTlvValue() [constructor]
cls.add_constructor([])
## wimax-tlv.h (module 'wimax'): void ns3::Ipv4AddressTlvValue::Add(ns3::Ipv4Address address, ns3::Ipv4Mask Mask) [member function]
cls.add_method('Add',
'void',
[param('ns3::Ipv4Address', 'address'), param('ns3::Ipv4Mask', 'Mask')])
## wimax-tlv.h (module 'wimax'): ns3::Ipv4AddressTlvValue::Iterator ns3::Ipv4AddressTlvValue::Begin() const [member function]
cls.add_method('Begin',
'ns3::Ipv4AddressTlvValue::Iterator',
[],
is_const=True)
## wimax-tlv.h (module 'wimax'): ns3::Ipv4AddressTlvValue * ns3::Ipv4AddressTlvValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ipv4AddressTlvValue *',
[],
is_const=True, is_virtual=True)
## wimax-tlv.h (module 'wimax'): uint32_t ns3::Ipv4AddressTlvValue::Deserialize(ns3::Buffer::Iterator start, uint64_t valueLength) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start'), param('uint64_t', 'valueLength')],
is_virtual=True)
## wimax-tlv.h (module 'wimax'): ns3::Ipv4AddressTlvValue::Iterator ns3::Ipv4AddressTlvValue::End() const [member function]
cls.add_method('End',
'ns3::Ipv4AddressTlvValue::Iterator',
[],
is_const=True)
## wimax-tlv.h (module 'wimax'): uint32_t ns3::Ipv4AddressTlvValue::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True, is_virtual=True)
## wimax-tlv.h (module 'wimax'): void ns3::Ipv4AddressTlvValue::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_const=True, is_virtual=True)
return
def register_Ns3Ipv4AddressTlvValueIpv4Addr_methods(root_module, cls):
## wimax-tlv.h (module 'wimax'): ns3::Ipv4AddressTlvValue::ipv4Addr::ipv4Addr() [constructor]
cls.add_constructor([])
## wimax-tlv.h (module 'wimax'): ns3::Ipv4AddressTlvValue::ipv4Addr::ipv4Addr(ns3::Ipv4AddressTlvValue::ipv4Addr const & arg0) [constructor]
cls.add_constructor([param('ns3::Ipv4AddressTlvValue::ipv4Addr const &', 'arg0')])
## wimax-tlv.h (module 'wimax'): ns3::Ipv4AddressTlvValue::ipv4Addr::Address [variable]
cls.add_instance_attribute('Address', 'ns3::Ipv4Address', is_const=False)
## wimax-tlv.h (module 'wimax'): ns3::Ipv4AddressTlvValue::ipv4Addr::Mask [variable]
cls.add_instance_attribute('Mask', 'ns3::Ipv4Mask', is_const=False)
return
def register_Ns3MacHeaderType_methods(root_module, cls):
## wimax-mac-header.h (module 'wimax'): ns3::MacHeaderType::MacHeaderType(ns3::MacHeaderType const & arg0) [constructor]
cls.add_constructor([param('ns3::MacHeaderType const &', 'arg0')])
## wimax-mac-header.h (module 'wimax'): ns3::MacHeaderType::MacHeaderType() [constructor]
cls.add_constructor([])
## wimax-mac-header.h (module 'wimax'): ns3::MacHeaderType::MacHeaderType(uint8_t type) [constructor]
cls.add_constructor([param('uint8_t', 'type')])
## wimax-mac-header.h (module 'wimax'): uint32_t ns3::MacHeaderType::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_virtual=True)
## wimax-mac-header.h (module 'wimax'): ns3::TypeId ns3::MacHeaderType::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## wimax-mac-header.h (module 'wimax'): std::string ns3::MacHeaderType::GetName() const [member function]
cls.add_method('GetName',
'std::string',
[],
is_const=True)
## wimax-mac-header.h (module 'wimax'): uint32_t ns3::MacHeaderType::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True, is_virtual=True)
## wimax-mac-header.h (module 'wimax'): uint8_t ns3::MacHeaderType::GetType() const [member function]
cls.add_method('GetType',
'uint8_t',
[],
is_const=True)
## wimax-mac-header.h (module 'wimax'): static ns3::TypeId ns3::MacHeaderType::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## wimax-mac-header.h (module 'wimax'): void ns3::MacHeaderType::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_virtual=True)
## wimax-mac-header.h (module 'wimax'): void ns3::MacHeaderType::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_const=True, is_virtual=True)
## wimax-mac-header.h (module 'wimax'): void ns3::MacHeaderType::SetType(uint8_t type) [member function]
cls.add_method('SetType',
'void',
[param('uint8_t', 'type')])
return
def register_Ns3ManagementMessageType_methods(root_module, cls):
## mac-messages.h (module 'wimax'): ns3::ManagementMessageType::ManagementMessageType(ns3::ManagementMessageType const & arg0) [constructor]
cls.add_constructor([param('ns3::ManagementMessageType const &', 'arg0')])
## mac-messages.h (module 'wimax'): ns3::ManagementMessageType::ManagementMessageType() [constructor]
cls.add_constructor([])
## mac-messages.h (module 'wimax'): ns3::ManagementMessageType::ManagementMessageType(uint8_t type) [constructor]
cls.add_constructor([param('uint8_t', 'type')])
## mac-messages.h (module 'wimax'): uint32_t ns3::ManagementMessageType::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_virtual=True)
## mac-messages.h (module 'wimax'): ns3::TypeId ns3::ManagementMessageType::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## mac-messages.h (module 'wimax'): std::string ns3::ManagementMessageType::GetName() const [member function]
cls.add_method('GetName',
'std::string',
[],
is_const=True)
## mac-messages.h (module 'wimax'): uint32_t ns3::ManagementMessageType::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True, is_virtual=True)
## mac-messages.h (module 'wimax'): uint8_t ns3::ManagementMessageType::GetType() const [member function]
cls.add_method('GetType',
'uint8_t',
[],
is_const=True)
## mac-messages.h (module 'wimax'): static ns3::TypeId ns3::ManagementMessageType::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## mac-messages.h (module 'wimax'): void ns3::ManagementMessageType::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_virtual=True)
## mac-messages.h (module 'wimax'): void ns3::ManagementMessageType::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_const=True, is_virtual=True)
## mac-messages.h (module 'wimax'): void ns3::ManagementMessageType::SetType(uint8_t type) [member function]
cls.add_method('SetType',
'void',
[param('uint8_t', 'type')])
return
def register_Ns3Object_methods(root_module, cls):
## object.h (module 'core'): ns3::Object::Object() [constructor]
cls.add_constructor([])
## object.h (module 'core'): void ns3::Object::AggregateObject(ns3::Ptr<ns3::Object> other) [member function]
cls.add_method('AggregateObject',
'void',
[param('ns3::Ptr< ns3::Object >', 'other')])
## object.h (module 'core'): void ns3::Object::Dispose() [member function]
cls.add_method('Dispose',
'void',
[])
## object.h (module 'core'): ns3::Object::AggregateIterator ns3::Object::GetAggregateIterator() const [member function]
cls.add_method('GetAggregateIterator',
'ns3::Object::AggregateIterator',
[],
is_const=True)
## object.h (module 'core'): ns3::TypeId ns3::Object::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## object.h (module 'core'): static ns3::TypeId ns3::Object::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## object.h (module 'core'): void ns3::Object::Initialize() [member function]
cls.add_method('Initialize',
'void',
[])
## object.h (module 'core'): bool ns3::Object::IsInitialized() const [member function]
cls.add_method('IsInitialized',
'bool',
[],
is_const=True)
## object.h (module 'core'): ns3::Object::Object(ns3::Object const & o) [constructor]
cls.add_constructor([param('ns3::Object const &', 'o')],
visibility='protected')
## object.h (module 'core'): void ns3::Object::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
is_virtual=True, visibility='protected')
## object.h (module 'core'): void ns3::Object::DoInitialize() [member function]
cls.add_method('DoInitialize',
'void',
[],
is_virtual=True, visibility='protected')
## object.h (module 'core'): void ns3::Object::NotifyNewAggregate() [member function]
cls.add_method('NotifyNewAggregate',
'void',
[],
is_virtual=True, visibility='protected')
return
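# Note (added commentary): DoDispose, DoInitialize and NotifyNewAggregate are registered with
# visibility='protected' and is_virtual=True, so pybindgen exposes them through its generated
# helper class and Python subclasses of ns3::Object should be able to override them.  This is
# a general property of how pybindgen handles protected virtuals rather than anything specific
# to this module, and the exact subclassing behaviour depends on the pybindgen version in use.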
def register_Ns3ObjectAggregateIterator_methods(root_module, cls):
## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator(ns3::Object::AggregateIterator const & arg0) [constructor]
cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')])
## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator() [constructor]
cls.add_constructor([])
## object.h (module 'core'): bool ns3::Object::AggregateIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## object.h (module 'core'): ns3::Ptr<const ns3::Object> ns3::Object::AggregateIterator::Next() [member function]
cls.add_method('Next',
'ns3::Ptr< ns3::Object const >',
[])
return
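# Note (added commentary): Object::AggregateIterator follows a HasNext()/Next() protocol
# rather than the Python iterator protocol, so aggregated objects are typically walked with
# an explicit loop.  A sketch, assuming an ns3::Object instance named obj:
#
#     it = obj.GetAggregateIterator()
#     while it.HasNext():
#         aggregated = it.Next()        # returns a Ptr<const Object>
#
# Whether the returned Ptr is transparently dereferenced on the Python side depends on the
# pybindgen smart-pointer handling, so treat this as an illustration only.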
def register_Ns3OfdmDownlinkFramePrefix_methods(root_module, cls):
## ofdm-downlink-frame-prefix.h (module 'wimax'): ns3::OfdmDownlinkFramePrefix::OfdmDownlinkFramePrefix(ns3::OfdmDownlinkFramePrefix const & arg0) [constructor]
cls.add_constructor([param('ns3::OfdmDownlinkFramePrefix const &', 'arg0')])
## ofdm-downlink-frame-prefix.h (module 'wimax'): ns3::OfdmDownlinkFramePrefix::OfdmDownlinkFramePrefix() [constructor]
cls.add_constructor([])
## ofdm-downlink-frame-prefix.h (module 'wimax'): void ns3::OfdmDownlinkFramePrefix::AddDlFramePrefixElement(ns3::DlFramePrefixIe dlFramePrefixElement) [member function]
cls.add_method('AddDlFramePrefixElement',
'void',
[param('ns3::DlFramePrefixIe', 'dlFramePrefixElement')])
## ofdm-downlink-frame-prefix.h (module 'wimax'): uint32_t ns3::OfdmDownlinkFramePrefix::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_virtual=True)
## ofdm-downlink-frame-prefix.h (module 'wimax'): ns3::Mac48Address ns3::OfdmDownlinkFramePrefix::GetBaseStationId() const [member function]
cls.add_method('GetBaseStationId',
'ns3::Mac48Address',
[],
is_const=True)
## ofdm-downlink-frame-prefix.h (module 'wimax'): uint8_t ns3::OfdmDownlinkFramePrefix::GetConfigurationChangeCount() const [member function]
cls.add_method('GetConfigurationChangeCount',
'uint8_t',
[],
is_const=True)
## ofdm-downlink-frame-prefix.h (module 'wimax'): std::vector<ns3::DlFramePrefixIe, std::allocator<ns3::DlFramePrefixIe> > ns3::OfdmDownlinkFramePrefix::GetDlFramePrefixElements() const [member function]
cls.add_method('GetDlFramePrefixElements',
'std::vector< ns3::DlFramePrefixIe >',
[],
is_const=True)
## ofdm-downlink-frame-prefix.h (module 'wimax'): uint32_t ns3::OfdmDownlinkFramePrefix::GetFrameNumber() const [member function]
cls.add_method('GetFrameNumber',
'uint32_t',
[],
is_const=True)
## ofdm-downlink-frame-prefix.h (module 'wimax'): uint8_t ns3::OfdmDownlinkFramePrefix::GetHcs() const [member function]
cls.add_method('GetHcs',
'uint8_t',
[],
is_const=True)
## ofdm-downlink-frame-prefix.h (module 'wimax'): std::string ns3::OfdmDownlinkFramePrefix::GetName() const [member function]
cls.add_method('GetName',
'std::string',
[],
is_const=True)
## ofdm-downlink-frame-prefix.h (module 'wimax'): uint32_t ns3::OfdmDownlinkFramePrefix::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True, is_virtual=True)
## ofdm-downlink-frame-prefix.h (module 'wimax'): static ns3::TypeId ns3::OfdmDownlinkFramePrefix::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## ofdm-downlink-frame-prefix.h (module 'wimax'): void ns3::OfdmDownlinkFramePrefix::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_virtual=True)
## ofdm-downlink-frame-prefix.h (module 'wimax'): void ns3::OfdmDownlinkFramePrefix::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_const=True, is_virtual=True)
## ofdm-downlink-frame-prefix.h (module 'wimax'): void ns3::OfdmDownlinkFramePrefix::SetBaseStationId(ns3::Mac48Address baseStationId) [member function]
cls.add_method('SetBaseStationId',
'void',
[param('ns3::Mac48Address', 'baseStationId')])
## ofdm-downlink-frame-prefix.h (module 'wimax'): void ns3::OfdmDownlinkFramePrefix::SetConfigurationChangeCount(uint8_t configurationChangeCount) [member function]
cls.add_method('SetConfigurationChangeCount',
'void',
[param('uint8_t', 'configurationChangeCount')])
## ofdm-downlink-frame-prefix.h (module 'wimax'): void ns3::OfdmDownlinkFramePrefix::SetFrameNumber(uint32_t frameNumber) [member function]
cls.add_method('SetFrameNumber',
'void',
[param('uint32_t', 'frameNumber')])
## ofdm-downlink-frame-prefix.h (module 'wimax'): void ns3::OfdmDownlinkFramePrefix::SetHcs(uint8_t hcs) [member function]
cls.add_method('SetHcs',
'void',
[param('uint8_t', 'hcs')])
return
def register_Ns3OfdmSendParams_methods(root_module, cls):
## send-params.h (module 'wimax'): ns3::OfdmSendParams::OfdmSendParams(ns3::OfdmSendParams const & arg0) [constructor]
cls.add_constructor([param('ns3::OfdmSendParams const &', 'arg0')])
## send-params.h (module 'wimax'): ns3::OfdmSendParams::OfdmSendParams(ns3::Ptr<ns3::PacketBurst> burst, uint8_t modulationType, uint8_t direction) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::PacketBurst >', 'burst'), param('uint8_t', 'modulationType'), param('uint8_t', 'direction')])
## send-params.h (module 'wimax'): ns3::Ptr<ns3::PacketBurst> ns3::OfdmSendParams::GetBurst() const [member function]
cls.add_method('GetBurst',
'ns3::Ptr< ns3::PacketBurst >',
[],
is_const=True)
## send-params.h (module 'wimax'): uint8_t ns3::OfdmSendParams::GetDirection() const [member function]
cls.add_method('GetDirection',
'uint8_t',
[],
is_const=True)
## send-params.h (module 'wimax'): uint8_t ns3::OfdmSendParams::GetModulationType() const [member function]
cls.add_method('GetModulationType',
'uint8_t',
[],
is_const=True)
return
def register_Ns3OfdmUcdChannelEncodings_methods(root_module, cls):
## ul-mac-messages.h (module 'wimax'): ns3::OfdmUcdChannelEncodings::OfdmUcdChannelEncodings(ns3::OfdmUcdChannelEncodings const & arg0) [constructor]
cls.add_constructor([param('ns3::OfdmUcdChannelEncodings const &', 'arg0')])
## ul-mac-messages.h (module 'wimax'): ns3::OfdmUcdChannelEncodings::OfdmUcdChannelEncodings() [constructor]
cls.add_constructor([])
## ul-mac-messages.h (module 'wimax'): uint8_t ns3::OfdmUcdChannelEncodings::GetSbchnlFocContCodes() const [member function]
cls.add_method('GetSbchnlFocContCodes',
'uint8_t',
[],
is_const=True)
## ul-mac-messages.h (module 'wimax'): uint8_t ns3::OfdmUcdChannelEncodings::GetSbchnlReqRegionFullParams() const [member function]
cls.add_method('GetSbchnlReqRegionFullParams',
'uint8_t',
[],
is_const=True)
## ul-mac-messages.h (module 'wimax'): uint16_t ns3::OfdmUcdChannelEncodings::GetSize() const [member function]
cls.add_method('GetSize',
'uint16_t',
[],
is_const=True)
## ul-mac-messages.h (module 'wimax'): void ns3::OfdmUcdChannelEncodings::SetSbchnlFocContCodes(uint8_t sbchnlFocContCodes) [member function]
cls.add_method('SetSbchnlFocContCodes',
'void',
[param('uint8_t', 'sbchnlFocContCodes')])
## ul-mac-messages.h (module 'wimax'): void ns3::OfdmUcdChannelEncodings::SetSbchnlReqRegionFullParams(uint8_t sbchnlReqRegionFullParams) [member function]
cls.add_method('SetSbchnlReqRegionFullParams',
'void',
[param('uint8_t', 'sbchnlReqRegionFullParams')])
## ul-mac-messages.h (module 'wimax'): ns3::Buffer::Iterator ns3::OfdmUcdChannelEncodings::DoRead(ns3::Buffer::Iterator start) [member function]
cls.add_method('DoRead',
'ns3::Buffer::Iterator',
[param('ns3::Buffer::Iterator', 'start')],
is_virtual=True, visibility='private')
## ul-mac-messages.h (module 'wimax'): ns3::Buffer::Iterator ns3::OfdmUcdChannelEncodings::DoWrite(ns3::Buffer::Iterator start) const [member function]
cls.add_method('DoWrite',
'ns3::Buffer::Iterator',
[param('ns3::Buffer::Iterator', 'start')],
is_const=True, is_virtual=True, visibility='private')
return
def register_Ns3PacketBurst_methods(root_module, cls):
## packet-burst.h (module 'network'): ns3::PacketBurst::PacketBurst(ns3::PacketBurst const & arg0) [constructor]
cls.add_constructor([param('ns3::PacketBurst const &', 'arg0')])
## packet-burst.h (module 'network'): ns3::PacketBurst::PacketBurst() [constructor]
cls.add_constructor([])
## packet-burst.h (module 'network'): void ns3::PacketBurst::AddPacket(ns3::Ptr<ns3::Packet> packet) [member function]
cls.add_method('AddPacket',
'void',
[param('ns3::Ptr< ns3::Packet >', 'packet')])
## packet-burst.h (module 'network'): std::list<ns3::Ptr<ns3::Packet>, std::allocator<ns3::Ptr<ns3::Packet> > >::const_iterator ns3::PacketBurst::Begin() const [member function]
cls.add_method('Begin',
'std::list< ns3::Ptr< ns3::Packet > > const_iterator',
[],
is_const=True)
## packet-burst.h (module 'network'): ns3::Ptr<ns3::PacketBurst> ns3::PacketBurst::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::PacketBurst >',
[],
is_const=True)
## packet-burst.h (module 'network'): std::list<ns3::Ptr<ns3::Packet>, std::allocator<ns3::Ptr<ns3::Packet> > >::const_iterator ns3::PacketBurst::End() const [member function]
cls.add_method('End',
'std::list< ns3::Ptr< ns3::Packet > > const_iterator',
[],
is_const=True)
## packet-burst.h (module 'network'): uint32_t ns3::PacketBurst::GetNPackets() const [member function]
cls.add_method('GetNPackets',
'uint32_t',
[],
is_const=True)
## packet-burst.h (module 'network'): std::list<ns3::Ptr<ns3::Packet>, std::allocator<ns3::Ptr<ns3::Packet> > > ns3::PacketBurst::GetPackets() const [member function]
cls.add_method('GetPackets',
'std::list< ns3::Ptr< ns3::Packet > >',
[],
is_const=True)
## packet-burst.h (module 'network'): uint32_t ns3::PacketBurst::GetSize() const [member function]
cls.add_method('GetSize',
'uint32_t',
[],
is_const=True)
## packet-burst.h (module 'network'): static ns3::TypeId ns3::PacketBurst::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## packet-burst.h (module 'network'): void ns3::PacketBurst::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
is_virtual=True, visibility='private')
return
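# Note (added commentary): PacketBurst is a simple container of packets used by the WiMAX PHY.
# A usage sketch, assuming ns.network is importable and the packet is created elsewhere:
#
#     burst = ns.network.PacketBurst()
#     burst.AddPacket(packet)           # packet: an ns3::Ptr<ns3::Packet> created elsewhere
#     n = burst.GetNPackets()           # number of packets in the burst
#     total = burst.GetSize()           # total size in bytes across all packets
#
# Begin()/End() expose std::list const_iterators, which are generally less convenient from
# Python than GetPackets(); this is an assumption about typical usage, not generated output.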
def register_Ns3PcapFileWrapper_methods(root_module, cls):
## pcap-file-wrapper.h (module 'network'): static ns3::TypeId ns3::PcapFileWrapper::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## pcap-file-wrapper.h (module 'network'): ns3::PcapFileWrapper::PcapFileWrapper() [constructor]
cls.add_constructor([])
## pcap-file-wrapper.h (module 'network'): bool ns3::PcapFileWrapper::Fail() const [member function]
cls.add_method('Fail',
'bool',
[],
is_const=True)
## pcap-file-wrapper.h (module 'network'): bool ns3::PcapFileWrapper::Eof() const [member function]
cls.add_method('Eof',
'bool',
[],
is_const=True)
## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Clear() [member function]
cls.add_method('Clear',
'void',
[])
## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Open(std::string const & filename, std::ios_base::openmode mode) [member function]
cls.add_method('Open',
'void',
[param('std::string const &', 'filename'), param('std::ios_base::openmode', 'mode')])
## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Close() [member function]
cls.add_method('Close',
'void',
[])
## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Init(uint32_t dataLinkType, uint32_t snapLen=std::numeric_limits<unsigned int>::max(), int32_t tzCorrection=ns3::PcapFile::ZONE_DEFAULT) [member function]
cls.add_method('Init',
'void',
[param('uint32_t', 'dataLinkType'), param('uint32_t', 'snapLen', default_value='std::numeric_limits<unsigned int>::max()'), param('int32_t', 'tzCorrection', default_value='ns3::PcapFile::ZONE_DEFAULT')])
## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Write(ns3::Time t, ns3::Ptr<const ns3::Packet> p) [member function]
cls.add_method('Write',
'void',
[param('ns3::Time', 't'), param('ns3::Ptr< ns3::Packet const >', 'p')])
## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Write(ns3::Time t, ns3::Header const & header, ns3::Ptr<const ns3::Packet> p) [member function]
cls.add_method('Write',
'void',
[param('ns3::Time', 't'), param('ns3::Header const &', 'header'), param('ns3::Ptr< ns3::Packet const >', 'p')])
## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Write(ns3::Time t, uint8_t const * buffer, uint32_t length) [member function]
cls.add_method('Write',
'void',
[param('ns3::Time', 't'), param('uint8_t const *', 'buffer'), param('uint32_t', 'length')])
## pcap-file-wrapper.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::PcapFileWrapper::Read(ns3::Time & t) [member function]
cls.add_method('Read',
'ns3::Ptr< ns3::Packet >',
[param('ns3::Time &', 't')])
## pcap-file-wrapper.h (module 'network'): uint32_t ns3::PcapFileWrapper::GetMagic() [member function]
cls.add_method('GetMagic',
'uint32_t',
[])
## pcap-file-wrapper.h (module 'network'): uint16_t ns3::PcapFileWrapper::GetVersionMajor() [member function]
cls.add_method('GetVersionMajor',
'uint16_t',
[])
## pcap-file-wrapper.h (module 'network'): uint16_t ns3::PcapFileWrapper::GetVersionMinor() [member function]
cls.add_method('GetVersionMinor',
'uint16_t',
[])
## pcap-file-wrapper.h (module 'network'): int32_t ns3::PcapFileWrapper::GetTimeZoneOffset() [member function]
cls.add_method('GetTimeZoneOffset',
'int32_t',
[])
## pcap-file-wrapper.h (module 'network'): uint32_t ns3::PcapFileWrapper::GetSigFigs() [member function]
cls.add_method('GetSigFigs',
'uint32_t',
[])
## pcap-file-wrapper.h (module 'network'): uint32_t ns3::PcapFileWrapper::GetSnapLen() [member function]
cls.add_method('GetSnapLen',
'uint32_t',
[])
## pcap-file-wrapper.h (module 'network'): uint32_t ns3::PcapFileWrapper::GetDataLinkType() [member function]
cls.add_method('GetDataLinkType',
'uint32_t',
[])
return
def register_Ns3PortRangeTlvValue_methods(root_module, cls):
## wimax-tlv.h (module 'wimax'): ns3::PortRangeTlvValue::PortRangeTlvValue(ns3::PortRangeTlvValue const & arg0) [constructor]
cls.add_constructor([param('ns3::PortRangeTlvValue const &', 'arg0')])
## wimax-tlv.h (module 'wimax'): ns3::PortRangeTlvValue::PortRangeTlvValue() [constructor]
cls.add_constructor([])
## wimax-tlv.h (module 'wimax'): void ns3::PortRangeTlvValue::Add(uint16_t portLow, uint16_t portHigh) [member function]
cls.add_method('Add',
'void',
[param('uint16_t', 'portLow'), param('uint16_t', 'portHigh')])
## wimax-tlv.h (module 'wimax'): ns3::PortRangeTlvValue::Iterator ns3::PortRangeTlvValue::Begin() const [member function]
cls.add_method('Begin',
'ns3::PortRangeTlvValue::Iterator',
[],
is_const=True)
## wimax-tlv.h (module 'wimax'): ns3::PortRangeTlvValue * ns3::PortRangeTlvValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::PortRangeTlvValue *',
[],
is_const=True, is_virtual=True)
## wimax-tlv.h (module 'wimax'): uint32_t ns3::PortRangeTlvValue::Deserialize(ns3::Buffer::Iterator start, uint64_t valueLength) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start'), param('uint64_t', 'valueLength')],
is_virtual=True)
## wimax-tlv.h (module 'wimax'): ns3::PortRangeTlvValue::Iterator ns3::PortRangeTlvValue::End() const [member function]
cls.add_method('End',
'ns3::PortRangeTlvValue::Iterator',
[],
is_const=True)
## wimax-tlv.h (module 'wimax'): uint32_t ns3::PortRangeTlvValue::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True, is_virtual=True)
## wimax-tlv.h (module 'wimax'): void ns3::PortRangeTlvValue::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_const=True, is_virtual=True)
return
def register_Ns3PortRangeTlvValuePortRange_methods(root_module, cls):
## wimax-tlv.h (module 'wimax'): ns3::PortRangeTlvValue::PortRange::PortRange() [constructor]
cls.add_constructor([])
## wimax-tlv.h (module 'wimax'): ns3::PortRangeTlvValue::PortRange::PortRange(ns3::PortRangeTlvValue::PortRange const & arg0) [constructor]
cls.add_constructor([param('ns3::PortRangeTlvValue::PortRange const &', 'arg0')])
## wimax-tlv.h (module 'wimax'): ns3::PortRangeTlvValue::PortRange::PortHigh [variable]
cls.add_instance_attribute('PortHigh', 'uint16_t', is_const=False)
## wimax-tlv.h (module 'wimax'): ns3::PortRangeTlvValue::PortRange::PortLow [variable]
cls.add_instance_attribute('PortLow', 'uint16_t', is_const=False)
return
def register_Ns3PriorityUlJob_methods(root_module, cls):
## ul-job.h (module 'wimax'): ns3::PriorityUlJob::PriorityUlJob(ns3::PriorityUlJob const & arg0) [constructor]
cls.add_constructor([param('ns3::PriorityUlJob const &', 'arg0')])
## ul-job.h (module 'wimax'): ns3::PriorityUlJob::PriorityUlJob() [constructor]
cls.add_constructor([])
## ul-job.h (module 'wimax'): int ns3::PriorityUlJob::GetPriority() [member function]
cls.add_method('GetPriority',
'int',
[])
## ul-job.h (module 'wimax'): ns3::Ptr<ns3::UlJob> ns3::PriorityUlJob::GetUlJob() [member function]
cls.add_method('GetUlJob',
'ns3::Ptr< ns3::UlJob >',
[])
## ul-job.h (module 'wimax'): void ns3::PriorityUlJob::SetPriority(int priority) [member function]
cls.add_method('SetPriority',
'void',
[param('int', 'priority')])
## ul-job.h (module 'wimax'): void ns3::PriorityUlJob::SetUlJob(ns3::Ptr<ns3::UlJob> job) [member function]
cls.add_method('SetUlJob',
'void',
[param('ns3::Ptr< ns3::UlJob >', 'job')])
return
def register_Ns3PropagationLossModel_methods(root_module, cls):
## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::PropagationLossModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## propagation-loss-model.h (module 'propagation'): ns3::PropagationLossModel::PropagationLossModel() [constructor]
cls.add_constructor([])
## propagation-loss-model.h (module 'propagation'): void ns3::PropagationLossModel::SetNext(ns3::Ptr<ns3::PropagationLossModel> next) [member function]
cls.add_method('SetNext',
'void',
[param('ns3::Ptr< ns3::PropagationLossModel >', 'next')])
## propagation-loss-model.h (module 'propagation'): ns3::Ptr<ns3::PropagationLossModel> ns3::PropagationLossModel::GetNext() [member function]
cls.add_method('GetNext',
'ns3::Ptr< ns3::PropagationLossModel >',
[])
## propagation-loss-model.h (module 'propagation'): double ns3::PropagationLossModel::CalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('CalcRxPower',
'double',
[param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True)
## propagation-loss-model.h (module 'propagation'): int64_t ns3::PropagationLossModel::AssignStreams(int64_t stream) [member function]
cls.add_method('AssignStreams',
'int64_t',
[param('int64_t', 'stream')])
## propagation-loss-model.h (module 'propagation'): double ns3::PropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('DoCalcRxPower',
'double',
[param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True, is_virtual=True, is_pure_virtual=True, visibility='private')
## propagation-loss-model.h (module 'propagation'): int64_t ns3::PropagationLossModel::DoAssignStreams(int64_t stream) [member function]
cls.add_method('DoAssignStreams',
'int64_t',
[param('int64_t', 'stream')],
is_virtual=True, is_pure_virtual=True, visibility='private')
return
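# Note (added commentary): propagation loss models form a chain: SetNext() appends another
# model, and CalcRxPower() applies this model's DoCalcRxPower() and then delegates to the
# next model in the chain.  A sketch of chaining two models, assuming ns.propagation is
# importable and two mobility models a and b exist:
#
#     loss = ns.propagation.ThreeLogDistancePropagationLossModel()
#     loss.SetNext(ns.propagation.RangePropagationLossModel())
#     rx_dbm = loss.CalcRxPower(20.0, a, b)   # 20.0 dBm transmit power, illustration only
#
# DoCalcRxPower/DoAssignStreams are pure virtual and private, so the concrete models
# registered elsewhere in this module provide the actual loss computation.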
def register_Ns3ProtocolTlvValue_methods(root_module, cls):
## wimax-tlv.h (module 'wimax'): ns3::ProtocolTlvValue::ProtocolTlvValue(ns3::ProtocolTlvValue const & arg0) [constructor]
cls.add_constructor([param('ns3::ProtocolTlvValue const &', 'arg0')])
## wimax-tlv.h (module 'wimax'): ns3::ProtocolTlvValue::ProtocolTlvValue() [constructor]
cls.add_constructor([])
## wimax-tlv.h (module 'wimax'): void ns3::ProtocolTlvValue::Add(uint8_t protocol) [member function]
cls.add_method('Add',
'void',
[param('uint8_t', 'protocol')])
## wimax-tlv.h (module 'wimax'): ns3::ProtocolTlvValue::Iterator ns3::ProtocolTlvValue::Begin() const [member function]
cls.add_method('Begin',
'ns3::ProtocolTlvValue::Iterator',
[],
is_const=True)
## wimax-tlv.h (module 'wimax'): ns3::ProtocolTlvValue * ns3::ProtocolTlvValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::ProtocolTlvValue *',
[],
is_const=True, is_virtual=True)
## wimax-tlv.h (module 'wimax'): uint32_t ns3::ProtocolTlvValue::Deserialize(ns3::Buffer::Iterator start, uint64_t valueLength) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start'), param('uint64_t', 'valueLength')],
is_virtual=True)
## wimax-tlv.h (module 'wimax'): ns3::ProtocolTlvValue::Iterator ns3::ProtocolTlvValue::End() const [member function]
cls.add_method('End',
'ns3::ProtocolTlvValue::Iterator',
[],
is_const=True)
## wimax-tlv.h (module 'wimax'): uint32_t ns3::ProtocolTlvValue::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True, is_virtual=True)
## wimax-tlv.h (module 'wimax'): void ns3::ProtocolTlvValue::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_const=True, is_virtual=True)
return
def register_Ns3RandomPropagationLossModel_methods(root_module, cls):
## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::RandomPropagationLossModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## propagation-loss-model.h (module 'propagation'): ns3::RandomPropagationLossModel::RandomPropagationLossModel() [constructor]
cls.add_constructor([])
## propagation-loss-model.h (module 'propagation'): double ns3::RandomPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('DoCalcRxPower',
'double',
[param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True, is_virtual=True, visibility='private')
## propagation-loss-model.h (module 'propagation'): int64_t ns3::RandomPropagationLossModel::DoAssignStreams(int64_t stream) [member function]
cls.add_method('DoAssignStreams',
'int64_t',
[param('int64_t', 'stream')],
is_virtual=True, visibility='private')
return
def register_Ns3RandomVariableStream_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::RandomVariableStream::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::RandomVariableStream::RandomVariableStream() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): void ns3::RandomVariableStream::SetStream(int64_t stream) [member function]
cls.add_method('SetStream',
'void',
[param('int64_t', 'stream')])
## random-variable-stream.h (module 'core'): int64_t ns3::RandomVariableStream::GetStream() const [member function]
cls.add_method('GetStream',
'int64_t',
[],
is_const=True)
## random-variable-stream.h (module 'core'): void ns3::RandomVariableStream::SetAntithetic(bool isAntithetic) [member function]
cls.add_method('SetAntithetic',
'void',
[param('bool', 'isAntithetic')])
## random-variable-stream.h (module 'core'): bool ns3::RandomVariableStream::IsAntithetic() const [member function]
cls.add_method('IsAntithetic',
'bool',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::RandomVariableStream::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True, is_pure_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::RandomVariableStream::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True, is_pure_virtual=True)
## random-variable-stream.h (module 'core'): ns3::RngStream * ns3::RandomVariableStream::Peek() const [member function]
cls.add_method('Peek',
'ns3::RngStream *',
[],
is_const=True, visibility='protected')
return
def register_Ns3RangePropagationLossModel_methods(root_module, cls):
## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::RangePropagationLossModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## propagation-loss-model.h (module 'propagation'): ns3::RangePropagationLossModel::RangePropagationLossModel() [constructor]
cls.add_constructor([])
## propagation-loss-model.h (module 'propagation'): double ns3::RangePropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('DoCalcRxPower',
'double',
[param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True, is_virtual=True, visibility='private')
## propagation-loss-model.h (module 'propagation'): int64_t ns3::RangePropagationLossModel::DoAssignStreams(int64_t stream) [member function]
cls.add_method('DoAssignStreams',
'int64_t',
[param('int64_t', 'stream')],
is_virtual=True, visibility='private')
return
def register_Ns3RngReq_methods(root_module, cls):
## mac-messages.h (module 'wimax'): ns3::RngReq::RngReq(ns3::RngReq const & arg0) [constructor]
cls.add_constructor([param('ns3::RngReq const &', 'arg0')])
## mac-messages.h (module 'wimax'): ns3::RngReq::RngReq() [constructor]
cls.add_constructor([])
## mac-messages.h (module 'wimax'): uint32_t ns3::RngReq::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_virtual=True)
## mac-messages.h (module 'wimax'): ns3::TypeId ns3::RngReq::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## mac-messages.h (module 'wimax'): ns3::Mac48Address ns3::RngReq::GetMacAddress() const [member function]
cls.add_method('GetMacAddress',
'ns3::Mac48Address',
[],
is_const=True)
## mac-messages.h (module 'wimax'): std::string ns3::RngReq::GetName() const [member function]
cls.add_method('GetName',
'std::string',
[],
is_const=True)
## mac-messages.h (module 'wimax'): uint8_t ns3::RngReq::GetRangingAnomalies() const [member function]
cls.add_method('GetRangingAnomalies',
'uint8_t',
[],
is_const=True)
## mac-messages.h (module 'wimax'): uint8_t ns3::RngReq::GetReqDlBurstProfile() const [member function]
cls.add_method('GetReqDlBurstProfile',
'uint8_t',
[],
is_const=True)
## mac-messages.h (module 'wimax'): uint32_t ns3::RngReq::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True, is_virtual=True)
## mac-messages.h (module 'wimax'): static ns3::TypeId ns3::RngReq::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## mac-messages.h (module 'wimax'): void ns3::RngReq::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_virtual=True)
## mac-messages.h (module 'wimax'): void ns3::RngReq::PrintDebug() const [member function]
cls.add_method('PrintDebug',
'void',
[],
is_const=True)
## mac-messages.h (module 'wimax'): void ns3::RngReq::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_const=True, is_virtual=True)
## mac-messages.h (module 'wimax'): void ns3::RngReq::SetMacAddress(ns3::Mac48Address macAddress) [member function]
cls.add_method('SetMacAddress',
'void',
[param('ns3::Mac48Address', 'macAddress')])
## mac-messages.h (module 'wimax'): void ns3::RngReq::SetRangingAnomalies(uint8_t rangingAnomalies) [member function]
cls.add_method('SetRangingAnomalies',
'void',
[param('uint8_t', 'rangingAnomalies')])
## mac-messages.h (module 'wimax'): void ns3::RngReq::SetReqDlBurstProfile(uint8_t reqDlBurstProfile) [member function]
cls.add_method('SetReqDlBurstProfile',
'void',
[param('uint8_t', 'reqDlBurstProfile')])
return
def register_Ns3RngRsp_methods(root_module, cls):
## mac-messages.h (module 'wimax'): ns3::RngRsp::RngRsp(ns3::RngRsp const & arg0) [constructor]
cls.add_constructor([param('ns3::RngRsp const &', 'arg0')])
## mac-messages.h (module 'wimax'): ns3::RngRsp::RngRsp() [constructor]
cls.add_constructor([])
## mac-messages.h (module 'wimax'): uint32_t ns3::RngRsp::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_virtual=True)
## mac-messages.h (module 'wimax'): uint8_t ns3::RngRsp::GetAasBdcastPermission() const [member function]
cls.add_method('GetAasBdcastPermission',
'uint8_t',
[],
is_const=True)
## mac-messages.h (module 'wimax'): ns3::Cid ns3::RngRsp::GetBasicCid() const [member function]
cls.add_method('GetBasicCid',
'ns3::Cid',
[],
is_const=True)
## mac-messages.h (module 'wimax'): uint32_t ns3::RngRsp::GetDlFreqOverride() const [member function]
cls.add_method('GetDlFreqOverride',
'uint32_t',
[],
is_const=True)
## mac-messages.h (module 'wimax'): uint16_t ns3::RngRsp::GetDlOperBurstProfile() const [member function]
cls.add_method('GetDlOperBurstProfile',
'uint16_t',
[],
is_const=True)
## mac-messages.h (module 'wimax'): uint32_t ns3::RngRsp::GetFrameNumber() const [member function]
cls.add_method('GetFrameNumber',
'uint32_t',
[],
is_const=True)
## mac-messages.h (module 'wimax'): uint8_t ns3::RngRsp::GetInitRangOppNumber() const [member function]
cls.add_method('GetInitRangOppNumber',
'uint8_t',
[],
is_const=True)
## mac-messages.h (module 'wimax'): ns3::TypeId ns3::RngRsp::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## mac-messages.h (module 'wimax'): ns3::Mac48Address ns3::RngRsp::GetMacAddress() const [member function]
cls.add_method('GetMacAddress',
'ns3::Mac48Address',
[],
is_const=True)
## mac-messages.h (module 'wimax'): std::string ns3::RngRsp::GetName() const [member function]
cls.add_method('GetName',
'std::string',
[],
is_const=True)
## mac-messages.h (module 'wimax'): uint32_t ns3::RngRsp::GetOffsetFreqAdjust() const [member function]
cls.add_method('GetOffsetFreqAdjust',
'uint32_t',
[],
is_const=True)
## mac-messages.h (module 'wimax'): uint8_t ns3::RngRsp::GetPowerLevelAdjust() const [member function]
cls.add_method('GetPowerLevelAdjust',
'uint8_t',
[],
is_const=True)
## mac-messages.h (module 'wimax'): ns3::Cid ns3::RngRsp::GetPrimaryCid() const [member function]
cls.add_method('GetPrimaryCid',
'ns3::Cid',
[],
is_const=True)
## mac-messages.h (module 'wimax'): uint8_t ns3::RngRsp::GetRangStatus() const [member function]
cls.add_method('GetRangStatus',
'uint8_t',
[],
is_const=True)
## mac-messages.h (module 'wimax'): uint8_t ns3::RngRsp::GetRangSubchnl() const [member function]
cls.add_method('GetRangSubchnl',
'uint8_t',
[],
is_const=True)
## mac-messages.h (module 'wimax'): uint32_t ns3::RngRsp::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True, is_virtual=True)
## mac-messages.h (module 'wimax'): uint32_t ns3::RngRsp::GetTimingAdjust() const [member function]
cls.add_method('GetTimingAdjust',
'uint32_t',
[],
is_const=True)
## mac-messages.h (module 'wimax'): static ns3::TypeId ns3::RngRsp::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## mac-messages.h (module 'wimax'): uint8_t ns3::RngRsp::GetUlChnlIdOverride() const [member function]
cls.add_method('GetUlChnlIdOverride',
'uint8_t',
[],
is_const=True)
## mac-messages.h (module 'wimax'): void ns3::RngRsp::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_virtual=True)
## mac-messages.h (module 'wimax'): void ns3::RngRsp::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_const=True, is_virtual=True)
## mac-messages.h (module 'wimax'): void ns3::RngRsp::SetAasBdcastPermission(uint8_t aasBdcastPermission) [member function]
cls.add_method('SetAasBdcastPermission',
'void',
[param('uint8_t', 'aasBdcastPermission')])
## mac-messages.h (module 'wimax'): void ns3::RngRsp::SetBasicCid(ns3::Cid basicCid) [member function]
cls.add_method('SetBasicCid',
'void',
[param('ns3::Cid', 'basicCid')])
## mac-messages.h (module 'wimax'): void ns3::RngRsp::SetDlFreqOverride(uint32_t dlFreqOverride) [member function]
cls.add_method('SetDlFreqOverride',
'void',
[param('uint32_t', 'dlFreqOverride')])
## mac-messages.h (module 'wimax'): void ns3::RngRsp::SetDlOperBurstProfile(uint16_t dlOperBurstProfile) [member function]
cls.add_method('SetDlOperBurstProfile',
'void',
[param('uint16_t', 'dlOperBurstProfile')])
## mac-messages.h (module 'wimax'): void ns3::RngRsp::SetFrameNumber(uint32_t frameNumber) [member function]
cls.add_method('SetFrameNumber',
'void',
[param('uint32_t', 'frameNumber')])
## mac-messages.h (module 'wimax'): void ns3::RngRsp::SetInitRangOppNumber(uint8_t initRangOppNumber) [member function]
cls.add_method('SetInitRangOppNumber',
'void',
[param('uint8_t', 'initRangOppNumber')])
## mac-messages.h (module 'wimax'): void ns3::RngRsp::SetMacAddress(ns3::Mac48Address macAddress) [member function]
cls.add_method('SetMacAddress',
'void',
[param('ns3::Mac48Address', 'macAddress')])
## mac-messages.h (module 'wimax'): void ns3::RngRsp::SetOffsetFreqAdjust(uint32_t offsetFreqAdjust) [member function]
cls.add_method('SetOffsetFreqAdjust',
'void',
[param('uint32_t', 'offsetFreqAdjust')])
## mac-messages.h (module 'wimax'): void ns3::RngRsp::SetPowerLevelAdjust(uint8_t powerLevelAdjust) [member function]
cls.add_method('SetPowerLevelAdjust',
'void',
[param('uint8_t', 'powerLevelAdjust')])
## mac-messages.h (module 'wimax'): void ns3::RngRsp::SetPrimaryCid(ns3::Cid primaryCid) [member function]
cls.add_method('SetPrimaryCid',
'void',
[param('ns3::Cid', 'primaryCid')])
## mac-messages.h (module 'wimax'): void ns3::RngRsp::SetRangStatus(uint8_t rangStatus) [member function]
cls.add_method('SetRangStatus',
'void',
[param('uint8_t', 'rangStatus')])
## mac-messages.h (module 'wimax'): void ns3::RngRsp::SetRangSubchnl(uint8_t rangSubchnl) [member function]
cls.add_method('SetRangSubchnl',
'void',
[param('uint8_t', 'rangSubchnl')])
## mac-messages.h (module 'wimax'): void ns3::RngRsp::SetTimingAdjust(uint32_t timingAdjust) [member function]
cls.add_method('SetTimingAdjust',
'void',
[param('uint32_t', 'timingAdjust')])
## mac-messages.h (module 'wimax'): void ns3::RngRsp::SetUlChnlIdOverride(uint8_t ulChnlIdOverride) [member function]
cls.add_method('SetUlChnlIdOverride',
'void',
[param('uint8_t', 'ulChnlIdOverride')])
return
def register_Ns3SSManager_methods(root_module, cls):
## ss-manager.h (module 'wimax'): ns3::SSManager::SSManager(ns3::SSManager const & arg0) [constructor]
cls.add_constructor([param('ns3::SSManager const &', 'arg0')])
## ss-manager.h (module 'wimax'): ns3::SSManager::SSManager() [constructor]
cls.add_constructor([])
## ss-manager.h (module 'wimax'): ns3::SSRecord * ns3::SSManager::CreateSSRecord(ns3::Mac48Address const & macAddress) [member function]
cls.add_method('CreateSSRecord',
'ns3::SSRecord *',
[param('ns3::Mac48Address const &', 'macAddress')])
## ss-manager.h (module 'wimax'): void ns3::SSManager::DeleteSSRecord(ns3::Cid cid) [member function]
cls.add_method('DeleteSSRecord',
'void',
[param('ns3::Cid', 'cid')])
## ss-manager.h (module 'wimax'): ns3::Mac48Address ns3::SSManager::GetMacAddress(ns3::Cid cid) const [member function]
cls.add_method('GetMacAddress',
'ns3::Mac48Address',
[param('ns3::Cid', 'cid')],
is_const=True)
## ss-manager.h (module 'wimax'): uint32_t ns3::SSManager::GetNRegisteredSSs() const [member function]
cls.add_method('GetNRegisteredSSs',
'uint32_t',
[],
is_const=True)
## ss-manager.h (module 'wimax'): uint32_t ns3::SSManager::GetNSSs() const [member function]
cls.add_method('GetNSSs',
'uint32_t',
[],
is_const=True)
## ss-manager.h (module 'wimax'): ns3::SSRecord * ns3::SSManager::GetSSRecord(ns3::Mac48Address const & macAddress) const [member function]
cls.add_method('GetSSRecord',
'ns3::SSRecord *',
[param('ns3::Mac48Address const &', 'macAddress')],
is_const=True)
## ss-manager.h (module 'wimax'): ns3::SSRecord * ns3::SSManager::GetSSRecord(ns3::Cid cid) const [member function]
cls.add_method('GetSSRecord',
'ns3::SSRecord *',
[param('ns3::Cid', 'cid')],
is_const=True)
## ss-manager.h (module 'wimax'): std::vector<ns3::SSRecord *, std::allocator<ns3::SSRecord *> > * ns3::SSManager::GetSSRecords() const [member function]
cls.add_method('GetSSRecords',
'std::vector< ns3::SSRecord * > *',
[],
is_const=True)
## ss-manager.h (module 'wimax'): static ns3::TypeId ns3::SSManager::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## ss-manager.h (module 'wimax'): bool ns3::SSManager::IsInRecord(ns3::Mac48Address const & macAddress) const [member function]
cls.add_method('IsInRecord',
'bool',
[param('ns3::Mac48Address const &', 'macAddress')],
is_const=True)
## ss-manager.h (module 'wimax'): bool ns3::SSManager::IsRegistered(ns3::Mac48Address const & macAddress) const [member function]
cls.add_method('IsRegistered',
'bool',
[param('ns3::Mac48Address const &', 'macAddress')],
is_const=True)
return
def register_Ns3SequentialRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::SequentialRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::SequentialRandomVariable::SequentialRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::SequentialRandomVariable::GetMin() const [member function]
cls.add_method('GetMin',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::SequentialRandomVariable::GetMax() const [member function]
cls.add_method('GetMax',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): ns3::Ptr<ns3::RandomVariableStream> ns3::SequentialRandomVariable::GetIncrement() const [member function]
cls.add_method('GetIncrement',
'ns3::Ptr< ns3::RandomVariableStream >',
[],
is_const=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::SequentialRandomVariable::GetConsecutive() const [member function]
cls.add_method('GetConsecutive',
'uint32_t',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::SequentialRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::SequentialRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3ServiceFlowManager_methods(root_module, cls):
## service-flow-manager.h (module 'wimax'): ns3::ServiceFlowManager::ServiceFlowManager(ns3::ServiceFlowManager const & arg0) [constructor]
cls.add_constructor([param('ns3::ServiceFlowManager const &', 'arg0')])
## service-flow-manager.h (module 'wimax'): ns3::ServiceFlowManager::ServiceFlowManager() [constructor]
cls.add_constructor([])
## service-flow-manager.h (module 'wimax'): void ns3::ServiceFlowManager::AddServiceFlow(ns3::ServiceFlow * serviceFlow) [member function]
cls.add_method('AddServiceFlow',
'void',
[param('ns3::ServiceFlow *', 'serviceFlow')])
## service-flow-manager.h (module 'wimax'): bool ns3::ServiceFlowManager::AreServiceFlowsAllocated() [member function]
cls.add_method('AreServiceFlowsAllocated',
'bool',
[])
## service-flow-manager.h (module 'wimax'): bool ns3::ServiceFlowManager::AreServiceFlowsAllocated(std::vector<ns3::ServiceFlow *, std::allocator<ns3::ServiceFlow *> > * serviceFlows) [member function]
cls.add_method('AreServiceFlowsAllocated',
'bool',
[param('std::vector< ns3::ServiceFlow * > *', 'serviceFlows')])
## service-flow-manager.h (module 'wimax'): bool ns3::ServiceFlowManager::AreServiceFlowsAllocated(std::vector<ns3::ServiceFlow *, std::allocator<ns3::ServiceFlow *> > serviceFlows) [member function]
cls.add_method('AreServiceFlowsAllocated',
'bool',
[param('std::vector< ns3::ServiceFlow * >', 'serviceFlows')])
## service-flow-manager.h (module 'wimax'): ns3::ServiceFlow * ns3::ServiceFlowManager::DoClassify(ns3::Ipv4Address SrcAddress, ns3::Ipv4Address DstAddress, uint16_t SrcPort, uint16_t DstPort, uint8_t Proto, ns3::ServiceFlow::Direction dir) const [member function]
cls.add_method('DoClassify',
'ns3::ServiceFlow *',
[param('ns3::Ipv4Address', 'SrcAddress'), param('ns3::Ipv4Address', 'DstAddress'), param('uint16_t', 'SrcPort'), param('uint16_t', 'DstPort'), param('uint8_t', 'Proto'), param('ns3::ServiceFlow::Direction', 'dir')],
is_const=True)
## service-flow-manager.h (module 'wimax'): void ns3::ServiceFlowManager::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
is_virtual=True)
## service-flow-manager.h (module 'wimax'): ns3::ServiceFlow * ns3::ServiceFlowManager::GetNextServiceFlowToAllocate() [member function]
cls.add_method('GetNextServiceFlowToAllocate',
'ns3::ServiceFlow *',
[])
## service-flow-manager.h (module 'wimax'): uint32_t ns3::ServiceFlowManager::GetNrServiceFlows() const [member function]
cls.add_method('GetNrServiceFlows',
'uint32_t',
[],
is_const=True)
## service-flow-manager.h (module 'wimax'): ns3::ServiceFlow * ns3::ServiceFlowManager::GetServiceFlow(uint32_t sfid) const [member function]
cls.add_method('GetServiceFlow',
'ns3::ServiceFlow *',
[param('uint32_t', 'sfid')],
is_const=True)
## service-flow-manager.h (module 'wimax'): ns3::ServiceFlow * ns3::ServiceFlowManager::GetServiceFlow(ns3::Cid cid) const [member function]
cls.add_method('GetServiceFlow',
'ns3::ServiceFlow *',
[param('ns3::Cid', 'cid')],
is_const=True)
## service-flow-manager.h (module 'wimax'): std::vector<ns3::ServiceFlow *, std::allocator<ns3::ServiceFlow *> > ns3::ServiceFlowManager::GetServiceFlows(ns3::ServiceFlow::SchedulingType schedulingType) const [member function]
cls.add_method('GetServiceFlows',
'std::vector< ns3::ServiceFlow * >',
[param('ns3::ServiceFlow::SchedulingType', 'schedulingType')],
is_const=True)
## service-flow-manager.h (module 'wimax'): static ns3::TypeId ns3::ServiceFlowManager::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
return
def register_Ns3SfVectorTlvValue_methods(root_module, cls):
## wimax-tlv.h (module 'wimax'): ns3::SfVectorTlvValue::SfVectorTlvValue(ns3::SfVectorTlvValue const & arg0) [constructor]
cls.add_constructor([param('ns3::SfVectorTlvValue const &', 'arg0')])
## wimax-tlv.h (module 'wimax'): ns3::SfVectorTlvValue::SfVectorTlvValue() [constructor]
cls.add_constructor([])
## wimax-tlv.h (module 'wimax'): ns3::SfVectorTlvValue * ns3::SfVectorTlvValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::SfVectorTlvValue *',
[],
is_const=True, is_virtual=True)
## wimax-tlv.h (module 'wimax'): uint32_t ns3::SfVectorTlvValue::Deserialize(ns3::Buffer::Iterator start, uint64_t valueLength) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start'), param('uint64_t', 'valueLength')],
is_virtual=True)
return
def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')])
return
def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')])
return
def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')])
return
def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')])
return
def register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount(ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter< ns3::EventImpl > > const &', 'o')])
return
def register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter< ns3::Hash::Implementation > > const &', 'o')])
return
def register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount(ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter< ns3::NixVector > > const &', 'o')])
return
def register_Ns3SimpleRefCount__Ns3OutputStreamWrapper_Ns3Empty_Ns3DefaultDeleter__lt__ns3OutputStreamWrapper__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >::SimpleRefCount(ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter< ns3::OutputStreamWrapper > > const &', 'o')])
return
def register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter< ns3::Packet > > const &', 'o')])
return
def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')])
return
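# Note (added commentary): the register_Ns3SimpleRefCount__* functions above only expose the
# default and copy constructors of each SimpleRefCount<T, empty, DefaultDeleter<T>>
# instantiation.  They exist so that pybindgen knows about the reference-counting base of the
# corresponding classes (AttributeValue, Packet, TraceSourceAccessor, ...), which keeps the
# class hierarchy complete and lets ns3::Ptr<T> parameters and return values be wrapped
# consistently.  This is an explanatory note about the generated structure, not additional
# binding code.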
def register_Ns3SsServiceFlowManager_methods(root_module, cls):
## ss-service-flow-manager.h (module 'wimax'): ns3::SsServiceFlowManager::SsServiceFlowManager(ns3::SsServiceFlowManager const & arg0) [constructor]
cls.add_constructor([param('ns3::SsServiceFlowManager const &', 'arg0')])
## ss-service-flow-manager.h (module 'wimax'): ns3::SsServiceFlowManager::SsServiceFlowManager(ns3::Ptr<ns3::SubscriberStationNetDevice> device) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::SubscriberStationNetDevice >', 'device')])
## ss-service-flow-manager.h (module 'wimax'): void ns3::SsServiceFlowManager::AddServiceFlow(ns3::ServiceFlow * serviceFlow) [member function]
cls.add_method('AddServiceFlow',
'void',
[param('ns3::ServiceFlow *', 'serviceFlow')])
## ss-service-flow-manager.h (module 'wimax'): void ns3::SsServiceFlowManager::AddServiceFlow(ns3::ServiceFlow serviceFlow) [member function]
cls.add_method('AddServiceFlow',
'void',
[param('ns3::ServiceFlow', 'serviceFlow')])
## ss-service-flow-manager.h (module 'wimax'): ns3::Ptr<ns3::Packet> ns3::SsServiceFlowManager::CreateDsaAck() [member function]
cls.add_method('CreateDsaAck',
'ns3::Ptr< ns3::Packet >',
[])
## ss-service-flow-manager.h (module 'wimax'): ns3::DsaReq ns3::SsServiceFlowManager::CreateDsaReq(ns3::ServiceFlow const * serviceFlow) [member function]
cls.add_method('CreateDsaReq',
'ns3::DsaReq',
[param('ns3::ServiceFlow const *', 'serviceFlow')])
## ss-service-flow-manager.h (module 'wimax'): void ns3::SsServiceFlowManager::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
is_virtual=True)
## ss-service-flow-manager.h (module 'wimax'): ns3::EventId ns3::SsServiceFlowManager::GetDsaAckTimeoutEvent() const [member function]
cls.add_method('GetDsaAckTimeoutEvent',
'ns3::EventId',
[],
is_const=True)
## ss-service-flow-manager.h (module 'wimax'): ns3::EventId ns3::SsServiceFlowManager::GetDsaRspTimeoutEvent() const [member function]
cls.add_method('GetDsaRspTimeoutEvent',
'ns3::EventId',
[],
is_const=True)
## ss-service-flow-manager.h (module 'wimax'): uint8_t ns3::SsServiceFlowManager::GetMaxDsaReqRetries() const [member function]
cls.add_method('GetMaxDsaReqRetries',
'uint8_t',
[],
is_const=True)
## ss-service-flow-manager.h (module 'wimax'): static ns3::TypeId ns3::SsServiceFlowManager::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## ss-service-flow-manager.h (module 'wimax'): void ns3::SsServiceFlowManager::InitiateServiceFlows() [member function]
cls.add_method('InitiateServiceFlows',
'void',
[])
## ss-service-flow-manager.h (module 'wimax'): void ns3::SsServiceFlowManager::ProcessDsaRsp(ns3::DsaRsp const & dsaRsp) [member function]
cls.add_method('ProcessDsaRsp',
'void',
[param('ns3::DsaRsp const &', 'dsaRsp')])
## ss-service-flow-manager.h (module 'wimax'): void ns3::SsServiceFlowManager::ScheduleDsaReq(ns3::ServiceFlow const * serviceFlow) [member function]
cls.add_method('ScheduleDsaReq',
'void',
[param('ns3::ServiceFlow const *', 'serviceFlow')])
## ss-service-flow-manager.h (module 'wimax'): void ns3::SsServiceFlowManager::SetMaxDsaReqRetries(uint8_t maxDsaReqRetries) [member function]
cls.add_method('SetMaxDsaReqRetries',
'void',
[param('uint8_t', 'maxDsaReqRetries')])
return
def register_Ns3ThreeLogDistancePropagationLossModel_methods(root_module, cls):
## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::ThreeLogDistancePropagationLossModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## propagation-loss-model.h (module 'propagation'): ns3::ThreeLogDistancePropagationLossModel::ThreeLogDistancePropagationLossModel() [constructor]
cls.add_constructor([])
## propagation-loss-model.h (module 'propagation'): double ns3::ThreeLogDistancePropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('DoCalcRxPower',
'double',
[param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True, is_virtual=True, visibility='private')
## propagation-loss-model.h (module 'propagation'): int64_t ns3::ThreeLogDistancePropagationLossModel::DoAssignStreams(int64_t stream) [member function]
cls.add_method('DoAssignStreams',
'int64_t',
[param('int64_t', 'stream')],
is_virtual=True, visibility='private')
return
def register_Ns3Time_methods(root_module, cls):
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=')
cls.add_binary_comparison_operator('<=')
cls.add_binary_comparison_operator('>=')
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('>')
cls.add_binary_numeric_operator('+', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::Time'], root_module['ns3::Time'], param('int64_t const &', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::int64x64_t const &', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::Time'], param('ns3::Time const &', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::Time'], root_module['ns3::Time'], param('int64_t const &', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::int64x64_t const &', 'right'))
cls.add_inplace_numeric_operator('+=', param('ns3::Time const &', 'right'))
cls.add_inplace_numeric_operator('-=', param('ns3::Time const &', 'right'))
cls.add_output_stream_operator()
## nstime.h (module 'core'): ns3::Time::Time() [constructor]
cls.add_constructor([])
## nstime.h (module 'core'): ns3::Time::Time(ns3::Time const & o) [constructor]
cls.add_constructor([param('ns3::Time const &', 'o')])
## nstime.h (module 'core'): ns3::Time::Time(double v) [constructor]
cls.add_constructor([param('double', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(int v) [constructor]
cls.add_constructor([param('int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long int v) [constructor]
cls.add_constructor([param('long int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long long int v) [constructor]
cls.add_constructor([param('long long int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(unsigned int v) [constructor]
cls.add_constructor([param('unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long unsigned int v) [constructor]
cls.add_constructor([param('long unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long long unsigned int v) [constructor]
cls.add_constructor([param('long long unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(ns3::int64x64_t const & v) [constructor]
cls.add_constructor([param('ns3::int64x64_t const &', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(std::string const & s) [constructor]
cls.add_constructor([param('std::string const &', 's')])
## nstime.h (module 'core'): ns3::TimeWithUnit ns3::Time::As(ns3::Time::Unit const unit) const [member function]
cls.add_method('As',
'ns3::TimeWithUnit',
[param('ns3::Time::Unit const', 'unit')],
is_const=True)
## nstime.h (module 'core'): int ns3::Time::Compare(ns3::Time const & o) const [member function]
cls.add_method('Compare',
'int',
[param('ns3::Time const &', 'o')],
is_const=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value) [member function]
cls.add_method('From',
'ns3::Time',
[param('ns3::int64x64_t const &', 'value')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value, ns3::Time::Unit unit) [member function]
cls.add_method('From',
'ns3::Time',
[param('ns3::int64x64_t const &', 'value'), param('ns3::Time::Unit', 'unit')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::FromDouble(double value, ns3::Time::Unit unit) [member function]
cls.add_method('FromDouble',
'ns3::Time',
[param('double', 'value'), param('ns3::Time::Unit', 'unit')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::FromInteger(uint64_t value, ns3::Time::Unit unit) [member function]
cls.add_method('FromInteger',
'ns3::Time',
[param('uint64_t', 'value'), param('ns3::Time::Unit', 'unit')],
is_static=True)
## nstime.h (module 'core'): double ns3::Time::GetDays() const [member function]
cls.add_method('GetDays',
'double',
[],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::GetDouble() const [member function]
cls.add_method('GetDouble',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetFemtoSeconds() const [member function]
cls.add_method('GetFemtoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::GetHours() const [member function]
cls.add_method('GetHours',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetInteger() const [member function]
cls.add_method('GetInteger',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetMicroSeconds() const [member function]
cls.add_method('GetMicroSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetMilliSeconds() const [member function]
cls.add_method('GetMilliSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::GetMinutes() const [member function]
cls.add_method('GetMinutes',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetNanoSeconds() const [member function]
cls.add_method('GetNanoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetPicoSeconds() const [member function]
cls.add_method('GetPicoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): static ns3::Time::Unit ns3::Time::GetResolution() [member function]
cls.add_method('GetResolution',
'ns3::Time::Unit',
[],
is_static=True)
## nstime.h (module 'core'): double ns3::Time::GetSeconds() const [member function]
cls.add_method('GetSeconds',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetTimeStep() const [member function]
cls.add_method('GetTimeStep',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::GetYears() const [member function]
cls.add_method('GetYears',
'double',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsNegative() const [member function]
cls.add_method('IsNegative',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsPositive() const [member function]
cls.add_method('IsPositive',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsStrictlyNegative() const [member function]
cls.add_method('IsStrictlyNegative',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsStrictlyPositive() const [member function]
cls.add_method('IsStrictlyPositive',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsZero() const [member function]
cls.add_method('IsZero',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::Max() [member function]
cls.add_method('Max',
'ns3::Time',
[],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::Min() [member function]
cls.add_method('Min',
'ns3::Time',
[],
is_static=True)
## nstime.h (module 'core'): static void ns3::Time::SetResolution(ns3::Time::Unit resolution) [member function]
cls.add_method('SetResolution',
'void',
[param('ns3::Time::Unit', 'resolution')],
is_static=True)
## nstime.h (module 'core'): static bool ns3::Time::StaticInit() [member function]
cls.add_method('StaticInit',
'bool',
[],
is_static=True)
## nstime.h (module 'core'): ns3::int64x64_t ns3::Time::To(ns3::Time::Unit unit) const [member function]
cls.add_method('To',
'ns3::int64x64_t',
[param('ns3::Time::Unit', 'unit')],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::ToDouble(ns3::Time::Unit unit) const [member function]
cls.add_method('ToDouble',
'double',
[param('ns3::Time::Unit', 'unit')],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::ToInteger(ns3::Time::Unit unit) const [member function]
cls.add_method('ToInteger',
'int64_t',
[param('ns3::Time::Unit', 'unit')],
is_const=True)
return
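## Illustrative use of the ns3::Time bindings registered above, once the
## generated extension module is built.  The 'ns.core' import path is an
## assumption taken from the usual ns-3 Python build and is not defined in
## this file; the example is left as comments so this generated module has
## no import-time side effects:
##
##   import ns.core
##   t = ns.core.Time("1500ms")                  # string constructor bound above
##   print(t.GetSeconds())                       # -> 1.5
##   print(t.GetMilliSeconds())                  # -> 1500
##   print(t.IsStrictlyPositive())               # -> True
##   print(t + ns.core.Time("500ms") == ns.core.Time("2s"))   # '+' and '==' bound above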
def register_Ns3Tlv_methods(root_module, cls):
## wimax-tlv.h (module 'wimax'): ns3::Tlv::Tlv(uint8_t type, uint64_t length, ns3::TlvValue const & value) [constructor]
cls.add_constructor([param('uint8_t', 'type'), param('uint64_t', 'length'), param('ns3::TlvValue const &', 'value')])
## wimax-tlv.h (module 'wimax'): ns3::Tlv::Tlv() [constructor]
cls.add_constructor([])
## wimax-tlv.h (module 'wimax'): ns3::Tlv::Tlv(ns3::Tlv const & tlv) [constructor]
cls.add_constructor([param('ns3::Tlv const &', 'tlv')])
## wimax-tlv.h (module 'wimax'): ns3::Tlv * ns3::Tlv::Copy() const [member function]
cls.add_method('Copy',
'ns3::Tlv *',
[],
is_const=True)
## wimax-tlv.h (module 'wimax'): ns3::TlvValue * ns3::Tlv::CopyValue() const [member function]
cls.add_method('CopyValue',
'ns3::TlvValue *',
[],
is_const=True)
## wimax-tlv.h (module 'wimax'): uint32_t ns3::Tlv::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_virtual=True)
## wimax-tlv.h (module 'wimax'): ns3::TypeId ns3::Tlv::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## wimax-tlv.h (module 'wimax'): uint64_t ns3::Tlv::GetLength() const [member function]
cls.add_method('GetLength',
'uint64_t',
[],
is_const=True)
## wimax-tlv.h (module 'wimax'): uint32_t ns3::Tlv::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True, is_virtual=True)
## wimax-tlv.h (module 'wimax'): uint8_t ns3::Tlv::GetSizeOfLen() const [member function]
cls.add_method('GetSizeOfLen',
'uint8_t',
[],
is_const=True)
## wimax-tlv.h (module 'wimax'): uint8_t ns3::Tlv::GetType() const [member function]
cls.add_method('GetType',
'uint8_t',
[],
is_const=True)
## wimax-tlv.h (module 'wimax'): static ns3::TypeId ns3::Tlv::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## wimax-tlv.h (module 'wimax'): ns3::TlvValue * ns3::Tlv::PeekValue() [member function]
cls.add_method('PeekValue',
'ns3::TlvValue *',
[])
## wimax-tlv.h (module 'wimax'): void ns3::Tlv::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_virtual=True)
## wimax-tlv.h (module 'wimax'): void ns3::Tlv::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_const=True, is_virtual=True)
return
def register_Ns3TraceSourceAccessor_methods(root_module, cls):
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor(ns3::TraceSourceAccessor const & arg0) [constructor]
cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')])
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor() [constructor]
cls.add_constructor([])
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Connect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_const=True, is_virtual=True, is_pure_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('ConnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_const=True, is_virtual=True, is_pure_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Disconnect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_const=True, is_virtual=True, is_pure_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('DisconnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_const=True, is_virtual=True, is_pure_virtual=True)
return
def register_Ns3Trailer_methods(root_module, cls):
cls.add_output_stream_operator()
## trailer.h (module 'network'): ns3::Trailer::Trailer() [constructor]
cls.add_constructor([])
## trailer.h (module 'network'): ns3::Trailer::Trailer(ns3::Trailer const & arg0) [constructor]
cls.add_constructor([param('ns3::Trailer const &', 'arg0')])
## trailer.h (module 'network'): uint32_t ns3::Trailer::Deserialize(ns3::Buffer::Iterator end) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'end')],
is_virtual=True, is_pure_virtual=True)
## trailer.h (module 'network'): uint32_t ns3::Trailer::Deserialize(ns3::Buffer::Iterator start, ns3::Buffer::Iterator end) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start'), param('ns3::Buffer::Iterator', 'end')],
is_virtual=True)
## trailer.h (module 'network'): uint32_t ns3::Trailer::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True, is_virtual=True, is_pure_virtual=True)
## trailer.h (module 'network'): static ns3::TypeId ns3::Trailer::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## trailer.h (module 'network'): void ns3::Trailer::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_virtual=True, is_pure_virtual=True)
## trailer.h (module 'network'): void ns3::Trailer::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_const=True, is_virtual=True, is_pure_virtual=True)
return
def register_Ns3TriangularRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::TriangularRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::TriangularRandomVariable::TriangularRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetMean() const [member function]
cls.add_method('GetMean',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetMin() const [member function]
cls.add_method('GetMin',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetMax() const [member function]
cls.add_method('GetMax',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetValue(double mean, double min, double max) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'mean'), param('double', 'min'), param('double', 'max')])
## random-variable-stream.h (module 'core'): uint32_t ns3::TriangularRandomVariable::GetInteger(uint32_t mean, uint32_t min, uint32_t max) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'mean'), param('uint32_t', 'min'), param('uint32_t', 'max')])
## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::TriangularRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3TwoRayGroundPropagationLossModel_methods(root_module, cls):
## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::TwoRayGroundPropagationLossModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## propagation-loss-model.h (module 'propagation'): ns3::TwoRayGroundPropagationLossModel::TwoRayGroundPropagationLossModel() [constructor]
cls.add_constructor([])
## propagation-loss-model.h (module 'propagation'): void ns3::TwoRayGroundPropagationLossModel::SetFrequency(double frequency) [member function]
cls.add_method('SetFrequency',
'void',
[param('double', 'frequency')])
## propagation-loss-model.h (module 'propagation'): void ns3::TwoRayGroundPropagationLossModel::SetSystemLoss(double systemLoss) [member function]
cls.add_method('SetSystemLoss',
'void',
[param('double', 'systemLoss')])
## propagation-loss-model.h (module 'propagation'): void ns3::TwoRayGroundPropagationLossModel::SetMinDistance(double minDistance) [member function]
cls.add_method('SetMinDistance',
'void',
[param('double', 'minDistance')])
## propagation-loss-model.h (module 'propagation'): double ns3::TwoRayGroundPropagationLossModel::GetMinDistance() const [member function]
cls.add_method('GetMinDistance',
'double',
[],
is_const=True)
## propagation-loss-model.h (module 'propagation'): double ns3::TwoRayGroundPropagationLossModel::GetFrequency() const [member function]
cls.add_method('GetFrequency',
'double',
[],
is_const=True)
## propagation-loss-model.h (module 'propagation'): double ns3::TwoRayGroundPropagationLossModel::GetSystemLoss() const [member function]
cls.add_method('GetSystemLoss',
'double',
[],
is_const=True)
## propagation-loss-model.h (module 'propagation'): void ns3::TwoRayGroundPropagationLossModel::SetHeightAboveZ(double heightAboveZ) [member function]
cls.add_method('SetHeightAboveZ',
'void',
[param('double', 'heightAboveZ')])
## propagation-loss-model.h (module 'propagation'): double ns3::TwoRayGroundPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('DoCalcRxPower',
'double',
[param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True, is_virtual=True, visibility='private')
## propagation-loss-model.h (module 'propagation'): int64_t ns3::TwoRayGroundPropagationLossModel::DoAssignStreams(int64_t stream) [member function]
cls.add_method('DoAssignStreams',
'int64_t',
[param('int64_t', 'stream')],
is_virtual=True, visibility='private')
return
def register_Ns3Ucd_methods(root_module, cls):
## ul-mac-messages.h (module 'wimax'): ns3::Ucd::Ucd(ns3::Ucd const & arg0) [constructor]
cls.add_constructor([param('ns3::Ucd const &', 'arg0')])
## ul-mac-messages.h (module 'wimax'): ns3::Ucd::Ucd() [constructor]
cls.add_constructor([])
## ul-mac-messages.h (module 'wimax'): void ns3::Ucd::AddUlBurstProfile(ns3::OfdmUlBurstProfile ulBurstProfile) [member function]
cls.add_method('AddUlBurstProfile',
'void',
[param('ns3::OfdmUlBurstProfile', 'ulBurstProfile')])
## ul-mac-messages.h (module 'wimax'): uint32_t ns3::Ucd::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_virtual=True)
## ul-mac-messages.h (module 'wimax'): ns3::OfdmUcdChannelEncodings ns3::Ucd::GetChannelEncodings() const [member function]
cls.add_method('GetChannelEncodings',
'ns3::OfdmUcdChannelEncodings',
[],
is_const=True)
## ul-mac-messages.h (module 'wimax'): uint8_t ns3::Ucd::GetConfigurationChangeCount() const [member function]
cls.add_method('GetConfigurationChangeCount',
'uint8_t',
[],
is_const=True)
## ul-mac-messages.h (module 'wimax'): ns3::TypeId ns3::Ucd::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## ul-mac-messages.h (module 'wimax'): std::string ns3::Ucd::GetName() const [member function]
cls.add_method('GetName',
'std::string',
[],
is_const=True)
## ul-mac-messages.h (module 'wimax'): uint8_t ns3::Ucd::GetNrUlBurstProfiles() const [member function]
cls.add_method('GetNrUlBurstProfiles',
'uint8_t',
[],
is_const=True)
## ul-mac-messages.h (module 'wimax'): uint8_t ns3::Ucd::GetRangingBackoffEnd() const [member function]
cls.add_method('GetRangingBackoffEnd',
'uint8_t',
[],
is_const=True)
## ul-mac-messages.h (module 'wimax'): uint8_t ns3::Ucd::GetRangingBackoffStart() const [member function]
cls.add_method('GetRangingBackoffStart',
'uint8_t',
[],
is_const=True)
## ul-mac-messages.h (module 'wimax'): uint8_t ns3::Ucd::GetRequestBackoffEnd() const [member function]
cls.add_method('GetRequestBackoffEnd',
'uint8_t',
[],
is_const=True)
## ul-mac-messages.h (module 'wimax'): uint8_t ns3::Ucd::GetRequestBackoffStart() const [member function]
cls.add_method('GetRequestBackoffStart',
'uint8_t',
[],
is_const=True)
## ul-mac-messages.h (module 'wimax'): uint32_t ns3::Ucd::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True, is_virtual=True)
## ul-mac-messages.h (module 'wimax'): static ns3::TypeId ns3::Ucd::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## ul-mac-messages.h (module 'wimax'): std::vector<ns3::OfdmUlBurstProfile, std::allocator<ns3::OfdmUlBurstProfile> > ns3::Ucd::GetUlBurstProfiles() const [member function]
cls.add_method('GetUlBurstProfiles',
'std::vector< ns3::OfdmUlBurstProfile >',
[],
is_const=True)
## ul-mac-messages.h (module 'wimax'): void ns3::Ucd::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_virtual=True)
## ul-mac-messages.h (module 'wimax'): void ns3::Ucd::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_const=True, is_virtual=True)
## ul-mac-messages.h (module 'wimax'): void ns3::Ucd::SetChannelEncodings(ns3::OfdmUcdChannelEncodings channelEncodings) [member function]
cls.add_method('SetChannelEncodings',
'void',
[param('ns3::OfdmUcdChannelEncodings', 'channelEncodings')])
## ul-mac-messages.h (module 'wimax'): void ns3::Ucd::SetConfigurationChangeCount(uint8_t ucdCount) [member function]
cls.add_method('SetConfigurationChangeCount',
'void',
[param('uint8_t', 'ucdCount')])
## ul-mac-messages.h (module 'wimax'): void ns3::Ucd::SetNrUlBurstProfiles(uint8_t nrUlBurstProfiles) [member function]
cls.add_method('SetNrUlBurstProfiles',
'void',
[param('uint8_t', 'nrUlBurstProfiles')])
## ul-mac-messages.h (module 'wimax'): void ns3::Ucd::SetRangingBackoffEnd(uint8_t rangingBackoffEnd) [member function]
cls.add_method('SetRangingBackoffEnd',
'void',
[param('uint8_t', 'rangingBackoffEnd')])
## ul-mac-messages.h (module 'wimax'): void ns3::Ucd::SetRangingBackoffStart(uint8_t rangingBackoffStart) [member function]
cls.add_method('SetRangingBackoffStart',
'void',
[param('uint8_t', 'rangingBackoffStart')])
## ul-mac-messages.h (module 'wimax'): void ns3::Ucd::SetRequestBackoffEnd(uint8_t requestBackoffEnd) [member function]
cls.add_method('SetRequestBackoffEnd',
'void',
[param('uint8_t', 'requestBackoffEnd')])
## ul-mac-messages.h (module 'wimax'): void ns3::Ucd::SetRequestBackoffStart(uint8_t requestBackoffStart) [member function]
cls.add_method('SetRequestBackoffStart',
'void',
[param('uint8_t', 'requestBackoffStart')])
return
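## Illustrative use of the ns3::Ucd bindings registered above ('ns.wimax' is
## the import path assumed from the usual ns-3 Python build; it is not
## defined in this file).  In ns-3, Ucd is a Header subclass, so the same
## object could also be added to an ns3::Packet.  Left as comments to keep
## this generated module side-effect free on import:
##
##   import ns.wimax
##   ucd = ns.wimax.Ucd()
##   ucd.SetConfigurationChangeCount(1)
##   ucd.SetRangingBackoffStart(3)
##   ucd.SetRangingBackoffEnd(15)
##   print(ucd.GetConfigurationChangeCount())    # -> 1
##   print(ucd.GetSerializedSize())              # size used by Ucd::Serialize()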
def register_Ns3UlJob_methods(root_module, cls):
cls.add_binary_comparison_operator('==')
## ul-job.h (module 'wimax'): ns3::UlJob::UlJob(ns3::UlJob const & arg0) [constructor]
cls.add_constructor([param('ns3::UlJob const &', 'arg0')])
## ul-job.h (module 'wimax'): ns3::UlJob::UlJob() [constructor]
cls.add_constructor([])
## ul-job.h (module 'wimax'): ns3::Time ns3::UlJob::GetDeadline() [member function]
cls.add_method('GetDeadline',
'ns3::Time',
[])
## ul-job.h (module 'wimax'): ns3::Time ns3::UlJob::GetPeriod() [member function]
cls.add_method('GetPeriod',
'ns3::Time',
[])
## ul-job.h (module 'wimax'): ns3::Time ns3::UlJob::GetReleaseTime() [member function]
cls.add_method('GetReleaseTime',
'ns3::Time',
[])
## ul-job.h (module 'wimax'): ns3::ServiceFlow::SchedulingType ns3::UlJob::GetSchedulingType() [member function]
cls.add_method('GetSchedulingType',
'ns3::ServiceFlow::SchedulingType',
[])
## ul-job.h (module 'wimax'): ns3::ServiceFlow * ns3::UlJob::GetServiceFlow() [member function]
cls.add_method('GetServiceFlow',
'ns3::ServiceFlow *',
[])
## ul-job.h (module 'wimax'): uint32_t ns3::UlJob::GetSize() [member function]
cls.add_method('GetSize',
'uint32_t',
[])
## ul-job.h (module 'wimax'): ns3::SSRecord * ns3::UlJob::GetSsRecord() [member function]
cls.add_method('GetSsRecord',
'ns3::SSRecord *',
[])
## ul-job.h (module 'wimax'): ns3::ReqType ns3::UlJob::GetType() [member function]
cls.add_method('GetType',
'ns3::ReqType',
[])
## ul-job.h (module 'wimax'): void ns3::UlJob::SetDeadline(ns3::Time deadline) [member function]
cls.add_method('SetDeadline',
'void',
[param('ns3::Time', 'deadline')])
## ul-job.h (module 'wimax'): void ns3::UlJob::SetPeriod(ns3::Time period) [member function]
cls.add_method('SetPeriod',
'void',
[param('ns3::Time', 'period')])
## ul-job.h (module 'wimax'): void ns3::UlJob::SetReleaseTime(ns3::Time releaseTime) [member function]
cls.add_method('SetReleaseTime',
'void',
[param('ns3::Time', 'releaseTime')])
## ul-job.h (module 'wimax'): void ns3::UlJob::SetSchedulingType(ns3::ServiceFlow::SchedulingType schedulingType) [member function]
cls.add_method('SetSchedulingType',
'void',
[param('ns3::ServiceFlow::SchedulingType', 'schedulingType')])
## ul-job.h (module 'wimax'): void ns3::UlJob::SetServiceFlow(ns3::ServiceFlow * serviceFlow) [member function]
cls.add_method('SetServiceFlow',
'void',
[param('ns3::ServiceFlow *', 'serviceFlow')])
## ul-job.h (module 'wimax'): void ns3::UlJob::SetSize(uint32_t size) [member function]
cls.add_method('SetSize',
'void',
[param('uint32_t', 'size')])
## ul-job.h (module 'wimax'): void ns3::UlJob::SetSsRecord(ns3::SSRecord * ssRecord) [member function]
cls.add_method('SetSsRecord',
'void',
[param('ns3::SSRecord *', 'ssRecord')])
## ul-job.h (module 'wimax'): void ns3::UlJob::SetType(ns3::ReqType type) [member function]
cls.add_method('SetType',
'void',
[param('ns3::ReqType', 'type')])
return
def register_Ns3UlMap_methods(root_module, cls):
## ul-mac-messages.h (module 'wimax'): ns3::UlMap::UlMap(ns3::UlMap const & arg0) [constructor]
cls.add_constructor([param('ns3::UlMap const &', 'arg0')])
## ul-mac-messages.h (module 'wimax'): ns3::UlMap::UlMap() [constructor]
cls.add_constructor([])
## ul-mac-messages.h (module 'wimax'): void ns3::UlMap::AddUlMapElement(ns3::OfdmUlMapIe ulMapElement) [member function]
cls.add_method('AddUlMapElement',
'void',
[param('ns3::OfdmUlMapIe', 'ulMapElement')])
## ul-mac-messages.h (module 'wimax'): uint32_t ns3::UlMap::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_virtual=True)
## ul-mac-messages.h (module 'wimax'): uint32_t ns3::UlMap::GetAllocationStartTime() const [member function]
cls.add_method('GetAllocationStartTime',
'uint32_t',
[],
is_const=True)
## ul-mac-messages.h (module 'wimax'): ns3::TypeId ns3::UlMap::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## ul-mac-messages.h (module 'wimax'): std::string ns3::UlMap::GetName() const [member function]
cls.add_method('GetName',
'std::string',
[],
is_const=True)
## ul-mac-messages.h (module 'wimax'): uint32_t ns3::UlMap::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True, is_virtual=True)
## ul-mac-messages.h (module 'wimax'): static ns3::TypeId ns3::UlMap::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## ul-mac-messages.h (module 'wimax'): uint8_t ns3::UlMap::GetUcdCount() const [member function]
cls.add_method('GetUcdCount',
'uint8_t',
[],
is_const=True)
## ul-mac-messages.h (module 'wimax'): std::list<ns3::OfdmUlMapIe, std::allocator<ns3::OfdmUlMapIe> > ns3::UlMap::GetUlMapElements() const [member function]
cls.add_method('GetUlMapElements',
'std::list< ns3::OfdmUlMapIe >',
[],
is_const=True)
## ul-mac-messages.h (module 'wimax'): void ns3::UlMap::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_virtual=True)
## ul-mac-messages.h (module 'wimax'): void ns3::UlMap::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_const=True, is_virtual=True)
## ul-mac-messages.h (module 'wimax'): void ns3::UlMap::SetAllocationStartTime(uint32_t allocationStartTime) [member function]
cls.add_method('SetAllocationStartTime',
'void',
[param('uint32_t', 'allocationStartTime')])
## ul-mac-messages.h (module 'wimax'): void ns3::UlMap::SetUcdCount(uint8_t ucdCount) [member function]
cls.add_method('SetUcdCount',
'void',
[param('uint8_t', 'ucdCount')])
return
def register_Ns3UniformRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::UniformRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::UniformRandomVariable::UniformRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::UniformRandomVariable::GetMin() const [member function]
cls.add_method('GetMin',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::UniformRandomVariable::GetMax() const [member function]
cls.add_method('GetMax',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::UniformRandomVariable::GetValue(double min, double max) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'min'), param('double', 'max')])
## random-variable-stream.h (module 'core'): uint32_t ns3::UniformRandomVariable::GetInteger(uint32_t min, uint32_t max) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'min'), param('uint32_t', 'max')])
## random-variable-stream.h (module 'core'): double ns3::UniformRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::UniformRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
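## Illustrative use of the ns3::UniformRandomVariable bindings registered
## above ('ns.core' import path assumed from the usual ns-3 Python build).
## Only overloads bound in this section are used; left as comments so this
## generated module stays side-effect free on import:
##
##   import ns.core
##   rng = ns.core.UniformRandomVariable()
##   x = rng.GetValue(0.0, 10.0)                 # double drawn from [0, 10)
##   n = rng.GetInteger(1, 6)                    # integer drawn from [1, 6]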
def register_Ns3UplinkScheduler_methods(root_module, cls):
## bs-uplink-scheduler.h (module 'wimax'): ns3::UplinkScheduler::UplinkScheduler(ns3::UplinkScheduler const & arg0) [constructor]
cls.add_constructor([param('ns3::UplinkScheduler const &', 'arg0')])
## bs-uplink-scheduler.h (module 'wimax'): ns3::UplinkScheduler::UplinkScheduler() [constructor]
cls.add_constructor([])
## bs-uplink-scheduler.h (module 'wimax'): ns3::UplinkScheduler::UplinkScheduler(ns3::Ptr<ns3::BaseStationNetDevice> bs) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::BaseStationNetDevice >', 'bs')])
## bs-uplink-scheduler.h (module 'wimax'): void ns3::UplinkScheduler::AddUplinkAllocation(ns3::OfdmUlMapIe & ulMapIe, uint32_t const & allocationSize, uint32_t & symbolsToAllocation, uint32_t & availableSymbols) [member function]
cls.add_method('AddUplinkAllocation',
'void',
[param('ns3::OfdmUlMapIe &', 'ulMapIe'), param('uint32_t const &', 'allocationSize'), param('uint32_t &', 'symbolsToAllocation'), param('uint32_t &', 'availableSymbols')],
is_virtual=True, is_pure_virtual=True)
## bs-uplink-scheduler.h (module 'wimax'): void ns3::UplinkScheduler::AllocateInitialRangingInterval(uint32_t & symbolsToAllocation, uint32_t & availableSymbols) [member function]
cls.add_method('AllocateInitialRangingInterval',
'void',
[param('uint32_t &', 'symbolsToAllocation'), param('uint32_t &', 'availableSymbols')],
is_virtual=True, is_pure_virtual=True)
## bs-uplink-scheduler.h (module 'wimax'): uint32_t ns3::UplinkScheduler::CalculateAllocationStartTime() [member function]
cls.add_method('CalculateAllocationStartTime',
'uint32_t',
[],
is_virtual=True, is_pure_virtual=True)
## bs-uplink-scheduler.h (module 'wimax'): ns3::Ptr<ns3::BaseStationNetDevice> ns3::UplinkScheduler::GetBs() [member function]
cls.add_method('GetBs',
'ns3::Ptr< ns3::BaseStationNetDevice >',
[],
is_virtual=True)
## bs-uplink-scheduler.h (module 'wimax'): void ns3::UplinkScheduler::GetChannelDescriptorsToUpdate(bool & arg0, bool & arg1, bool & arg2, bool & arg3) [member function]
cls.add_method('GetChannelDescriptorsToUpdate',
'void',
[param('bool &', 'arg0'), param('bool &', 'arg1'), param('bool &', 'arg2'), param('bool &', 'arg3')],
is_virtual=True, is_pure_virtual=True)
## bs-uplink-scheduler.h (module 'wimax'): ns3::Time ns3::UplinkScheduler::GetDcdTimeStamp() const [member function]
cls.add_method('GetDcdTimeStamp',
'ns3::Time',
[],
is_const=True, is_virtual=True)
## bs-uplink-scheduler.h (module 'wimax'): bool ns3::UplinkScheduler::GetIsInvIrIntrvlAllocated() const [member function]
cls.add_method('GetIsInvIrIntrvlAllocated',
'bool',
[],
is_const=True, is_virtual=True)
## bs-uplink-scheduler.h (module 'wimax'): bool ns3::UplinkScheduler::GetIsIrIntrvlAllocated() const [member function]
cls.add_method('GetIsIrIntrvlAllocated',
'bool',
[],
is_const=True, is_virtual=True)
## bs-uplink-scheduler.h (module 'wimax'): uint8_t ns3::UplinkScheduler::GetNrIrOppsAllocated() const [member function]
cls.add_method('GetNrIrOppsAllocated',
'uint8_t',
[],
is_const=True, is_virtual=True)
## bs-uplink-scheduler.h (module 'wimax'): ns3::Time ns3::UplinkScheduler::GetTimeStampIrInterval() [member function]
cls.add_method('GetTimeStampIrInterval',
'ns3::Time',
[],
is_virtual=True)
## bs-uplink-scheduler.h (module 'wimax'): static ns3::TypeId ns3::UplinkScheduler::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## bs-uplink-scheduler.h (module 'wimax'): ns3::Time ns3::UplinkScheduler::GetUcdTimeStamp() const [member function]
cls.add_method('GetUcdTimeStamp',
'ns3::Time',
[],
is_const=True, is_virtual=True)
## bs-uplink-scheduler.h (module 'wimax'): std::list<ns3::OfdmUlMapIe, std::allocator<ns3::OfdmUlMapIe> > ns3::UplinkScheduler::GetUplinkAllocations() const [member function]
cls.add_method('GetUplinkAllocations',
'std::list< ns3::OfdmUlMapIe >',
[],
is_const=True, is_virtual=True)
## bs-uplink-scheduler.h (module 'wimax'): void ns3::UplinkScheduler::InitOnce() [member function]
cls.add_method('InitOnce',
'void',
[],
is_virtual=True, is_pure_virtual=True)
## bs-uplink-scheduler.h (module 'wimax'): void ns3::UplinkScheduler::OnSetRequestedBandwidth(ns3::ServiceFlowRecord * sfr) [member function]
cls.add_method('OnSetRequestedBandwidth',
'void',
[param('ns3::ServiceFlowRecord *', 'sfr')],
is_virtual=True, is_pure_virtual=True)
## bs-uplink-scheduler.h (module 'wimax'): void ns3::UplinkScheduler::ProcessBandwidthRequest(ns3::BandwidthRequestHeader const & bwRequestHdr) [member function]
cls.add_method('ProcessBandwidthRequest',
'void',
[param('ns3::BandwidthRequestHeader const &', 'bwRequestHdr')],
is_virtual=True, is_pure_virtual=True)
## bs-uplink-scheduler.h (module 'wimax'): void ns3::UplinkScheduler::Schedule() [member function]
cls.add_method('Schedule',
'void',
[],
is_virtual=True, is_pure_virtual=True)
## bs-uplink-scheduler.h (module 'wimax'): void ns3::UplinkScheduler::ServiceBandwidthRequests(ns3::SSRecord const * ssRecord, ns3::ServiceFlow::SchedulingType schedulingType, ns3::OfdmUlMapIe & ulMapIe, ns3::WimaxPhy::ModulationType const modulationType, uint32_t & symbolsToAllocation, uint32_t & availableSymbols) [member function]
cls.add_method('ServiceBandwidthRequests',
'void',
[param('ns3::SSRecord const *', 'ssRecord'), param('ns3::ServiceFlow::SchedulingType', 'schedulingType'), param('ns3::OfdmUlMapIe &', 'ulMapIe'), param('ns3::WimaxPhy::ModulationType const', 'modulationType'), param('uint32_t &', 'symbolsToAllocation'), param('uint32_t &', 'availableSymbols')],
is_virtual=True, is_pure_virtual=True)
## bs-uplink-scheduler.h (module 'wimax'): bool ns3::UplinkScheduler::ServiceBandwidthRequests(ns3::ServiceFlow * serviceFlow, ns3::ServiceFlow::SchedulingType schedulingType, ns3::OfdmUlMapIe & ulMapIe, ns3::WimaxPhy::ModulationType const modulationType, uint32_t & symbolsToAllocation, uint32_t & availableSymbols) [member function]
cls.add_method('ServiceBandwidthRequests',
'bool',
[param('ns3::ServiceFlow *', 'serviceFlow'), param('ns3::ServiceFlow::SchedulingType', 'schedulingType'), param('ns3::OfdmUlMapIe &', 'ulMapIe'), param('ns3::WimaxPhy::ModulationType const', 'modulationType'), param('uint32_t &', 'symbolsToAllocation'), param('uint32_t &', 'availableSymbols')],
is_virtual=True, is_pure_virtual=True)
## bs-uplink-scheduler.h (module 'wimax'): void ns3::UplinkScheduler::ServiceUnsolicitedGrants(ns3::SSRecord const * ssRecord, ns3::ServiceFlow::SchedulingType schedulingType, ns3::OfdmUlMapIe & ulMapIe, ns3::WimaxPhy::ModulationType const modulationType, uint32_t & symbolsToAllocation, uint32_t & availableSymbols) [member function]
cls.add_method('ServiceUnsolicitedGrants',
'void',
[param('ns3::SSRecord const *', 'ssRecord'), param('ns3::ServiceFlow::SchedulingType', 'schedulingType'), param('ns3::OfdmUlMapIe &', 'ulMapIe'), param('ns3::WimaxPhy::ModulationType const', 'modulationType'), param('uint32_t &', 'symbolsToAllocation'), param('uint32_t &', 'availableSymbols')],
is_virtual=True, is_pure_virtual=True)
## bs-uplink-scheduler.h (module 'wimax'): void ns3::UplinkScheduler::SetBs(ns3::Ptr<ns3::BaseStationNetDevice> bs) [member function]
cls.add_method('SetBs',
'void',
[param('ns3::Ptr< ns3::BaseStationNetDevice >', 'bs')],
is_virtual=True)
## bs-uplink-scheduler.h (module 'wimax'): void ns3::UplinkScheduler::SetDcdTimeStamp(ns3::Time dcdTimeStamp) [member function]
cls.add_method('SetDcdTimeStamp',
'void',
[param('ns3::Time', 'dcdTimeStamp')],
is_virtual=True)
## bs-uplink-scheduler.h (module 'wimax'): void ns3::UplinkScheduler::SetIsInvIrIntrvlAllocated(bool isInvIrIntrvlAllocated) [member function]
cls.add_method('SetIsInvIrIntrvlAllocated',
'void',
[param('bool', 'isInvIrIntrvlAllocated')],
is_virtual=True)
## bs-uplink-scheduler.h (module 'wimax'): void ns3::UplinkScheduler::SetIsIrIntrvlAllocated(bool isIrIntrvlAllocated) [member function]
cls.add_method('SetIsIrIntrvlAllocated',
'void',
[param('bool', 'isIrIntrvlAllocated')],
is_virtual=True)
## bs-uplink-scheduler.h (module 'wimax'): void ns3::UplinkScheduler::SetNrIrOppsAllocated(uint8_t nrIrOppsAllocated) [member function]
cls.add_method('SetNrIrOppsAllocated',
'void',
[param('uint8_t', 'nrIrOppsAllocated')],
is_virtual=True)
## bs-uplink-scheduler.h (module 'wimax'): void ns3::UplinkScheduler::SetTimeStampIrInterval(ns3::Time timeStampIrInterval) [member function]
cls.add_method('SetTimeStampIrInterval',
'void',
[param('ns3::Time', 'timeStampIrInterval')],
is_virtual=True)
## bs-uplink-scheduler.h (module 'wimax'): void ns3::UplinkScheduler::SetUcdTimeStamp(ns3::Time ucdTimeStamp) [member function]
cls.add_method('SetUcdTimeStamp',
'void',
[param('ns3::Time', 'ucdTimeStamp')],
is_virtual=True)
## bs-uplink-scheduler.h (module 'wimax'): void ns3::UplinkScheduler::SetupServiceFlow(ns3::SSRecord * ssRecord, ns3::ServiceFlow * serviceFlow) [member function]
cls.add_method('SetupServiceFlow',
'void',
[param('ns3::SSRecord *', 'ssRecord'), param('ns3::ServiceFlow *', 'serviceFlow')],
is_virtual=True, is_pure_virtual=True)
return
def register_Ns3UplinkSchedulerMBQoS_methods(root_module, cls):
## bs-uplink-scheduler-mbqos.h (module 'wimax'): ns3::UplinkSchedulerMBQoS::UplinkSchedulerMBQoS(ns3::UplinkSchedulerMBQoS const & arg0) [constructor]
cls.add_constructor([param('ns3::UplinkSchedulerMBQoS const &', 'arg0')])
## bs-uplink-scheduler-mbqos.h (module 'wimax'): ns3::UplinkSchedulerMBQoS::UplinkSchedulerMBQoS() [constructor]
cls.add_constructor([])
## bs-uplink-scheduler-mbqos.h (module 'wimax'): ns3::UplinkSchedulerMBQoS::UplinkSchedulerMBQoS(ns3::Time time) [constructor]
cls.add_constructor([param('ns3::Time', 'time')])
## bs-uplink-scheduler-mbqos.h (module 'wimax'): void ns3::UplinkSchedulerMBQoS::AddUplinkAllocation(ns3::OfdmUlMapIe & ulMapIe, uint32_t const & allocationSize, uint32_t & symbolsToAllocation, uint32_t & availableSymbols) [member function]
cls.add_method('AddUplinkAllocation',
'void',
[param('ns3::OfdmUlMapIe &', 'ulMapIe'), param('uint32_t const &', 'allocationSize'), param('uint32_t &', 'symbolsToAllocation'), param('uint32_t &', 'availableSymbols')],
is_virtual=True)
## bs-uplink-scheduler-mbqos.h (module 'wimax'): void ns3::UplinkSchedulerMBQoS::AllocateInitialRangingInterval(uint32_t & symbolsToAllocation, uint32_t & availableSymbols) [member function]
cls.add_method('AllocateInitialRangingInterval',
'void',
[param('uint32_t &', 'symbolsToAllocation'), param('uint32_t &', 'availableSymbols')],
is_virtual=True)
## bs-uplink-scheduler-mbqos.h (module 'wimax'): uint32_t ns3::UplinkSchedulerMBQoS::CalculateAllocationStartTime() [member function]
cls.add_method('CalculateAllocationStartTime',
'uint32_t',
[],
is_virtual=True)
## bs-uplink-scheduler-mbqos.h (module 'wimax'): void ns3::UplinkSchedulerMBQoS::CheckDeadline(uint32_t & availableSymbols) [member function]
cls.add_method('CheckDeadline',
'void',
[param('uint32_t &', 'availableSymbols')])
## bs-uplink-scheduler-mbqos.h (module 'wimax'): void ns3::UplinkSchedulerMBQoS::CheckMinimumBandwidth(uint32_t & availableSymbols) [member function]
cls.add_method('CheckMinimumBandwidth',
'void',
[param('uint32_t &', 'availableSymbols')])
## bs-uplink-scheduler-mbqos.h (module 'wimax'): uint32_t ns3::UplinkSchedulerMBQoS::CountSymbolsJobs(ns3::Ptr<ns3::UlJob> job) [member function]
cls.add_method('CountSymbolsJobs',
'uint32_t',
[param('ns3::Ptr< ns3::UlJob >', 'job')])
## bs-uplink-scheduler-mbqos.h (module 'wimax'): uint32_t ns3::UplinkSchedulerMBQoS::CountSymbolsQueue(std::list<ns3::Ptr<ns3::UlJob>, std::allocator<ns3::Ptr<ns3::UlJob> > > jobs) [member function]
cls.add_method('CountSymbolsQueue',
'uint32_t',
[param('std::list< ns3::Ptr< ns3::UlJob > >', 'jobs')])
## bs-uplink-scheduler-mbqos.h (module 'wimax'): ns3::Ptr<ns3::UlJob> ns3::UplinkSchedulerMBQoS::CreateUlJob(ns3::SSRecord * ssRecord, ns3::ServiceFlow::SchedulingType schedType, ns3::ReqType reqType) [member function]
cls.add_method('CreateUlJob',
'ns3::Ptr< ns3::UlJob >',
[param('ns3::SSRecord *', 'ssRecord'), param('ns3::ServiceFlow::SchedulingType', 'schedType'), param('ns3::ReqType', 'reqType')])
## bs-uplink-scheduler-mbqos.h (module 'wimax'): ns3::Ptr<ns3::UlJob> ns3::UplinkSchedulerMBQoS::DequeueJob(ns3::UlJob::JobPriority priority) [member function]
cls.add_method('DequeueJob',
'ns3::Ptr< ns3::UlJob >',
[param('ns3::UlJob::JobPriority', 'priority')])
## bs-uplink-scheduler-mbqos.h (module 'wimax'): ns3::Time ns3::UplinkSchedulerMBQoS::DetermineDeadline(ns3::ServiceFlow * serviceFlow) [member function]
cls.add_method('DetermineDeadline',
'ns3::Time',
[param('ns3::ServiceFlow *', 'serviceFlow')])
## bs-uplink-scheduler-mbqos.h (module 'wimax'): void ns3::UplinkSchedulerMBQoS::EnqueueJob(ns3::UlJob::JobPriority priority, ns3::Ptr<ns3::UlJob> job) [member function]
cls.add_method('EnqueueJob',
'void',
[param('ns3::UlJob::JobPriority', 'priority'), param('ns3::Ptr< ns3::UlJob >', 'job')])
## bs-uplink-scheduler-mbqos.h (module 'wimax'): void ns3::UplinkSchedulerMBQoS::GetChannelDescriptorsToUpdate(bool & updateDcd, bool & updateUcd, bool & sendDcd, bool & sendUcd) [member function]
cls.add_method('GetChannelDescriptorsToUpdate',
'void',
[param('bool &', 'updateDcd'), param('bool &', 'updateUcd'), param('bool &', 'sendDcd'), param('bool &', 'sendUcd')],
is_virtual=True)
## bs-uplink-scheduler-mbqos.h (module 'wimax'): uint32_t ns3::UplinkSchedulerMBQoS::GetPendingSize(ns3::ServiceFlow * serviceFlow) [member function]
cls.add_method('GetPendingSize',
'uint32_t',
[param('ns3::ServiceFlow *', 'serviceFlow')])
## bs-uplink-scheduler-mbqos.h (module 'wimax'): static ns3::TypeId ns3::UplinkSchedulerMBQoS::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## bs-uplink-scheduler-mbqos.h (module 'wimax'): std::list<ns3::OfdmUlMapIe, std::allocator<ns3::OfdmUlMapIe> > ns3::UplinkSchedulerMBQoS::GetUplinkAllocations() const [member function]
cls.add_method('GetUplinkAllocations',
'std::list< ns3::OfdmUlMapIe >',
[],
is_const=True, is_virtual=True)
## bs-uplink-scheduler-mbqos.h (module 'wimax'): void ns3::UplinkSchedulerMBQoS::InitOnce() [member function]
cls.add_method('InitOnce',
'void',
[],
is_virtual=True)
## bs-uplink-scheduler-mbqos.h (module 'wimax'): void ns3::UplinkSchedulerMBQoS::OnSetRequestedBandwidth(ns3::ServiceFlowRecord * sfr) [member function]
cls.add_method('OnSetRequestedBandwidth',
'void',
[param('ns3::ServiceFlowRecord *', 'sfr')],
is_virtual=True)
## bs-uplink-scheduler-mbqos.h (module 'wimax'): void ns3::UplinkSchedulerMBQoS::ProcessBandwidthRequest(ns3::BandwidthRequestHeader const & bwRequestHdr) [member function]
cls.add_method('ProcessBandwidthRequest',
'void',
[param('ns3::BandwidthRequestHeader const &', 'bwRequestHdr')],
is_virtual=True)
## bs-uplink-scheduler-mbqos.h (module 'wimax'): void ns3::UplinkSchedulerMBQoS::Schedule() [member function]
cls.add_method('Schedule',
'void',
[],
is_virtual=True)
## bs-uplink-scheduler-mbqos.h (module 'wimax'): void ns3::UplinkSchedulerMBQoS::ServiceBandwidthRequests(ns3::SSRecord const * ssRecord, ns3::ServiceFlow::SchedulingType schedulingType, ns3::OfdmUlMapIe & ulMapIe, ns3::WimaxPhy::ModulationType const modulationType, uint32_t & symbolsToAllocation, uint32_t & availableSymbols) [member function]
cls.add_method('ServiceBandwidthRequests',
'void',
[param('ns3::SSRecord const *', 'ssRecord'), param('ns3::ServiceFlow::SchedulingType', 'schedulingType'), param('ns3::OfdmUlMapIe &', 'ulMapIe'), param('ns3::WimaxPhy::ModulationType const', 'modulationType'), param('uint32_t &', 'symbolsToAllocation'), param('uint32_t &', 'availableSymbols')],
is_virtual=True)
## bs-uplink-scheduler-mbqos.h (module 'wimax'): bool ns3::UplinkSchedulerMBQoS::ServiceBandwidthRequests(ns3::ServiceFlow * serviceFlow, ns3::ServiceFlow::SchedulingType schedulingType, ns3::OfdmUlMapIe & ulMapIe, ns3::WimaxPhy::ModulationType const modulationType, uint32_t & symbolsToAllocation, uint32_t & availableSymbols) [member function]
cls.add_method('ServiceBandwidthRequests',
'bool',
[param('ns3::ServiceFlow *', 'serviceFlow'), param('ns3::ServiceFlow::SchedulingType', 'schedulingType'), param('ns3::OfdmUlMapIe &', 'ulMapIe'), param('ns3::WimaxPhy::ModulationType const', 'modulationType'), param('uint32_t &', 'symbolsToAllocation'), param('uint32_t &', 'availableSymbols')],
is_virtual=True)
## bs-uplink-scheduler-mbqos.h (module 'wimax'): bool ns3::UplinkSchedulerMBQoS::ServiceBandwidthRequestsBytes(ns3::ServiceFlow * serviceFlow, ns3::ServiceFlow::SchedulingType schedulingType, ns3::OfdmUlMapIe & ulMapIe, ns3::WimaxPhy::ModulationType const modulationType, uint32_t & symbolsToAllocation, uint32_t & availableSymbols, uint32_t allocationSizeBytes) [member function]
cls.add_method('ServiceBandwidthRequestsBytes',
'bool',
[param('ns3::ServiceFlow *', 'serviceFlow'), param('ns3::ServiceFlow::SchedulingType', 'schedulingType'), param('ns3::OfdmUlMapIe &', 'ulMapIe'), param('ns3::WimaxPhy::ModulationType const', 'modulationType'), param('uint32_t &', 'symbolsToAllocation'), param('uint32_t &', 'availableSymbols'), param('uint32_t', 'allocationSizeBytes')])
## bs-uplink-scheduler-mbqos.h (module 'wimax'): void ns3::UplinkSchedulerMBQoS::ServiceUnsolicitedGrants(ns3::SSRecord const * ssRecord, ns3::ServiceFlow::SchedulingType schedulingType, ns3::OfdmUlMapIe & ulMapIe, ns3::WimaxPhy::ModulationType const modulationType, uint32_t & symbolsToAllocation, uint32_t & availableSymbols) [member function]
cls.add_method('ServiceUnsolicitedGrants',
'void',
[param('ns3::SSRecord const *', 'ssRecord'), param('ns3::ServiceFlow::SchedulingType', 'schedulingType'), param('ns3::OfdmUlMapIe &', 'ulMapIe'), param('ns3::WimaxPhy::ModulationType const', 'modulationType'), param('uint32_t &', 'symbolsToAllocation'), param('uint32_t &', 'availableSymbols')],
is_virtual=True)
## bs-uplink-scheduler-mbqos.h (module 'wimax'): void ns3::UplinkSchedulerMBQoS::SetupServiceFlow(ns3::SSRecord * ssRecord, ns3::ServiceFlow * serviceFlow) [member function]
cls.add_method('SetupServiceFlow',
'void',
[param('ns3::SSRecord *', 'ssRecord'), param('ns3::ServiceFlow *', 'serviceFlow')],
is_virtual=True)
## bs-uplink-scheduler-mbqos.h (module 'wimax'): void ns3::UplinkSchedulerMBQoS::UplinkSchedWindowTimer() [member function]
cls.add_method('UplinkSchedWindowTimer',
'void',
[])
return
def register_Ns3UplinkSchedulerRtps_methods(root_module, cls):
## bs-uplink-scheduler-rtps.h (module 'wimax'): ns3::UplinkSchedulerRtps::UplinkSchedulerRtps(ns3::UplinkSchedulerRtps const & arg0) [constructor]
cls.add_constructor([param('ns3::UplinkSchedulerRtps const &', 'arg0')])
## bs-uplink-scheduler-rtps.h (module 'wimax'): ns3::UplinkSchedulerRtps::UplinkSchedulerRtps() [constructor]
cls.add_constructor([])
## bs-uplink-scheduler-rtps.h (module 'wimax'): ns3::UplinkSchedulerRtps::UplinkSchedulerRtps(ns3::Ptr<ns3::BaseStationNetDevice> bs) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::BaseStationNetDevice >', 'bs')])
## bs-uplink-scheduler-rtps.h (module 'wimax'): void ns3::UplinkSchedulerRtps::AddUplinkAllocation(ns3::OfdmUlMapIe & ulMapIe, uint32_t const & allocationSize, uint32_t & symbolsToAllocation, uint32_t & availableSymbols) [member function]
cls.add_method('AddUplinkAllocation',
'void',
[param('ns3::OfdmUlMapIe &', 'ulMapIe'), param('uint32_t const &', 'allocationSize'), param('uint32_t &', 'symbolsToAllocation'), param('uint32_t &', 'availableSymbols')],
is_virtual=True)
## bs-uplink-scheduler-rtps.h (module 'wimax'): void ns3::UplinkSchedulerRtps::AllocateInitialRangingInterval(uint32_t & symbolsToAllocation, uint32_t & availableSymbols) [member function]
cls.add_method('AllocateInitialRangingInterval',
'void',
[param('uint32_t &', 'symbolsToAllocation'), param('uint32_t &', 'availableSymbols')],
is_virtual=True)
## bs-uplink-scheduler-rtps.h (module 'wimax'): uint32_t ns3::UplinkSchedulerRtps::CalculateAllocationStartTime() [member function]
cls.add_method('CalculateAllocationStartTime',
'uint32_t',
[],
is_virtual=True)
## bs-uplink-scheduler-rtps.h (module 'wimax'): void ns3::UplinkSchedulerRtps::GetChannelDescriptorsToUpdate(bool & updateDcd, bool & updateUcd, bool & sendDcd, bool & sendUcd) [member function]
cls.add_method('GetChannelDescriptorsToUpdate',
'void',
[param('bool &', 'updateDcd'), param('bool &', 'updateUcd'), param('bool &', 'sendDcd'), param('bool &', 'sendUcd')],
is_virtual=True)
## bs-uplink-scheduler-rtps.h (module 'wimax'): static ns3::TypeId ns3::UplinkSchedulerRtps::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## bs-uplink-scheduler-rtps.h (module 'wimax'): std::list<ns3::OfdmUlMapIe, std::allocator<ns3::OfdmUlMapIe> > ns3::UplinkSchedulerRtps::GetUplinkAllocations() const [member function]
cls.add_method('GetUplinkAllocations',
'std::list< ns3::OfdmUlMapIe >',
[],
is_const=True, is_virtual=True)
## bs-uplink-scheduler-rtps.h (module 'wimax'): void ns3::UplinkSchedulerRtps::InitOnce() [member function]
cls.add_method('InitOnce',
'void',
[],
is_virtual=True)
## bs-uplink-scheduler-rtps.h (module 'wimax'): void ns3::UplinkSchedulerRtps::OnSetRequestedBandwidth(ns3::ServiceFlowRecord * sfr) [member function]
cls.add_method('OnSetRequestedBandwidth',
'void',
[param('ns3::ServiceFlowRecord *', 'sfr')],
is_virtual=True)
## bs-uplink-scheduler-rtps.h (module 'wimax'): void ns3::UplinkSchedulerRtps::ProcessBandwidthRequest(ns3::BandwidthRequestHeader const & bwRequestHdr) [member function]
cls.add_method('ProcessBandwidthRequest',
'void',
[param('ns3::BandwidthRequestHeader const &', 'bwRequestHdr')],
is_virtual=True)
## bs-uplink-scheduler-rtps.h (module 'wimax'): void ns3::UplinkSchedulerRtps::Schedule() [member function]
cls.add_method('Schedule',
'void',
[],
is_virtual=True)
## bs-uplink-scheduler-rtps.h (module 'wimax'): void ns3::UplinkSchedulerRtps::ServiceBandwidthRequests(ns3::SSRecord const * ssRecord, ns3::ServiceFlow::SchedulingType schedulingType, ns3::OfdmUlMapIe & ulMapIe, ns3::WimaxPhy::ModulationType const modulationType, uint32_t & symbolsToAllocation, uint32_t & availableSymbols) [member function]
cls.add_method('ServiceBandwidthRequests',
'void',
[param('ns3::SSRecord const *', 'ssRecord'), param('ns3::ServiceFlow::SchedulingType', 'schedulingType'), param('ns3::OfdmUlMapIe &', 'ulMapIe'), param('ns3::WimaxPhy::ModulationType const', 'modulationType'), param('uint32_t &', 'symbolsToAllocation'), param('uint32_t &', 'availableSymbols')],
is_virtual=True)
## bs-uplink-scheduler-rtps.h (module 'wimax'): bool ns3::UplinkSchedulerRtps::ServiceBandwidthRequests(ns3::ServiceFlow * serviceFlow, ns3::ServiceFlow::SchedulingType schedulingType, ns3::OfdmUlMapIe & ulMapIe, ns3::WimaxPhy::ModulationType const modulationType, uint32_t & symbolsToAllocation, uint32_t & availableSymbols) [member function]
cls.add_method('ServiceBandwidthRequests',
'bool',
[param('ns3::ServiceFlow *', 'serviceFlow'), param('ns3::ServiceFlow::SchedulingType', 'schedulingType'), param('ns3::OfdmUlMapIe &', 'ulMapIe'), param('ns3::WimaxPhy::ModulationType const', 'modulationType'), param('uint32_t &', 'symbolsToAllocation'), param('uint32_t &', 'availableSymbols')],
is_virtual=True)
## bs-uplink-scheduler-rtps.h (module 'wimax'): void ns3::UplinkSchedulerRtps::ServiceUnsolicitedGrants(ns3::SSRecord const * ssRecord, ns3::ServiceFlow::SchedulingType schedulingType, ns3::OfdmUlMapIe & ulMapIe, ns3::WimaxPhy::ModulationType const modulationType, uint32_t & symbolsToAllocation, uint32_t & availableSymbols) [member function]
cls.add_method('ServiceUnsolicitedGrants',
'void',
[param('ns3::SSRecord const *', 'ssRecord'), param('ns3::ServiceFlow::SchedulingType', 'schedulingType'), param('ns3::OfdmUlMapIe &', 'ulMapIe'), param('ns3::WimaxPhy::ModulationType const', 'modulationType'), param('uint32_t &', 'symbolsToAllocation'), param('uint32_t &', 'availableSymbols')],
is_virtual=True)
## bs-uplink-scheduler-rtps.h (module 'wimax'): void ns3::UplinkSchedulerRtps::SetupServiceFlow(ns3::SSRecord * ssRecord, ns3::ServiceFlow * serviceFlow) [member function]
cls.add_method('SetupServiceFlow',
'void',
[param('ns3::SSRecord *', 'ssRecord'), param('ns3::ServiceFlow *', 'serviceFlow')],
is_virtual=True)
## bs-uplink-scheduler-rtps.h (module 'wimax'): void ns3::UplinkSchedulerRtps::ULSchedulerRTPSConnection(uint32_t & symbolsToAllocation, uint32_t & availableSymbols) [member function]
cls.add_method('ULSchedulerRTPSConnection',
'void',
[param('uint32_t &', 'symbolsToAllocation'), param('uint32_t &', 'availableSymbols')])
return
def register_Ns3UplinkSchedulerSimple_methods(root_module, cls):
## bs-uplink-scheduler-simple.h (module 'wimax'): ns3::UplinkSchedulerSimple::UplinkSchedulerSimple(ns3::UplinkSchedulerSimple const & arg0) [constructor]
cls.add_constructor([param('ns3::UplinkSchedulerSimple const &', 'arg0')])
## bs-uplink-scheduler-simple.h (module 'wimax'): ns3::UplinkSchedulerSimple::UplinkSchedulerSimple() [constructor]
cls.add_constructor([])
## bs-uplink-scheduler-simple.h (module 'wimax'): ns3::UplinkSchedulerSimple::UplinkSchedulerSimple(ns3::Ptr<ns3::BaseStationNetDevice> bs) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::BaseStationNetDevice >', 'bs')])
## bs-uplink-scheduler-simple.h (module 'wimax'): void ns3::UplinkSchedulerSimple::AddUplinkAllocation(ns3::OfdmUlMapIe & ulMapIe, uint32_t const & allocationSize, uint32_t & symbolsToAllocation, uint32_t & availableSymbols) [member function]
cls.add_method('AddUplinkAllocation',
'void',
[param('ns3::OfdmUlMapIe &', 'ulMapIe'), param('uint32_t const &', 'allocationSize'), param('uint32_t &', 'symbolsToAllocation'), param('uint32_t &', 'availableSymbols')],
is_virtual=True)
## bs-uplink-scheduler-simple.h (module 'wimax'): void ns3::UplinkSchedulerSimple::AllocateInitialRangingInterval(uint32_t & symbolsToAllocation, uint32_t & availableSymbols) [member function]
cls.add_method('AllocateInitialRangingInterval',
'void',
[param('uint32_t &', 'symbolsToAllocation'), param('uint32_t &', 'availableSymbols')],
is_virtual=True)
## bs-uplink-scheduler-simple.h (module 'wimax'): uint32_t ns3::UplinkSchedulerSimple::CalculateAllocationStartTime() [member function]
cls.add_method('CalculateAllocationStartTime',
'uint32_t',
[],
is_virtual=True)
## bs-uplink-scheduler-simple.h (module 'wimax'): void ns3::UplinkSchedulerSimple::GetChannelDescriptorsToUpdate(bool & arg0, bool & arg1, bool & arg2, bool & arg3) [member function]
cls.add_method('GetChannelDescriptorsToUpdate',
'void',
[param('bool &', 'arg0'), param('bool &', 'arg1'), param('bool &', 'arg2'), param('bool &', 'arg3')],
is_virtual=True)
## bs-uplink-scheduler-simple.h (module 'wimax'): static ns3::TypeId ns3::UplinkSchedulerSimple::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## bs-uplink-scheduler-simple.h (module 'wimax'): std::list<ns3::OfdmUlMapIe, std::allocator<ns3::OfdmUlMapIe> > ns3::UplinkSchedulerSimple::GetUplinkAllocations() const [member function]
cls.add_method('GetUplinkAllocations',
'std::list< ns3::OfdmUlMapIe >',
[],
is_const=True, is_virtual=True)
## bs-uplink-scheduler-simple.h (module 'wimax'): void ns3::UplinkSchedulerSimple::InitOnce() [member function]
cls.add_method('InitOnce',
'void',
[],
is_virtual=True)
## bs-uplink-scheduler-simple.h (module 'wimax'): void ns3::UplinkSchedulerSimple::OnSetRequestedBandwidth(ns3::ServiceFlowRecord * sfr) [member function]
cls.add_method('OnSetRequestedBandwidth',
'void',
[param('ns3::ServiceFlowRecord *', 'sfr')],
is_virtual=True)
## bs-uplink-scheduler-simple.h (module 'wimax'): void ns3::UplinkSchedulerSimple::ProcessBandwidthRequest(ns3::BandwidthRequestHeader const & bwRequestHdr) [member function]
cls.add_method('ProcessBandwidthRequest',
'void',
[param('ns3::BandwidthRequestHeader const &', 'bwRequestHdr')],
is_virtual=True)
## bs-uplink-scheduler-simple.h (module 'wimax'): void ns3::UplinkSchedulerSimple::Schedule() [member function]
cls.add_method('Schedule',
'void',
[],
is_virtual=True)
## bs-uplink-scheduler-simple.h (module 'wimax'): void ns3::UplinkSchedulerSimple::ServiceBandwidthRequests(ns3::SSRecord const * ssRecord, ns3::ServiceFlow::SchedulingType schedulingType, ns3::OfdmUlMapIe & ulMapIe, ns3::WimaxPhy::ModulationType const modulationType, uint32_t & symbolsToAllocation, uint32_t & availableSymbols) [member function]
cls.add_method('ServiceBandwidthRequests',
'void',
[param('ns3::SSRecord const *', 'ssRecord'), param('ns3::ServiceFlow::SchedulingType', 'schedulingType'), param('ns3::OfdmUlMapIe &', 'ulMapIe'), param('ns3::WimaxPhy::ModulationType const', 'modulationType'), param('uint32_t &', 'symbolsToAllocation'), param('uint32_t &', 'availableSymbols')],
is_virtual=True)
## bs-uplink-scheduler-simple.h (module 'wimax'): bool ns3::UplinkSchedulerSimple::ServiceBandwidthRequests(ns3::ServiceFlow * serviceFlow, ns3::ServiceFlow::SchedulingType schedulingType, ns3::OfdmUlMapIe & ulMapIe, ns3::WimaxPhy::ModulationType const modulationType, uint32_t & symbolsToAllocation, uint32_t & availableSymbols) [member function]
cls.add_method('ServiceBandwidthRequests',
'bool',
[param('ns3::ServiceFlow *', 'serviceFlow'), param('ns3::ServiceFlow::SchedulingType', 'schedulingType'), param('ns3::OfdmUlMapIe &', 'ulMapIe'), param('ns3::WimaxPhy::ModulationType const', 'modulationType'), param('uint32_t &', 'symbolsToAllocation'), param('uint32_t &', 'availableSymbols')],
is_virtual=True)
## bs-uplink-scheduler-simple.h (module 'wimax'): void ns3::UplinkSchedulerSimple::ServiceUnsolicitedGrants(ns3::SSRecord const * ssRecord, ns3::ServiceFlow::SchedulingType schedulingType, ns3::OfdmUlMapIe & ulMapIe, ns3::WimaxPhy::ModulationType const modulationType, uint32_t & symbolsToAllocation, uint32_t & availableSymbols) [member function]
cls.add_method('ServiceUnsolicitedGrants',
'void',
[param('ns3::SSRecord const *', 'ssRecord'), param('ns3::ServiceFlow::SchedulingType', 'schedulingType'), param('ns3::OfdmUlMapIe &', 'ulMapIe'), param('ns3::WimaxPhy::ModulationType const', 'modulationType'), param('uint32_t &', 'symbolsToAllocation'), param('uint32_t &', 'availableSymbols')],
is_virtual=True)
## bs-uplink-scheduler-simple.h (module 'wimax'): void ns3::UplinkSchedulerSimple::SetupServiceFlow(ns3::SSRecord * ssRecord, ns3::ServiceFlow * serviceFlow) [member function]
cls.add_method('SetupServiceFlow',
'void',
[param('ns3::SSRecord *', 'ssRecord'), param('ns3::ServiceFlow *', 'serviceFlow')],
is_virtual=True)
return
def register_Ns3WeibullRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::WeibullRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::WeibullRandomVariable::WeibullRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::WeibullRandomVariable::GetScale() const [member function]
cls.add_method('GetScale',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::WeibullRandomVariable::GetShape() const [member function]
cls.add_method('GetShape',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::WeibullRandomVariable::GetBound() const [member function]
cls.add_method('GetBound',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::WeibullRandomVariable::GetValue(double scale, double shape, double bound) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'scale'), param('double', 'shape'), param('double', 'bound')])
## random-variable-stream.h (module 'core'): uint32_t ns3::WeibullRandomVariable::GetInteger(uint32_t scale, uint32_t shape, uint32_t bound) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'scale'), param('uint32_t', 'shape'), param('uint32_t', 'bound')])
## random-variable-stream.h (module 'core'): double ns3::WeibullRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::WeibullRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
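# Illustrative sketch (not used by the binding generator): exercising the
# WeibullRandomVariable API whose bindings are registered above. The `ns.core` import
# path is an assumption about how the compiled bindings are packaged; only the
# constructor and Get* calls themselves are taken from the registrations in this file.
def _example_weibull_random_variable():
    """Hedged usage sketch: draw a few Weibull-distributed samples."""
    import ns.core  # assumed module name of the compiled core bindings

    rng = ns.core.WeibullRandomVariable()  # default constructor bound above
    # GetValue(scale, shape, bound) is bound above; `bound` is treated as an upper
    # truncation limit on returned values (semantics assumed from random-variable-stream.h).
    samples = [rng.GetValue(2.0, 1.5, 50.0) for _ in range(5)]
    return samples, rng.GetScale(), rng.GetShape(), rng.GetBound()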
def register_Ns3WimaxConnection_methods(root_module, cls):
## wimax-connection.h (module 'wimax'): ns3::WimaxConnection::WimaxConnection(ns3::WimaxConnection const & arg0) [constructor]
cls.add_constructor([param('ns3::WimaxConnection const &', 'arg0')])
## wimax-connection.h (module 'wimax'): ns3::WimaxConnection::WimaxConnection(ns3::Cid cid, ns3::Cid::Type type) [constructor]
cls.add_constructor([param('ns3::Cid', 'cid'), param('ns3::Cid::Type', 'type')])
## wimax-connection.h (module 'wimax'): void ns3::WimaxConnection::ClearFragmentsQueue() [member function]
cls.add_method('ClearFragmentsQueue',
'void',
[])
## wimax-connection.h (module 'wimax'): ns3::Ptr<ns3::Packet> ns3::WimaxConnection::Dequeue(ns3::MacHeaderType::HeaderType packetType=::ns3::MacHeaderType::HeaderType::HEADER_TYPE_GENERIC) [member function]
cls.add_method('Dequeue',
'ns3::Ptr< ns3::Packet >',
[param('ns3::MacHeaderType::HeaderType', 'packetType', default_value='::ns3::MacHeaderType::HeaderType::HEADER_TYPE_GENERIC')])
## wimax-connection.h (module 'wimax'): ns3::Ptr<ns3::Packet> ns3::WimaxConnection::Dequeue(ns3::MacHeaderType::HeaderType packetType, uint32_t availableByte) [member function]
cls.add_method('Dequeue',
'ns3::Ptr< ns3::Packet >',
[param('ns3::MacHeaderType::HeaderType', 'packetType'), param('uint32_t', 'availableByte')])
## wimax-connection.h (module 'wimax'): bool ns3::WimaxConnection::Enqueue(ns3::Ptr<ns3::Packet> packet, ns3::MacHeaderType const & hdrType, ns3::GenericMacHeader const & hdr) [member function]
cls.add_method('Enqueue',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::MacHeaderType const &', 'hdrType'), param('ns3::GenericMacHeader const &', 'hdr')])
## wimax-connection.h (module 'wimax'): void ns3::WimaxConnection::FragmentEnqueue(ns3::Ptr<const ns3::Packet> fragment) [member function]
cls.add_method('FragmentEnqueue',
'void',
[param('ns3::Ptr< ns3::Packet const >', 'fragment')])
## wimax-connection.h (module 'wimax'): ns3::Cid ns3::WimaxConnection::GetCid() const [member function]
cls.add_method('GetCid',
'ns3::Cid',
[],
is_const=True)
## wimax-connection.h (module 'wimax'): ns3::WimaxConnection::FragmentsQueue const ns3::WimaxConnection::GetFragmentsQueue() const [member function]
cls.add_method('GetFragmentsQueue',
'ns3::WimaxConnection::FragmentsQueue const',
[],
is_const=True)
## wimax-connection.h (module 'wimax'): ns3::Ptr<ns3::WimaxMacQueue> ns3::WimaxConnection::GetQueue() const [member function]
cls.add_method('GetQueue',
'ns3::Ptr< ns3::WimaxMacQueue >',
[],
is_const=True)
## wimax-connection.h (module 'wimax'): uint8_t ns3::WimaxConnection::GetSchedulingType() const [member function]
cls.add_method('GetSchedulingType',
'uint8_t',
[],
is_const=True)
## wimax-connection.h (module 'wimax'): ns3::ServiceFlow * ns3::WimaxConnection::GetServiceFlow() const [member function]
cls.add_method('GetServiceFlow',
'ns3::ServiceFlow *',
[],
is_const=True)
## wimax-connection.h (module 'wimax'): ns3::Cid::Type ns3::WimaxConnection::GetType() const [member function]
cls.add_method('GetType',
'ns3::Cid::Type',
[],
is_const=True)
## wimax-connection.h (module 'wimax'): static ns3::TypeId ns3::WimaxConnection::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## wimax-connection.h (module 'wimax'): std::string ns3::WimaxConnection::GetTypeStr() const [member function]
cls.add_method('GetTypeStr',
'std::string',
[],
is_const=True)
## wimax-connection.h (module 'wimax'): bool ns3::WimaxConnection::HasPackets() const [member function]
cls.add_method('HasPackets',
'bool',
[],
is_const=True)
## wimax-connection.h (module 'wimax'): bool ns3::WimaxConnection::HasPackets(ns3::MacHeaderType::HeaderType packetType) const [member function]
cls.add_method('HasPackets',
'bool',
[param('ns3::MacHeaderType::HeaderType', 'packetType')],
is_const=True)
## wimax-connection.h (module 'wimax'): void ns3::WimaxConnection::SetServiceFlow(ns3::ServiceFlow * serviceFlow) [member function]
cls.add_method('SetServiceFlow',
'void',
[param('ns3::ServiceFlow *', 'serviceFlow')])
## wimax-connection.h (module 'wimax'): void ns3::WimaxConnection::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
is_virtual=True, visibility='private')
return
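# Illustrative sketch (not used by the binding generator): creating a WimaxConnection via
# the bindings registered above. The ns3::Cid(uint16_t) constructor and the Cid.BASIC enum
# spelling live elsewhere in the wimax module and are assumptions here; the WimaxConnection
# calls themselves come from the registrations above.
def _example_wimax_connection():
    """Hedged usage sketch: build a basic connection and read back its identifiers."""
    import ns.wimax  # assumed module name of the compiled wimax bindings

    cid = ns.wimax.Cid(1)                                     # assumed Cid(uint16_t) constructor
    conn = ns.wimax.WimaxConnection(cid, ns.wimax.Cid.BASIC)  # Cid::Type enum access is assumed
    # GetCid/GetType/GetTypeStr/HasPackets are bound above as const member functions.
    return conn.GetCid(), conn.GetType(), conn.GetTypeStr(), conn.HasPackets()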
def register_Ns3WimaxMacQueue_methods(root_module, cls):
## wimax-mac-queue.h (module 'wimax'): ns3::WimaxMacQueue::WimaxMacQueue(ns3::WimaxMacQueue const & arg0) [constructor]
cls.add_constructor([param('ns3::WimaxMacQueue const &', 'arg0')])
## wimax-mac-queue.h (module 'wimax'): ns3::WimaxMacQueue::WimaxMacQueue() [constructor]
cls.add_constructor([])
## wimax-mac-queue.h (module 'wimax'): ns3::WimaxMacQueue::WimaxMacQueue(uint32_t maxSize) [constructor]
cls.add_constructor([param('uint32_t', 'maxSize')])
## wimax-mac-queue.h (module 'wimax'): bool ns3::WimaxMacQueue::CheckForFragmentation(ns3::MacHeaderType::HeaderType packetType) [member function]
cls.add_method('CheckForFragmentation',
'bool',
[param('ns3::MacHeaderType::HeaderType', 'packetType')])
## wimax-mac-queue.h (module 'wimax'): ns3::Ptr<ns3::Packet> ns3::WimaxMacQueue::Dequeue(ns3::MacHeaderType::HeaderType packetType) [member function]
cls.add_method('Dequeue',
'ns3::Ptr< ns3::Packet >',
[param('ns3::MacHeaderType::HeaderType', 'packetType')])
## wimax-mac-queue.h (module 'wimax'): ns3::Ptr<ns3::Packet> ns3::WimaxMacQueue::Dequeue(ns3::MacHeaderType::HeaderType packetType, uint32_t availableByte) [member function]
cls.add_method('Dequeue',
'ns3::Ptr< ns3::Packet >',
[param('ns3::MacHeaderType::HeaderType', 'packetType'), param('uint32_t', 'availableByte')])
## wimax-mac-queue.h (module 'wimax'): bool ns3::WimaxMacQueue::Enqueue(ns3::Ptr<ns3::Packet> packet, ns3::MacHeaderType const & hdrType, ns3::GenericMacHeader const & hdr) [member function]
cls.add_method('Enqueue',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::MacHeaderType const &', 'hdrType'), param('ns3::GenericMacHeader const &', 'hdr')])
## wimax-mac-queue.h (module 'wimax'): uint32_t ns3::WimaxMacQueue::GetFirstPacketHdrSize(ns3::MacHeaderType::HeaderType packetType) [member function]
cls.add_method('GetFirstPacketHdrSize',
'uint32_t',
[param('ns3::MacHeaderType::HeaderType', 'packetType')])
## wimax-mac-queue.h (module 'wimax'): uint32_t ns3::WimaxMacQueue::GetFirstPacketPayloadSize(ns3::MacHeaderType::HeaderType packetType) [member function]
cls.add_method('GetFirstPacketPayloadSize',
'uint32_t',
[param('ns3::MacHeaderType::HeaderType', 'packetType')])
## wimax-mac-queue.h (module 'wimax'): uint32_t ns3::WimaxMacQueue::GetFirstPacketRequiredByte(ns3::MacHeaderType::HeaderType packetType) [member function]
cls.add_method('GetFirstPacketRequiredByte',
'uint32_t',
[param('ns3::MacHeaderType::HeaderType', 'packetType')])
## wimax-mac-queue.h (module 'wimax'): uint32_t ns3::WimaxMacQueue::GetMaxSize() const [member function]
cls.add_method('GetMaxSize',
'uint32_t',
[],
is_const=True)
## wimax-mac-queue.h (module 'wimax'): uint32_t ns3::WimaxMacQueue::GetNBytes() const [member function]
cls.add_method('GetNBytes',
'uint32_t',
[],
is_const=True)
## wimax-mac-queue.h (module 'wimax'): ns3::WimaxMacQueue::PacketQueue const & ns3::WimaxMacQueue::GetPacketQueue() const [member function]
cls.add_method('GetPacketQueue',
'ns3::WimaxMacQueue::PacketQueue const &',
[],
is_const=True)
## wimax-mac-queue.h (module 'wimax'): uint32_t ns3::WimaxMacQueue::GetQueueLengthWithMACOverhead() [member function]
cls.add_method('GetQueueLengthWithMACOverhead',
'uint32_t',
[])
## wimax-mac-queue.h (module 'wimax'): uint32_t ns3::WimaxMacQueue::GetSize() const [member function]
cls.add_method('GetSize',
'uint32_t',
[],
is_const=True)
## wimax-mac-queue.h (module 'wimax'): static ns3::TypeId ns3::WimaxMacQueue::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## wimax-mac-queue.h (module 'wimax'): bool ns3::WimaxMacQueue::IsEmpty() const [member function]
cls.add_method('IsEmpty',
'bool',
[],
is_const=True)
## wimax-mac-queue.h (module 'wimax'): bool ns3::WimaxMacQueue::IsEmpty(ns3::MacHeaderType::HeaderType packetType) const [member function]
cls.add_method('IsEmpty',
'bool',
[param('ns3::MacHeaderType::HeaderType', 'packetType')],
is_const=True)
## wimax-mac-queue.h (module 'wimax'): ns3::Ptr<ns3::Packet> ns3::WimaxMacQueue::Peek(ns3::GenericMacHeader & hdr) const [member function]
cls.add_method('Peek',
'ns3::Ptr< ns3::Packet >',
[param('ns3::GenericMacHeader &', 'hdr')],
is_const=True)
## wimax-mac-queue.h (module 'wimax'): ns3::Ptr<ns3::Packet> ns3::WimaxMacQueue::Peek(ns3::GenericMacHeader & hdr, ns3::Time & timeStamp) const [member function]
cls.add_method('Peek',
'ns3::Ptr< ns3::Packet >',
[param('ns3::GenericMacHeader &', 'hdr'), param('ns3::Time &', 'timeStamp')],
is_const=True)
## wimax-mac-queue.h (module 'wimax'): ns3::Ptr<ns3::Packet> ns3::WimaxMacQueue::Peek(ns3::MacHeaderType::HeaderType packetType) const [member function]
cls.add_method('Peek',
'ns3::Ptr< ns3::Packet >',
[param('ns3::MacHeaderType::HeaderType', 'packetType')],
is_const=True)
## wimax-mac-queue.h (module 'wimax'): ns3::Ptr<ns3::Packet> ns3::WimaxMacQueue::Peek(ns3::MacHeaderType::HeaderType packetType, ns3::Time & timeStamp) const [member function]
cls.add_method('Peek',
'ns3::Ptr< ns3::Packet >',
[param('ns3::MacHeaderType::HeaderType', 'packetType'), param('ns3::Time &', 'timeStamp')],
is_const=True)
## wimax-mac-queue.h (module 'wimax'): void ns3::WimaxMacQueue::SetFragmentNumber(ns3::MacHeaderType::HeaderType packetType) [member function]
cls.add_method('SetFragmentNumber',
'void',
[param('ns3::MacHeaderType::HeaderType', 'packetType')])
## wimax-mac-queue.h (module 'wimax'): void ns3::WimaxMacQueue::SetFragmentOffset(ns3::MacHeaderType::HeaderType packetType, uint32_t offset) [member function]
cls.add_method('SetFragmentOffset',
'void',
[param('ns3::MacHeaderType::HeaderType', 'packetType'), param('uint32_t', 'offset')])
## wimax-mac-queue.h (module 'wimax'): void ns3::WimaxMacQueue::SetFragmentation(ns3::MacHeaderType::HeaderType packetType) [member function]
cls.add_method('SetFragmentation',
'void',
[param('ns3::MacHeaderType::HeaderType', 'packetType')])
## wimax-mac-queue.h (module 'wimax'): void ns3::WimaxMacQueue::SetMaxSize(uint32_t maxSize) [member function]
cls.add_method('SetMaxSize',
'void',
[param('uint32_t', 'maxSize')])
return
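# Illustrative sketch (not used by the binding generator): enqueueing one packet into the
# WimaxMacQueue bound above. The ns.network / ns.wimax import paths and the Packet,
# MacHeaderType and GenericMacHeader constructors are assumptions from elsewhere in ns-3;
# the WimaxMacQueue calls themselves come from the registrations above.
def _example_wimax_mac_queue():
    """Hedged usage sketch: fill a bounded MAC queue and inspect its state."""
    import ns.network  # assumed home of ns3::Packet
    import ns.wimax    # assumed module name of the compiled wimax bindings

    queue = ns.wimax.WimaxMacQueue(16)       # bounded queue, maxSize = 16 packets
    hdr_type = ns.wimax.MacHeaderType()      # assumed default constructor
    hdr = ns.wimax.GenericMacHeader()        # assumed default constructor
    accepted = queue.Enqueue(ns.network.Packet(64), hdr_type, hdr)
    # GetSize/GetNBytes/IsEmpty are bound above as const accessors.
    return accepted, queue.GetSize(), queue.GetNBytes(), queue.IsEmpty()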
def register_Ns3WimaxMacQueueQueueElement_methods(root_module, cls):
## wimax-mac-queue.h (module 'wimax'): ns3::WimaxMacQueue::QueueElement::QueueElement(ns3::WimaxMacQueue::QueueElement const & arg0) [constructor]
cls.add_constructor([param('ns3::WimaxMacQueue::QueueElement const &', 'arg0')])
## wimax-mac-queue.h (module 'wimax'): ns3::WimaxMacQueue::QueueElement::QueueElement() [constructor]
cls.add_constructor([])
## wimax-mac-queue.h (module 'wimax'): ns3::WimaxMacQueue::QueueElement::QueueElement(ns3::Ptr<ns3::Packet> packet, ns3::MacHeaderType const & hdrType, ns3::GenericMacHeader const & hdr, ns3::Time timeStamp) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::MacHeaderType const &', 'hdrType'), param('ns3::GenericMacHeader const &', 'hdr'), param('ns3::Time', 'timeStamp')])
## wimax-mac-queue.h (module 'wimax'): uint32_t ns3::WimaxMacQueue::QueueElement::GetSize() const [member function]
cls.add_method('GetSize',
'uint32_t',
[],
is_const=True)
## wimax-mac-queue.h (module 'wimax'): void ns3::WimaxMacQueue::QueueElement::SetFragmentNumber() [member function]
cls.add_method('SetFragmentNumber',
'void',
[])
## wimax-mac-queue.h (module 'wimax'): void ns3::WimaxMacQueue::QueueElement::SetFragmentOffset(uint32_t offset) [member function]
cls.add_method('SetFragmentOffset',
'void',
[param('uint32_t', 'offset')])
## wimax-mac-queue.h (module 'wimax'): void ns3::WimaxMacQueue::QueueElement::SetFragmentation() [member function]
cls.add_method('SetFragmentation',
'void',
[])
## wimax-mac-queue.h (module 'wimax'): ns3::WimaxMacQueue::QueueElement::m_fragmentNumber [variable]
cls.add_instance_attribute('m_fragmentNumber', 'uint32_t', is_const=False)
## wimax-mac-queue.h (module 'wimax'): ns3::WimaxMacQueue::QueueElement::m_fragmentOffset [variable]
cls.add_instance_attribute('m_fragmentOffset', 'uint32_t', is_const=False)
## wimax-mac-queue.h (module 'wimax'): ns3::WimaxMacQueue::QueueElement::m_fragmentation [variable]
cls.add_instance_attribute('m_fragmentation', 'bool', is_const=False)
## wimax-mac-queue.h (module 'wimax'): ns3::WimaxMacQueue::QueueElement::m_hdr [variable]
cls.add_instance_attribute('m_hdr', 'ns3::GenericMacHeader', is_const=False)
## wimax-mac-queue.h (module 'wimax'): ns3::WimaxMacQueue::QueueElement::m_hdrType [variable]
cls.add_instance_attribute('m_hdrType', 'ns3::MacHeaderType', is_const=False)
## wimax-mac-queue.h (module 'wimax'): ns3::WimaxMacQueue::QueueElement::m_packet [variable]
cls.add_instance_attribute('m_packet', 'ns3::Ptr< ns3::Packet >', is_const=False)
## wimax-mac-queue.h (module 'wimax'): ns3::WimaxMacQueue::QueueElement::m_timeStamp [variable]
cls.add_instance_attribute('m_timeStamp', 'ns3::Time', is_const=False)
return
def register_Ns3WimaxMacToMacHeader_methods(root_module, cls):
## wimax-mac-to-mac-header.h (module 'wimax'): ns3::WimaxMacToMacHeader::WimaxMacToMacHeader(ns3::WimaxMacToMacHeader const & arg0) [constructor]
cls.add_constructor([param('ns3::WimaxMacToMacHeader const &', 'arg0')])
## wimax-mac-to-mac-header.h (module 'wimax'): ns3::WimaxMacToMacHeader::WimaxMacToMacHeader() [constructor]
cls.add_constructor([])
## wimax-mac-to-mac-header.h (module 'wimax'): ns3::WimaxMacToMacHeader::WimaxMacToMacHeader(uint32_t len) [constructor]
cls.add_constructor([param('uint32_t', 'len')])
## wimax-mac-to-mac-header.h (module 'wimax'): uint32_t ns3::WimaxMacToMacHeader::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_virtual=True)
## wimax-mac-to-mac-header.h (module 'wimax'): ns3::TypeId ns3::WimaxMacToMacHeader::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## wimax-mac-to-mac-header.h (module 'wimax'): uint32_t ns3::WimaxMacToMacHeader::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True, is_virtual=True)
## wimax-mac-to-mac-header.h (module 'wimax'): uint8_t ns3::WimaxMacToMacHeader::GetSizeOfLen() const [member function]
cls.add_method('GetSizeOfLen',
'uint8_t',
[],
is_const=True)
## wimax-mac-to-mac-header.h (module 'wimax'): static ns3::TypeId ns3::WimaxMacToMacHeader::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## wimax-mac-to-mac-header.h (module 'wimax'): void ns3::WimaxMacToMacHeader::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_virtual=True)
## wimax-mac-to-mac-header.h (module 'wimax'): void ns3::WimaxMacToMacHeader::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_const=True, is_virtual=True)
return
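# Illustrative sketch (not used by the binding generator): size bookkeeping of the
# MAC-to-MAC encapsulation header bound above. Only the constructor and const accessors
# registered in this file are used; the ns.wimax import path is an assumption.
def _example_wimax_mac_to_mac_header():
    """Hedged usage sketch: query the serialized size of a MAC-to-MAC header."""
    import ns.wimax  # assumed module name of the compiled wimax bindings

    hdr = ns.wimax.WimaxMacToMacHeader(128)  # len = 128, per the uint32_t constructor above
    # GetSizeOfLen reports how many bytes the length field occupies; GetSerializedSize
    # reports the full on-wire size of the header.
    return hdr.GetSizeOfLen(), hdr.GetSerializedSize()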
def register_Ns3WimaxPhy_methods(root_module, cls):
## wimax-phy.h (module 'wimax'): ns3::WimaxPhy::WimaxPhy(ns3::WimaxPhy const & arg0) [constructor]
cls.add_constructor([param('ns3::WimaxPhy const &', 'arg0')])
## wimax-phy.h (module 'wimax'): ns3::WimaxPhy::WimaxPhy() [constructor]
cls.add_constructor([])
## wimax-phy.h (module 'wimax'): int64_t ns3::WimaxPhy::AssignStreams(int64_t stream) [member function]
cls.add_method('AssignStreams',
'int64_t',
[param('int64_t', 'stream')],
is_virtual=True, is_pure_virtual=True)
## wimax-phy.h (module 'wimax'): void ns3::WimaxPhy::Attach(ns3::Ptr<ns3::WimaxChannel> channel) [member function]
cls.add_method('Attach',
'void',
[param('ns3::Ptr< ns3::WimaxChannel >', 'channel')])
## wimax-phy.h (module 'wimax'): void ns3::WimaxPhy::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
is_virtual=True)
## wimax-phy.h (module 'wimax'): ns3::Ptr<ns3::WimaxChannel> ns3::WimaxPhy::GetChannel() const [member function]
cls.add_method('GetChannel',
'ns3::Ptr< ns3::WimaxChannel >',
[],
is_const=True)
## wimax-phy.h (module 'wimax'): uint32_t ns3::WimaxPhy::GetChannelBandwidth() const [member function]
cls.add_method('GetChannelBandwidth',
'uint32_t',
[],
is_const=True)
## wimax-phy.h (module 'wimax'): ns3::EventId ns3::WimaxPhy::GetChnlSrchTimeoutEvent() const [member function]
cls.add_method('GetChnlSrchTimeoutEvent',
'ns3::EventId',
[],
is_const=True)
## wimax-phy.h (module 'wimax'): uint32_t ns3::WimaxPhy::GetDataRate(ns3::WimaxPhy::ModulationType modulationType) const [member function]
cls.add_method('GetDataRate',
'uint32_t',
[param('ns3::WimaxPhy::ModulationType', 'modulationType')],
is_const=True)
## wimax-phy.h (module 'wimax'): ns3::Ptr<ns3::NetDevice> ns3::WimaxPhy::GetDevice() const [member function]
cls.add_method('GetDevice',
'ns3::Ptr< ns3::NetDevice >',
[],
is_const=True)
## wimax-phy.h (module 'wimax'): ns3::Time ns3::WimaxPhy::GetFrameDuration() const [member function]
cls.add_method('GetFrameDuration',
'ns3::Time',
[],
is_const=True)
## wimax-phy.h (module 'wimax'): ns3::Time ns3::WimaxPhy::GetFrameDuration(uint8_t frameDurationCode) const [member function]
cls.add_method('GetFrameDuration',
'ns3::Time',
[param('uint8_t', 'frameDurationCode')],
is_const=True)
## wimax-phy.h (module 'wimax'): uint8_t ns3::WimaxPhy::GetFrameDurationCode() const [member function]
cls.add_method('GetFrameDurationCode',
'uint8_t',
[],
is_const=True)
## wimax-phy.h (module 'wimax'): ns3::Time ns3::WimaxPhy::GetFrameDurationSec() const [member function]
cls.add_method('GetFrameDurationSec',
'ns3::Time',
[],
is_const=True)
## wimax-phy.h (module 'wimax'): uint32_t ns3::WimaxPhy::GetFrequency() const [member function]
cls.add_method('GetFrequency',
'uint32_t',
[],
is_const=True)
## wimax-phy.h (module 'wimax'): double ns3::WimaxPhy::GetGValue() const [member function]
cls.add_method('GetGValue',
'double',
[],
is_const=True)
## wimax-phy.h (module 'wimax'): ns3::Ptr<ns3::Object> ns3::WimaxPhy::GetMobility() [member function]
cls.add_method('GetMobility',
'ns3::Ptr< ns3::Object >',
[],
is_virtual=True)
## wimax-phy.h (module 'wimax'): uint16_t ns3::WimaxPhy::GetNfft() const [member function]
cls.add_method('GetNfft',
'uint16_t',
[],
is_const=True)
## wimax-phy.h (module 'wimax'): uint64_t ns3::WimaxPhy::GetNrBytes(uint32_t symbols, ns3::WimaxPhy::ModulationType modulationType) const [member function]
cls.add_method('GetNrBytes',
'uint64_t',
[param('uint32_t', 'symbols'), param('ns3::WimaxPhy::ModulationType', 'modulationType')],
is_const=True)
## wimax-phy.h (module 'wimax'): uint8_t ns3::WimaxPhy::GetNrCarriers() const [member function]
cls.add_method('GetNrCarriers',
'uint8_t',
[],
is_const=True)
## wimax-phy.h (module 'wimax'): uint64_t ns3::WimaxPhy::GetNrSymbols(uint32_t size, ns3::WimaxPhy::ModulationType modulationType) const [member function]
cls.add_method('GetNrSymbols',
'uint64_t',
[param('uint32_t', 'size'), param('ns3::WimaxPhy::ModulationType', 'modulationType')],
is_const=True)
## wimax-phy.h (module 'wimax'): ns3::WimaxPhy::PhyType ns3::WimaxPhy::GetPhyType() const [member function]
cls.add_method('GetPhyType',
'ns3::WimaxPhy::PhyType',
[],
is_const=True, is_virtual=True, is_pure_virtual=True)
## wimax-phy.h (module 'wimax'): ns3::Time ns3::WimaxPhy::GetPsDuration() const [member function]
cls.add_method('GetPsDuration',
'ns3::Time',
[],
is_const=True)
## wimax-phy.h (module 'wimax'): uint16_t ns3::WimaxPhy::GetPsPerFrame() const [member function]
cls.add_method('GetPsPerFrame',
'uint16_t',
[],
is_const=True)
## wimax-phy.h (module 'wimax'): uint16_t ns3::WimaxPhy::GetPsPerSymbol() const [member function]
cls.add_method('GetPsPerSymbol',
'uint16_t',
[],
is_const=True)
## wimax-phy.h (module 'wimax'): ns3::Callback<void, ns3::Ptr<const ns3::PacketBurst>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> ns3::WimaxPhy::GetReceiveCallback() const [member function]
cls.add_method('GetReceiveCallback',
'ns3::Callback< void, ns3::Ptr< ns3::PacketBurst const >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >',
[],
is_const=True)
## wimax-phy.h (module 'wimax'): uint16_t ns3::WimaxPhy::GetRtg() const [member function]
cls.add_method('GetRtg',
'uint16_t',
[],
is_const=True)
## wimax-phy.h (module 'wimax'): uint64_t ns3::WimaxPhy::GetRxFrequency() const [member function]
cls.add_method('GetRxFrequency',
'uint64_t',
[],
is_const=True)
## wimax-phy.h (module 'wimax'): double ns3::WimaxPhy::GetSamplingFactor() const [member function]
cls.add_method('GetSamplingFactor',
'double',
[],
is_const=True)
## wimax-phy.h (module 'wimax'): double ns3::WimaxPhy::GetSamplingFrequency() const [member function]
cls.add_method('GetSamplingFrequency',
'double',
[],
is_const=True)
## wimax-phy.h (module 'wimax'): uint64_t ns3::WimaxPhy::GetScanningFrequency() const [member function]
cls.add_method('GetScanningFrequency',
'uint64_t',
[],
is_const=True)
## wimax-phy.h (module 'wimax'): ns3::WimaxPhy::PhyState ns3::WimaxPhy::GetState() const [member function]
cls.add_method('GetState',
'ns3::WimaxPhy::PhyState',
[],
is_const=True)
## wimax-phy.h (module 'wimax'): ns3::Time ns3::WimaxPhy::GetSymbolDuration() const [member function]
cls.add_method('GetSymbolDuration',
'ns3::Time',
[],
is_const=True)
## wimax-phy.h (module 'wimax'): uint32_t ns3::WimaxPhy::GetSymbolsPerFrame() const [member function]
cls.add_method('GetSymbolsPerFrame',
'uint32_t',
[],
is_const=True)
## wimax-phy.h (module 'wimax'): ns3::Time ns3::WimaxPhy::GetTransmissionTime(uint32_t size, ns3::WimaxPhy::ModulationType modulationType) const [member function]
cls.add_method('GetTransmissionTime',
'ns3::Time',
[param('uint32_t', 'size'), param('ns3::WimaxPhy::ModulationType', 'modulationType')],
is_const=True)
## wimax-phy.h (module 'wimax'): uint16_t ns3::WimaxPhy::GetTtg() const [member function]
cls.add_method('GetTtg',
'uint16_t',
[],
is_const=True)
## wimax-phy.h (module 'wimax'): uint64_t ns3::WimaxPhy::GetTxFrequency() const [member function]
cls.add_method('GetTxFrequency',
'uint64_t',
[],
is_const=True)
## wimax-phy.h (module 'wimax'): static ns3::TypeId ns3::WimaxPhy::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## wimax-phy.h (module 'wimax'): bool ns3::WimaxPhy::IsDuplex() const [member function]
cls.add_method('IsDuplex',
'bool',
[],
is_const=True)
## wimax-phy.h (module 'wimax'): void ns3::WimaxPhy::Send(ns3::SendParams * params) [member function]
cls.add_method('Send',
'void',
[param('ns3::SendParams *', 'params')],
is_virtual=True, is_pure_virtual=True)
## wimax-phy.h (module 'wimax'): void ns3::WimaxPhy::SetChannelBandwidth(uint32_t channelBandwidth) [member function]
cls.add_method('SetChannelBandwidth',
'void',
[param('uint32_t', 'channelBandwidth')])
## wimax-phy.h (module 'wimax'): void ns3::WimaxPhy::SetDataRates() [member function]
cls.add_method('SetDataRates',
'void',
[])
## wimax-phy.h (module 'wimax'): void ns3::WimaxPhy::SetDevice(ns3::Ptr<ns3::WimaxNetDevice> device) [member function]
cls.add_method('SetDevice',
'void',
[param('ns3::Ptr< ns3::WimaxNetDevice >', 'device')])
## wimax-phy.h (module 'wimax'): void ns3::WimaxPhy::SetDuplex(uint64_t rxFrequency, uint64_t txFrequency) [member function]
cls.add_method('SetDuplex',
'void',
[param('uint64_t', 'rxFrequency'), param('uint64_t', 'txFrequency')])
## wimax-phy.h (module 'wimax'): void ns3::WimaxPhy::SetFrameDuration(ns3::Time frameDuration) [member function]
cls.add_method('SetFrameDuration',
'void',
[param('ns3::Time', 'frameDuration')])
## wimax-phy.h (module 'wimax'): void ns3::WimaxPhy::SetFrequency(uint32_t frequency) [member function]
cls.add_method('SetFrequency',
'void',
[param('uint32_t', 'frequency')])
## wimax-phy.h (module 'wimax'): void ns3::WimaxPhy::SetMobility(ns3::Ptr<ns3::Object> mobility) [member function]
cls.add_method('SetMobility',
'void',
[param('ns3::Ptr< ns3::Object >', 'mobility')],
is_virtual=True)
## wimax-phy.h (module 'wimax'): void ns3::WimaxPhy::SetNrCarriers(uint8_t nrCarriers) [member function]
cls.add_method('SetNrCarriers',
'void',
[param('uint8_t', 'nrCarriers')])
## wimax-phy.h (module 'wimax'): void ns3::WimaxPhy::SetPhyParameters() [member function]
cls.add_method('SetPhyParameters',
'void',
[])
## wimax-phy.h (module 'wimax'): void ns3::WimaxPhy::SetPsDuration(ns3::Time psDuration) [member function]
cls.add_method('SetPsDuration',
'void',
[param('ns3::Time', 'psDuration')])
## wimax-phy.h (module 'wimax'): void ns3::WimaxPhy::SetPsPerFrame(uint16_t psPerFrame) [member function]
cls.add_method('SetPsPerFrame',
'void',
[param('uint16_t', 'psPerFrame')])
## wimax-phy.h (module 'wimax'): void ns3::WimaxPhy::SetPsPerSymbol(uint16_t psPerSymbol) [member function]
cls.add_method('SetPsPerSymbol',
'void',
[param('uint16_t', 'psPerSymbol')])
## wimax-phy.h (module 'wimax'): void ns3::WimaxPhy::SetReceiveCallback(ns3::Callback<void, ns3::Ptr<const ns3::PacketBurst>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [member function]
cls.add_method('SetReceiveCallback',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::PacketBurst const >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')])
## wimax-phy.h (module 'wimax'): void ns3::WimaxPhy::SetScanningCallback() const [member function]
cls.add_method('SetScanningCallback',
'void',
[],
is_const=True)
## wimax-phy.h (module 'wimax'): void ns3::WimaxPhy::SetSimplex(uint64_t frequency) [member function]
cls.add_method('SetSimplex',
'void',
[param('uint64_t', 'frequency')])
## wimax-phy.h (module 'wimax'): void ns3::WimaxPhy::SetState(ns3::WimaxPhy::PhyState state) [member function]
cls.add_method('SetState',
'void',
[param('ns3::WimaxPhy::PhyState', 'state')])
## wimax-phy.h (module 'wimax'): void ns3::WimaxPhy::SetSymbolDuration(ns3::Time symbolDuration) [member function]
cls.add_method('SetSymbolDuration',
'void',
[param('ns3::Time', 'symbolDuration')])
## wimax-phy.h (module 'wimax'): void ns3::WimaxPhy::SetSymbolsPerFrame(uint32_t symbolsPerFrame) [member function]
cls.add_method('SetSymbolsPerFrame',
'void',
[param('uint32_t', 'symbolsPerFrame')])
## wimax-phy.h (module 'wimax'): void ns3::WimaxPhy::StartScanning(uint64_t frequency, ns3::Time timeout, ns3::Callback<void, bool, unsigned long, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [member function]
cls.add_method('StartScanning',
'void',
[param('uint64_t', 'frequency'), param('ns3::Time', 'timeout'), param('ns3::Callback< void, bool, unsigned long, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')])
## wimax-phy.h (module 'wimax'): void ns3::WimaxPhy::DoAttach(ns3::Ptr<ns3::WimaxChannel> channel) [member function]
cls.add_method('DoAttach',
'void',
[param('ns3::Ptr< ns3::WimaxChannel >', 'channel')],
is_virtual=True, is_pure_virtual=True, visibility='private')
## wimax-phy.h (module 'wimax'): uint32_t ns3::WimaxPhy::DoGetDataRate(ns3::WimaxPhy::ModulationType modulationType) const [member function]
cls.add_method('DoGetDataRate',
'uint32_t',
[param('ns3::WimaxPhy::ModulationType', 'modulationType')],
is_const=True, is_virtual=True, is_pure_virtual=True, visibility='private')
## wimax-phy.h (module 'wimax'): ns3::Time ns3::WimaxPhy::DoGetFrameDuration(uint8_t frameDurationCode) const [member function]
cls.add_method('DoGetFrameDuration',
'ns3::Time',
[param('uint8_t', 'frameDurationCode')],
is_const=True, is_virtual=True, is_pure_virtual=True, visibility='private')
## wimax-phy.h (module 'wimax'): uint8_t ns3::WimaxPhy::DoGetFrameDurationCode() const [member function]
cls.add_method('DoGetFrameDurationCode',
'uint8_t',
[],
is_const=True, is_virtual=True, is_pure_virtual=True, visibility='private')
## wimax-phy.h (module 'wimax'): double ns3::WimaxPhy::DoGetGValue() const [member function]
cls.add_method('DoGetGValue',
'double',
[],
is_const=True, is_virtual=True, is_pure_virtual=True, visibility='private')
## wimax-phy.h (module 'wimax'): uint16_t ns3::WimaxPhy::DoGetNfft() const [member function]
cls.add_method('DoGetNfft',
'uint16_t',
[],
is_const=True, is_virtual=True, is_pure_virtual=True, visibility='private')
## wimax-phy.h (module 'wimax'): uint64_t ns3::WimaxPhy::DoGetNrBytes(uint32_t symbols, ns3::WimaxPhy::ModulationType modulationType) const [member function]
cls.add_method('DoGetNrBytes',
'uint64_t',
[param('uint32_t', 'symbols'), param('ns3::WimaxPhy::ModulationType', 'modulationType')],
is_const=True, is_virtual=True, is_pure_virtual=True, visibility='private')
## wimax-phy.h (module 'wimax'): uint64_t ns3::WimaxPhy::DoGetNrSymbols(uint32_t size, ns3::WimaxPhy::ModulationType modulationType) const [member function]
cls.add_method('DoGetNrSymbols',
'uint64_t',
[param('uint32_t', 'size'), param('ns3::WimaxPhy::ModulationType', 'modulationType')],
is_const=True, is_virtual=True, is_pure_virtual=True, visibility='private')
## wimax-phy.h (module 'wimax'): uint16_t ns3::WimaxPhy::DoGetRtg() const [member function]
cls.add_method('DoGetRtg',
'uint16_t',
[],
is_const=True, is_virtual=True, is_pure_virtual=True, visibility='private')
## wimax-phy.h (module 'wimax'): double ns3::WimaxPhy::DoGetSamplingFactor() const [member function]
cls.add_method('DoGetSamplingFactor',
'double',
[],
is_const=True, is_virtual=True, is_pure_virtual=True, visibility='private')
## wimax-phy.h (module 'wimax'): double ns3::WimaxPhy::DoGetSamplingFrequency() const [member function]
cls.add_method('DoGetSamplingFrequency',
'double',
[],
is_const=True, is_virtual=True, is_pure_virtual=True, visibility='private')
## wimax-phy.h (module 'wimax'): ns3::Time ns3::WimaxPhy::DoGetTransmissionTime(uint32_t size, ns3::WimaxPhy::ModulationType modulationType) const [member function]
cls.add_method('DoGetTransmissionTime',
'ns3::Time',
[param('uint32_t', 'size'), param('ns3::WimaxPhy::ModulationType', 'modulationType')],
is_const=True, is_virtual=True, is_pure_virtual=True, visibility='private')
## wimax-phy.h (module 'wimax'): uint16_t ns3::WimaxPhy::DoGetTtg() const [member function]
cls.add_method('DoGetTtg',
'uint16_t',
[],
is_const=True, is_virtual=True, is_pure_virtual=True, visibility='private')
## wimax-phy.h (module 'wimax'): void ns3::WimaxPhy::DoSetDataRates() [member function]
cls.add_method('DoSetDataRates',
'void',
[],
is_virtual=True, is_pure_virtual=True, visibility='private')
## wimax-phy.h (module 'wimax'): void ns3::WimaxPhy::DoSetPhyParameters() [member function]
cls.add_method('DoSetPhyParameters',
'void',
[],
is_virtual=True, is_pure_virtual=True, visibility='private')
return
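# Illustrative sketch (not used by the binding generator): WimaxPhy is abstract (several
# pure-virtual Do* methods above), so this sketch assumes the concrete SimpleOfdmWimaxPhy
# subclass from the wimax module; that class name and the ns.wimax import path are
# assumptions, while SetSimplex/Get*Frequency/IsDuplex are bound in this file.
def _example_wimax_phy_frequencies():
    """Hedged usage sketch: configure simplex operation and read the frequencies back."""
    import ns.wimax  # assumed module name of the compiled wimax bindings

    phy = ns.wimax.SimpleOfdmWimaxPhy()   # assumed concrete WimaxPhy subclass
    phy.SetSimplex(5000000)               # single rx/tx frequency (units as used by the model)
    # After SetSimplex both directions should report the same frequency; IsDuplex is
    # returned as-is rather than asserted.
    return phy.GetRxFrequency(), phy.GetTxFrequency(), phy.IsDuplex()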
def register_Ns3ZetaRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ZetaRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::ZetaRandomVariable::ZetaRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::ZetaRandomVariable::GetAlpha() const [member function]
cls.add_method('GetAlpha',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ZetaRandomVariable::GetValue(double alpha) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'alpha')])
## random-variable-stream.h (module 'core'): uint32_t ns3::ZetaRandomVariable::GetInteger(uint32_t alpha) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'alpha')])
## random-variable-stream.h (module 'core'): double ns3::ZetaRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::ZetaRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3ZipfRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ZipfRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::ZipfRandomVariable::ZipfRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): uint32_t ns3::ZipfRandomVariable::GetN() const [member function]
cls.add_method('GetN',
'uint32_t',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ZipfRandomVariable::GetAlpha() const [member function]
cls.add_method('GetAlpha',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ZipfRandomVariable::GetValue(uint32_t n, double alpha) [member function]
cls.add_method('GetValue',
'double',
[param('uint32_t', 'n'), param('double', 'alpha')])
## random-variable-stream.h (module 'core'): uint32_t ns3::ZipfRandomVariable::GetInteger(uint32_t n, uint32_t alpha) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'n'), param('uint32_t', 'alpha')])
## random-variable-stream.h (module 'core'): double ns3::ZipfRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::ZipfRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3AttributeAccessor_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [constructor]
cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
cls.add_method('Get',
'bool',
[param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')],
is_const=True, is_virtual=True, is_pure_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasGetter() const [member function]
cls.add_method('HasGetter',
'bool',
[],
is_const=True, is_virtual=True, is_pure_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasSetter() const [member function]
cls.add_method('HasSetter',
'bool',
[],
is_const=True, is_virtual=True, is_pure_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
cls.add_method('Set',
'bool',
[param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')],
is_const=True, is_virtual=True, is_pure_virtual=True)
return
def register_Ns3AttributeChecker_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
cls.add_method('Check',
'bool',
[param('ns3::AttributeValue const &', 'value')],
is_const=True, is_virtual=True, is_pure_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
cls.add_method('Copy',
'bool',
[param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
is_const=True, is_virtual=True, is_pure_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True, is_pure_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::CreateValidValue(ns3::AttributeValue const & value) const [member function]
cls.add_method('CreateValidValue',
'ns3::Ptr< ns3::AttributeValue >',
[param('ns3::AttributeValue const &', 'value')],
is_const=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function]
cls.add_method('GetUnderlyingTypeInformation',
'std::string',
[],
is_const=True, is_virtual=True, is_pure_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetValueTypeName() const [member function]
cls.add_method('GetValueTypeName',
'std::string',
[],
is_const=True, is_virtual=True, is_pure_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const [member function]
cls.add_method('HasUnderlyingTypeInformation',
'bool',
[],
is_const=True, is_virtual=True, is_pure_virtual=True)
return
def register_Ns3AttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [constructor]
cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True, is_pure_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True, is_pure_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True, is_pure_virtual=True)
return
def register_Ns3BSScheduler_methods(root_module, cls):
## bs-scheduler.h (module 'wimax'): ns3::BSScheduler::BSScheduler(ns3::BSScheduler const & arg0) [constructor]
cls.add_constructor([param('ns3::BSScheduler const &', 'arg0')])
## bs-scheduler.h (module 'wimax'): ns3::BSScheduler::BSScheduler() [constructor]
cls.add_constructor([])
## bs-scheduler.h (module 'wimax'): ns3::BSScheduler::BSScheduler(ns3::Ptr<ns3::BaseStationNetDevice> bs) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::BaseStationNetDevice >', 'bs')])
## bs-scheduler.h (module 'wimax'): void ns3::BSScheduler::AddDownlinkBurst(ns3::Ptr<const ns3::WimaxConnection> connection, uint8_t diuc, ns3::WimaxPhy::ModulationType modulationType, ns3::Ptr<ns3::PacketBurst> burst) [member function]
cls.add_method('AddDownlinkBurst',
'void',
[param('ns3::Ptr< ns3::WimaxConnection const >', 'connection'), param('uint8_t', 'diuc'), param('ns3::WimaxPhy::ModulationType', 'modulationType'), param('ns3::Ptr< ns3::PacketBurst >', 'burst')],
is_virtual=True, is_pure_virtual=True)
## bs-scheduler.h (module 'wimax'): bool ns3::BSScheduler::CheckForFragmentation(ns3::Ptr<ns3::WimaxConnection> connection, int availableSymbols, ns3::WimaxPhy::ModulationType modulationType) [member function]
cls.add_method('CheckForFragmentation',
'bool',
[param('ns3::Ptr< ns3::WimaxConnection >', 'connection'), param('int', 'availableSymbols'), param('ns3::WimaxPhy::ModulationType', 'modulationType')])
## bs-scheduler.h (module 'wimax'): ns3::Ptr<ns3::PacketBurst> ns3::BSScheduler::CreateUgsBurst(ns3::ServiceFlow * serviceFlow, ns3::WimaxPhy::ModulationType modulationType, uint32_t availableSymbols) [member function]
cls.add_method('CreateUgsBurst',
'ns3::Ptr< ns3::PacketBurst >',
[param('ns3::ServiceFlow *', 'serviceFlow'), param('ns3::WimaxPhy::ModulationType', 'modulationType'), param('uint32_t', 'availableSymbols')],
is_virtual=True, is_pure_virtual=True)
## bs-scheduler.h (module 'wimax'): ns3::Ptr<ns3::BaseStationNetDevice> ns3::BSScheduler::GetBs() [member function]
cls.add_method('GetBs',
'ns3::Ptr< ns3::BaseStationNetDevice >',
[],
is_virtual=True)
## bs-scheduler.h (module 'wimax'): std::list<std::pair<ns3::OfdmDlMapIe *, ns3::Ptr<ns3::PacketBurst> >, std::allocator<std::pair<ns3::OfdmDlMapIe *, ns3::Ptr<ns3::PacketBurst> > > > * ns3::BSScheduler::GetDownlinkBursts() const [member function]
cls.add_method('GetDownlinkBursts',
'std::list< std::pair< ns3::OfdmDlMapIe *, ns3::Ptr< ns3::PacketBurst > > > *',
[],
is_const=True, is_virtual=True, is_pure_virtual=True)
## bs-scheduler.h (module 'wimax'): static ns3::TypeId ns3::BSScheduler::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## bs-scheduler.h (module 'wimax'): void ns3::BSScheduler::Schedule() [member function]
cls.add_method('Schedule',
'void',
[],
is_virtual=True, is_pure_virtual=True)
## bs-scheduler.h (module 'wimax'): bool ns3::BSScheduler::SelectConnection(ns3::Ptr<ns3::WimaxConnection> & connection) [member function]
cls.add_method('SelectConnection',
'bool',
[param('ns3::Ptr< ns3::WimaxConnection > &', 'connection')],
is_virtual=True, is_pure_virtual=True)
## bs-scheduler.h (module 'wimax'): void ns3::BSScheduler::SetBs(ns3::Ptr<ns3::BaseStationNetDevice> bs) [member function]
cls.add_method('SetBs',
'void',
[param('ns3::Ptr< ns3::BaseStationNetDevice >', 'bs')],
is_virtual=True)
return
def register_Ns3BSSchedulerRtps_methods(root_module, cls):
## bs-scheduler-rtps.h (module 'wimax'): ns3::BSSchedulerRtps::BSSchedulerRtps(ns3::BSSchedulerRtps const & arg0) [constructor]
cls.add_constructor([param('ns3::BSSchedulerRtps const &', 'arg0')])
## bs-scheduler-rtps.h (module 'wimax'): ns3::BSSchedulerRtps::BSSchedulerRtps() [constructor]
cls.add_constructor([])
## bs-scheduler-rtps.h (module 'wimax'): ns3::BSSchedulerRtps::BSSchedulerRtps(ns3::Ptr<ns3::BaseStationNetDevice> bs) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::BaseStationNetDevice >', 'bs')])
## bs-scheduler-rtps.h (module 'wimax'): void ns3::BSSchedulerRtps::AddDownlinkBurst(ns3::Ptr<const ns3::WimaxConnection> connection, uint8_t diuc, ns3::WimaxPhy::ModulationType modulationType, ns3::Ptr<ns3::PacketBurst> burst) [member function]
cls.add_method('AddDownlinkBurst',
'void',
[param('ns3::Ptr< ns3::WimaxConnection const >', 'connection'), param('uint8_t', 'diuc'), param('ns3::WimaxPhy::ModulationType', 'modulationType'), param('ns3::Ptr< ns3::PacketBurst >', 'burst')],
is_virtual=True)
## bs-scheduler-rtps.h (module 'wimax'): void ns3::BSSchedulerRtps::BSSchedulerBEConnection(uint32_t & availableSymbols) [member function]
cls.add_method('BSSchedulerBEConnection',
'void',
[param('uint32_t &', 'availableSymbols')])
## bs-scheduler-rtps.h (module 'wimax'): void ns3::BSSchedulerRtps::BSSchedulerBasicConnection(uint32_t & availableSymbols) [member function]
cls.add_method('BSSchedulerBasicConnection',
'void',
[param('uint32_t &', 'availableSymbols')])
## bs-scheduler-rtps.h (module 'wimax'): void ns3::BSSchedulerRtps::BSSchedulerBroadcastConnection(uint32_t & availableSymbols) [member function]
cls.add_method('BSSchedulerBroadcastConnection',
'void',
[param('uint32_t &', 'availableSymbols')])
## bs-scheduler-rtps.h (module 'wimax'): void ns3::BSSchedulerRtps::BSSchedulerInitialRangingConnection(uint32_t & availableSymbols) [member function]
cls.add_method('BSSchedulerInitialRangingConnection',
'void',
[param('uint32_t &', 'availableSymbols')])
## bs-scheduler-rtps.h (module 'wimax'): void ns3::BSSchedulerRtps::BSSchedulerNRTPSConnection(uint32_t & availableSymbols) [member function]
cls.add_method('BSSchedulerNRTPSConnection',
'void',
[param('uint32_t &', 'availableSymbols')])
## bs-scheduler-rtps.h (module 'wimax'): void ns3::BSSchedulerRtps::BSSchedulerPrimaryConnection(uint32_t & availableSymbols) [member function]
cls.add_method('BSSchedulerPrimaryConnection',
'void',
[param('uint32_t &', 'availableSymbols')])
## bs-scheduler-rtps.h (module 'wimax'): void ns3::BSSchedulerRtps::BSSchedulerRTPSConnection(uint32_t & availableSymbols) [member function]
cls.add_method('BSSchedulerRTPSConnection',
'void',
[param('uint32_t &', 'availableSymbols')])
## bs-scheduler-rtps.h (module 'wimax'): void ns3::BSSchedulerRtps::BSSchedulerUGSConnection(uint32_t & availableSymbols) [member function]
cls.add_method('BSSchedulerUGSConnection',
'void',
[param('uint32_t &', 'availableSymbols')])
## bs-scheduler-rtps.h (module 'wimax'): ns3::Ptr<ns3::PacketBurst> ns3::BSSchedulerRtps::CreateUgsBurst(ns3::ServiceFlow * serviceFlow, ns3::WimaxPhy::ModulationType modulationType, uint32_t availableSymbols) [member function]
cls.add_method('CreateUgsBurst',
'ns3::Ptr< ns3::PacketBurst >',
[param('ns3::ServiceFlow *', 'serviceFlow'), param('ns3::WimaxPhy::ModulationType', 'modulationType'), param('uint32_t', 'availableSymbols')],
is_virtual=True)
## bs-scheduler-rtps.h (module 'wimax'): std::list<std::pair<ns3::OfdmDlMapIe *, ns3::Ptr<ns3::PacketBurst> >, std::allocator<std::pair<ns3::OfdmDlMapIe *, ns3::Ptr<ns3::PacketBurst> > > > * ns3::BSSchedulerRtps::GetDownlinkBursts() const [member function]
cls.add_method('GetDownlinkBursts',
'std::list< std::pair< ns3::OfdmDlMapIe *, ns3::Ptr< ns3::PacketBurst > > > *',
[],
is_const=True, is_virtual=True)
## bs-scheduler-rtps.h (module 'wimax'): static ns3::TypeId ns3::BSSchedulerRtps::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## bs-scheduler-rtps.h (module 'wimax'): void ns3::BSSchedulerRtps::Schedule() [member function]
cls.add_method('Schedule',
'void',
[],
is_virtual=True)
## bs-scheduler-rtps.h (module 'wimax'): bool ns3::BSSchedulerRtps::SelectBEConnection(ns3::Ptr<ns3::WimaxConnection> & connection) [member function]
cls.add_method('SelectBEConnection',
'bool',
[param('ns3::Ptr< ns3::WimaxConnection > &', 'connection')])
## bs-scheduler-rtps.h (module 'wimax'): bool ns3::BSSchedulerRtps::SelectConnection(ns3::Ptr<ns3::WimaxConnection> & connection) [member function]
cls.add_method('SelectConnection',
'bool',
[param('ns3::Ptr< ns3::WimaxConnection > &', 'connection')],
is_virtual=True)
## bs-scheduler-rtps.h (module 'wimax'): bool ns3::BSSchedulerRtps::SelectIRandBCConnection(ns3::Ptr<ns3::WimaxConnection> & connection) [member function]
cls.add_method('SelectIRandBCConnection',
'bool',
[param('ns3::Ptr< ns3::WimaxConnection > &', 'connection')])
## bs-scheduler-rtps.h (module 'wimax'): bool ns3::BSSchedulerRtps::SelectMenagementConnection(ns3::Ptr<ns3::WimaxConnection> & connection) [member function]
cls.add_method('SelectMenagementConnection',
'bool',
[param('ns3::Ptr< ns3::WimaxConnection > &', 'connection')])
## bs-scheduler-rtps.h (module 'wimax'): bool ns3::BSSchedulerRtps::SelectNRTPSConnection(ns3::Ptr<ns3::WimaxConnection> & connection) [member function]
cls.add_method('SelectNRTPSConnection',
'bool',
[param('ns3::Ptr< ns3::WimaxConnection > &', 'connection')])
## bs-scheduler-rtps.h (module 'wimax'): bool ns3::BSSchedulerRtps::SelectRTPSConnection(ns3::Ptr<ns3::WimaxConnection> & connection) [member function]
cls.add_method('SelectRTPSConnection',
'bool',
[param('ns3::Ptr< ns3::WimaxConnection > &', 'connection')])
## bs-scheduler-rtps.h (module 'wimax'): bool ns3::BSSchedulerRtps::SelectUGSConnection(ns3::Ptr<ns3::WimaxConnection> & connection) [member function]
cls.add_method('SelectUGSConnection',
'bool',
[param('ns3::Ptr< ns3::WimaxConnection > &', 'connection')])
return
def register_Ns3BSSchedulerSimple_methods(root_module, cls):
## bs-scheduler-simple.h (module 'wimax'): ns3::BSSchedulerSimple::BSSchedulerSimple(ns3::BSSchedulerSimple const & arg0) [constructor]
cls.add_constructor([param('ns3::BSSchedulerSimple const &', 'arg0')])
## bs-scheduler-simple.h (module 'wimax'): ns3::BSSchedulerSimple::BSSchedulerSimple() [constructor]
cls.add_constructor([])
## bs-scheduler-simple.h (module 'wimax'): ns3::BSSchedulerSimple::BSSchedulerSimple(ns3::Ptr<ns3::BaseStationNetDevice> bs) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::BaseStationNetDevice >', 'bs')])
## bs-scheduler-simple.h (module 'wimax'): void ns3::BSSchedulerSimple::AddDownlinkBurst(ns3::Ptr<const ns3::WimaxConnection> connection, uint8_t diuc, ns3::WimaxPhy::ModulationType modulationType, ns3::Ptr<ns3::PacketBurst> burst) [member function]
cls.add_method('AddDownlinkBurst',
'void',
[param('ns3::Ptr< ns3::WimaxConnection const >', 'connection'), param('uint8_t', 'diuc'), param('ns3::WimaxPhy::ModulationType', 'modulationType'), param('ns3::Ptr< ns3::PacketBurst >', 'burst')],
is_virtual=True)
## bs-scheduler-simple.h (module 'wimax'): ns3::Ptr<ns3::PacketBurst> ns3::BSSchedulerSimple::CreateUgsBurst(ns3::ServiceFlow * serviceFlow, ns3::WimaxPhy::ModulationType modulationType, uint32_t availableSymbols) [member function]
cls.add_method('CreateUgsBurst',
'ns3::Ptr< ns3::PacketBurst >',
[param('ns3::ServiceFlow *', 'serviceFlow'), param('ns3::WimaxPhy::ModulationType', 'modulationType'), param('uint32_t', 'availableSymbols')],
is_virtual=True)
## bs-scheduler-simple.h (module 'wimax'): std::list<std::pair<ns3::OfdmDlMapIe *, ns3::Ptr<ns3::PacketBurst> >, std::allocator<std::pair<ns3::OfdmDlMapIe *, ns3::Ptr<ns3::PacketBurst> > > > * ns3::BSSchedulerSimple::GetDownlinkBursts() const [member function]
cls.add_method('GetDownlinkBursts',
'std::list< std::pair< ns3::OfdmDlMapIe *, ns3::Ptr< ns3::PacketBurst > > > *',
[],
is_const=True, is_virtual=True)
## bs-scheduler-simple.h (module 'wimax'): static ns3::TypeId ns3::BSSchedulerSimple::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## bs-scheduler-simple.h (module 'wimax'): void ns3::BSSchedulerSimple::Schedule() [member function]
cls.add_method('Schedule',
'void',
[],
is_virtual=True)
## bs-scheduler-simple.h (module 'wimax'): bool ns3::BSSchedulerSimple::SelectConnection(ns3::Ptr<ns3::WimaxConnection> & connection) [member function]
cls.add_method('SelectConnection',
'bool',
[param('ns3::Ptr< ns3::WimaxConnection > &', 'connection')],
is_virtual=True)
return
def register_Ns3BandwidthRequestHeader_methods(root_module, cls):
## wimax-mac-header.h (module 'wimax'): ns3::BandwidthRequestHeader::BandwidthRequestHeader(ns3::BandwidthRequestHeader const & arg0) [constructor]
cls.add_constructor([param('ns3::BandwidthRequestHeader const &', 'arg0')])
## wimax-mac-header.h (module 'wimax'): ns3::BandwidthRequestHeader::BandwidthRequestHeader() [constructor]
cls.add_constructor([])
## wimax-mac-header.h (module 'wimax'): uint32_t ns3::BandwidthRequestHeader::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_virtual=True)
## wimax-mac-header.h (module 'wimax'): uint32_t ns3::BandwidthRequestHeader::GetBr() const [member function]
cls.add_method('GetBr',
'uint32_t',
[],
is_const=True)
## wimax-mac-header.h (module 'wimax'): ns3::Cid ns3::BandwidthRequestHeader::GetCid() const [member function]
cls.add_method('GetCid',
'ns3::Cid',
[],
is_const=True)
## wimax-mac-header.h (module 'wimax'): uint8_t ns3::BandwidthRequestHeader::GetEc() const [member function]
cls.add_method('GetEc',
'uint8_t',
[],
is_const=True)
## wimax-mac-header.h (module 'wimax'): uint8_t ns3::BandwidthRequestHeader::GetHcs() const [member function]
cls.add_method('GetHcs',
'uint8_t',
[],
is_const=True)
## wimax-mac-header.h (module 'wimax'): uint8_t ns3::BandwidthRequestHeader::GetHt() const [member function]
cls.add_method('GetHt',
'uint8_t',
[],
is_const=True)
## wimax-mac-header.h (module 'wimax'): ns3::TypeId ns3::BandwidthRequestHeader::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## wimax-mac-header.h (module 'wimax'): std::string ns3::BandwidthRequestHeader::GetName() const [member function]
cls.add_method('GetName',
'std::string',
[],
is_const=True)
## wimax-mac-header.h (module 'wimax'): uint32_t ns3::BandwidthRequestHeader::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True, is_virtual=True)
## wimax-mac-header.h (module 'wimax'): uint8_t ns3::BandwidthRequestHeader::GetType() const [member function]
cls.add_method('GetType',
'uint8_t',
[],
is_const=True)
## wimax-mac-header.h (module 'wimax'): static ns3::TypeId ns3::BandwidthRequestHeader::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## wimax-mac-header.h (module 'wimax'): void ns3::BandwidthRequestHeader::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_virtual=True)
## wimax-mac-header.h (module 'wimax'): void ns3::BandwidthRequestHeader::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_const=True, is_virtual=True)
## wimax-mac-header.h (module 'wimax'): void ns3::BandwidthRequestHeader::SetBr(uint32_t br) [member function]
cls.add_method('SetBr',
'void',
[param('uint32_t', 'br')])
## wimax-mac-header.h (module 'wimax'): void ns3::BandwidthRequestHeader::SetCid(ns3::Cid cid) [member function]
cls.add_method('SetCid',
'void',
[param('ns3::Cid', 'cid')])
## wimax-mac-header.h (module 'wimax'): void ns3::BandwidthRequestHeader::SetEc(uint8_t ec) [member function]
cls.add_method('SetEc',
'void',
[param('uint8_t', 'ec')])
## wimax-mac-header.h (module 'wimax'): void ns3::BandwidthRequestHeader::SetHcs(uint8_t hcs) [member function]
cls.add_method('SetHcs',
'void',
[param('uint8_t', 'hcs')])
## wimax-mac-header.h (module 'wimax'): void ns3::BandwidthRequestHeader::SetHt(uint8_t ht) [member function]
cls.add_method('SetHt',
'void',
[param('uint8_t', 'ht')])
## wimax-mac-header.h (module 'wimax'): void ns3::BandwidthRequestHeader::SetType(uint8_t type) [member function]
cls.add_method('SetType',
'void',
[param('uint8_t', 'type')])
## wimax-mac-header.h (module 'wimax'): bool ns3::BandwidthRequestHeader::check_hcs() const [member function]
cls.add_method('check_hcs',
'bool',
[],
is_const=True)
return
def register_Ns3BsServiceFlowManager_methods(root_module, cls):
## bs-service-flow-manager.h (module 'wimax'): ns3::BsServiceFlowManager::BsServiceFlowManager(ns3::BsServiceFlowManager const & arg0) [constructor]
cls.add_constructor([param('ns3::BsServiceFlowManager const &', 'arg0')])
## bs-service-flow-manager.h (module 'wimax'): ns3::BsServiceFlowManager::BsServiceFlowManager(ns3::Ptr<ns3::BaseStationNetDevice> device) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::BaseStationNetDevice >', 'device')])
## bs-service-flow-manager.h (module 'wimax'): void ns3::BsServiceFlowManager::AddMulticastServiceFlow(ns3::ServiceFlow sf, ns3::WimaxPhy::ModulationType modulation) [member function]
cls.add_method('AddMulticastServiceFlow',
'void',
[param('ns3::ServiceFlow', 'sf'), param('ns3::WimaxPhy::ModulationType', 'modulation')])
## bs-service-flow-manager.h (module 'wimax'): void ns3::BsServiceFlowManager::AddServiceFlow(ns3::ServiceFlow * serviceFlow) [member function]
cls.add_method('AddServiceFlow',
'void',
[param('ns3::ServiceFlow *', 'serviceFlow')])
## bs-service-flow-manager.h (module 'wimax'): void ns3::BsServiceFlowManager::AllocateServiceFlows(ns3::DsaReq const & dsaReq, ns3::Cid cid) [member function]
cls.add_method('AllocateServiceFlows',
'void',
[param('ns3::DsaReq const &', 'dsaReq'), param('ns3::Cid', 'cid')])
## bs-service-flow-manager.h (module 'wimax'): void ns3::BsServiceFlowManager::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
is_virtual=True)
## bs-service-flow-manager.h (module 'wimax'): ns3::EventId ns3::BsServiceFlowManager::GetDsaAckTimeoutEvent() const [member function]
cls.add_method('GetDsaAckTimeoutEvent',
'ns3::EventId',
[],
is_const=True)
## bs-service-flow-manager.h (module 'wimax'): ns3::ServiceFlow * ns3::BsServiceFlowManager::GetServiceFlow(uint32_t sfid) const [member function]
cls.add_method('GetServiceFlow',
'ns3::ServiceFlow *',
[param('uint32_t', 'sfid')],
is_const=True)
## bs-service-flow-manager.h (module 'wimax'): ns3::ServiceFlow * ns3::BsServiceFlowManager::GetServiceFlow(ns3::Cid cid) const [member function]
cls.add_method('GetServiceFlow',
'ns3::ServiceFlow *',
[param('ns3::Cid', 'cid')],
is_const=True)
## bs-service-flow-manager.h (module 'wimax'): std::vector<ns3::ServiceFlow *, std::allocator<ns3::ServiceFlow *> > ns3::BsServiceFlowManager::GetServiceFlows(ns3::ServiceFlow::SchedulingType schedulingType) const [member function]
cls.add_method('GetServiceFlows',
'std::vector< ns3::ServiceFlow * >',
[param('ns3::ServiceFlow::SchedulingType', 'schedulingType')],
is_const=True)
## bs-service-flow-manager.h (module 'wimax'): static ns3::TypeId ns3::BsServiceFlowManager::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## bs-service-flow-manager.h (module 'wimax'): void ns3::BsServiceFlowManager::ProcessDsaAck(ns3::DsaAck const & dsaAck, ns3::Cid cid) [member function]
cls.add_method('ProcessDsaAck',
'void',
[param('ns3::DsaAck const &', 'dsaAck'), param('ns3::Cid', 'cid')])
## bs-service-flow-manager.h (module 'wimax'): ns3::ServiceFlow * ns3::BsServiceFlowManager::ProcessDsaReq(ns3::DsaReq const & dsaReq, ns3::Cid cid) [member function]
cls.add_method('ProcessDsaReq',
'ns3::ServiceFlow *',
[param('ns3::DsaReq const &', 'dsaReq'), param('ns3::Cid', 'cid')])
## bs-service-flow-manager.h (module 'wimax'): void ns3::BsServiceFlowManager::SetMaxDsaRspRetries(uint8_t maxDsaRspRetries) [member function]
cls.add_method('SetMaxDsaRspRetries',
'void',
[param('uint8_t', 'maxDsaRspRetries')])
return
def register_Ns3CallbackChecker_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')])
return
def register_Ns3CallbackImplBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [constructor]
cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
## callback.h (module 'core'): std::string ns3::CallbackImplBase::GetTypeid() const [member function]
cls.add_method('GetTypeid',
'std::string',
[],
is_const=True, is_virtual=True, is_pure_virtual=True)
## callback.h (module 'core'): bool ns3::CallbackImplBase::IsEqual(ns3::Ptr<const ns3::CallbackImplBase> other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')],
is_const=True, is_virtual=True, is_pure_virtual=True)
## callback.h (module 'core'): static std::string ns3::CallbackImplBase::Demangle(std::string const & mangled) [member function]
cls.add_method('Demangle',
'std::string',
[param('std::string const &', 'mangled')],
is_static=True, visibility='protected')
## callback.h (module 'core'): static std::string ns3::CallbackImplBase::GetCppTypeid() [member function]
cls.add_method('GetCppTypeid',
'std::string',
[],
is_static=True, template_parameters=['ns3::ObjectBase*'], visibility='protected')
## callback.h (module 'core'): static std::string ns3::CallbackImplBase::GetCppTypeid() [member function]
cls.add_method('GetCppTypeid',
'std::string',
[],
is_static=True, template_parameters=['void'], visibility='protected')
## callback.h (module 'core'): static std::string ns3::CallbackImplBase::GetCppTypeid() [member function]
cls.add_method('GetCppTypeid',
'std::string',
[],
is_static=True, template_parameters=['ns3::Ptr<ns3::Packet const> '], visibility='protected')
## callback.h (module 'core'): static std::string ns3::CallbackImplBase::GetCppTypeid() [member function]
cls.add_method('GetCppTypeid',
'std::string',
[],
is_static=True, template_parameters=['ns3::Ptr<ns3::PacketBurst const> '], visibility='protected')
## callback.h (module 'core'): static std::string ns3::CallbackImplBase::GetCppTypeid() [member function]
cls.add_method('GetCppTypeid',
'std::string',
[],
is_static=True, template_parameters=['bool'], visibility='protected')
## callback.h (module 'core'): static std::string ns3::CallbackImplBase::GetCppTypeid() [member function]
cls.add_method('GetCppTypeid',
'std::string',
[],
is_static=True, template_parameters=['unsigned long'], visibility='protected')
## callback.h (module 'core'): static std::string ns3::CallbackImplBase::GetCppTypeid() [member function]
cls.add_method('GetCppTypeid',
'std::string',
[],
is_static=True, template_parameters=['ns3::Mac48Address const&'], visibility='protected')
## callback.h (module 'core'): static std::string ns3::CallbackImplBase::GetCppTypeid() [member function]
cls.add_method('GetCppTypeid',
'std::string',
[],
is_static=True, template_parameters=['ns3::Mac48Address'], visibility='protected')
## callback.h (module 'core'): static std::string ns3::CallbackImplBase::GetCppTypeid() [member function]
cls.add_method('GetCppTypeid',
'std::string',
[],
is_static=True, template_parameters=['ns3::Cid const&'], visibility='protected')
## callback.h (module 'core'): static std::string ns3::CallbackImplBase::GetCppTypeid() [member function]
cls.add_method('GetCppTypeid',
'std::string',
[],
is_static=True, template_parameters=['ns3::Cid'], visibility='protected')
## callback.h (module 'core'): static std::string ns3::CallbackImplBase::GetCppTypeid() [member function]
cls.add_method('GetCppTypeid',
'std::string',
[],
is_static=True, template_parameters=['ns3::Ptr<ns3::PacketBurst> '], visibility='protected')
## callback.h (module 'core'): static std::string ns3::CallbackImplBase::GetCppTypeid() [member function]
cls.add_method('GetCppTypeid',
'std::string',
[],
is_static=True, template_parameters=['ns3::Ptr<ns3::NetDevice> '], visibility='protected')
## callback.h (module 'core'): static std::string ns3::CallbackImplBase::GetCppTypeid() [member function]
cls.add_method('GetCppTypeid',
'std::string',
[],
is_static=True, template_parameters=['unsigned short'], visibility='protected')
## callback.h (module 'core'): static std::string ns3::CallbackImplBase::GetCppTypeid() [member function]
cls.add_method('GetCppTypeid',
'std::string',
[],
is_static=True, template_parameters=['ns3::Address const&'], visibility='protected')
## callback.h (module 'core'): static std::string ns3::CallbackImplBase::GetCppTypeid() [member function]
cls.add_method('GetCppTypeid',
'std::string',
[],
is_static=True, template_parameters=['ns3::NetDevice::PacketType'], visibility='protected')
return
def register_Ns3CallbackValue_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [constructor]
cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'base')])
## callback.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::CallbackValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## callback.h (module 'core'): bool ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## callback.h (module 'core'): std::string ns3::CallbackValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## callback.h (module 'core'): void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function]
cls.add_method('Set',
'void',
[param('ns3::CallbackBase', 'base')])
return
def register_Ns3Channel_methods(root_module, cls):
## channel.h (module 'network'): ns3::Channel::Channel(ns3::Channel const & arg0) [constructor]
cls.add_constructor([param('ns3::Channel const &', 'arg0')])
## channel.h (module 'network'): ns3::Channel::Channel() [constructor]
cls.add_constructor([])
## channel.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::Channel::GetDevice(std::size_t i) const [member function]
cls.add_method('GetDevice',
'ns3::Ptr< ns3::NetDevice >',
[param('std::size_t', 'i')],
is_const=True, is_virtual=True, is_pure_virtual=True)
## channel.h (module 'network'): uint32_t ns3::Channel::GetId() const [member function]
cls.add_method('GetId',
'uint32_t',
[],
is_const=True)
## channel.h (module 'network'): std::size_t ns3::Channel::GetNDevices() const [member function]
cls.add_method('GetNDevices',
'std::size_t',
[],
is_const=True, is_virtual=True, is_pure_virtual=True)
## channel.h (module 'network'): static ns3::TypeId ns3::Channel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
return
def register_Ns3ConnectionManager_methods(root_module, cls):
## connection-manager.h (module 'wimax'): ns3::ConnectionManager::ConnectionManager(ns3::ConnectionManager const & arg0) [constructor]
cls.add_constructor([param('ns3::ConnectionManager const &', 'arg0')])
## connection-manager.h (module 'wimax'): ns3::ConnectionManager::ConnectionManager() [constructor]
cls.add_constructor([])
## connection-manager.h (module 'wimax'): void ns3::ConnectionManager::AddConnection(ns3::Ptr<ns3::WimaxConnection> connection, ns3::Cid::Type type) [member function]
cls.add_method('AddConnection',
'void',
[param('ns3::Ptr< ns3::WimaxConnection >', 'connection'), param('ns3::Cid::Type', 'type')])
## connection-manager.h (module 'wimax'): void ns3::ConnectionManager::AllocateManagementConnections(ns3::SSRecord * ssRecord, ns3::RngRsp * rngrsp) [member function]
cls.add_method('AllocateManagementConnections',
'void',
[param('ns3::SSRecord *', 'ssRecord'), param('ns3::RngRsp *', 'rngrsp')])
## connection-manager.h (module 'wimax'): ns3::Ptr<ns3::WimaxConnection> ns3::ConnectionManager::CreateConnection(ns3::Cid::Type type) [member function]
cls.add_method('CreateConnection',
'ns3::Ptr< ns3::WimaxConnection >',
[param('ns3::Cid::Type', 'type')])
## connection-manager.h (module 'wimax'): void ns3::ConnectionManager::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
is_virtual=True)
## connection-manager.h (module 'wimax'): ns3::Ptr<ns3::WimaxConnection> ns3::ConnectionManager::GetConnection(ns3::Cid cid) [member function]
cls.add_method('GetConnection',
'ns3::Ptr< ns3::WimaxConnection >',
[param('ns3::Cid', 'cid')])
## connection-manager.h (module 'wimax'): std::vector<ns3::Ptr<ns3::WimaxConnection>, std::allocator<ns3::Ptr<ns3::WimaxConnection> > > ns3::ConnectionManager::GetConnections(ns3::Cid::Type type) const [member function]
cls.add_method('GetConnections',
'std::vector< ns3::Ptr< ns3::WimaxConnection > >',
[param('ns3::Cid::Type', 'type')],
is_const=True)
## connection-manager.h (module 'wimax'): uint32_t ns3::ConnectionManager::GetNPackets(ns3::Cid::Type type, ns3::ServiceFlow::SchedulingType schedulingType) const [member function]
cls.add_method('GetNPackets',
'uint32_t',
[param('ns3::Cid::Type', 'type'), param('ns3::ServiceFlow::SchedulingType', 'schedulingType')],
is_const=True)
## connection-manager.h (module 'wimax'): static ns3::TypeId ns3::ConnectionManager::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## connection-manager.h (module 'wimax'): bool ns3::ConnectionManager::HasPackets() const [member function]
cls.add_method('HasPackets',
'bool',
[],
is_const=True)
## connection-manager.h (module 'wimax'): void ns3::ConnectionManager::SetCidFactory(ns3::CidFactory * cidFactory) [member function]
cls.add_method('SetCidFactory',
'void',
[param('ns3::CidFactory *', 'cidFactory')])
return
def register_Ns3ConstantRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ConstantRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::ConstantRandomVariable::ConstantRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::ConstantRandomVariable::GetConstant() const [member function]
cls.add_method('GetConstant',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ConstantRandomVariable::GetValue(double constant) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'constant')])
## random-variable-stream.h (module 'core'): uint32_t ns3::ConstantRandomVariable::GetInteger(uint32_t constant) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'constant')])
## random-variable-stream.h (module 'core'): double ns3::ConstantRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::ConstantRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3Dcd_methods(root_module, cls):
## dl-mac-messages.h (module 'wimax'): ns3::Dcd::Dcd(ns3::Dcd const & arg0) [constructor]
cls.add_constructor([param('ns3::Dcd const &', 'arg0')])
## dl-mac-messages.h (module 'wimax'): ns3::Dcd::Dcd() [constructor]
cls.add_constructor([])
## dl-mac-messages.h (module 'wimax'): void ns3::Dcd::AddDlBurstProfile(ns3::OfdmDlBurstProfile dlBurstProfile) [member function]
cls.add_method('AddDlBurstProfile',
'void',
[param('ns3::OfdmDlBurstProfile', 'dlBurstProfile')])
## dl-mac-messages.h (module 'wimax'): uint32_t ns3::Dcd::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_virtual=True)
## dl-mac-messages.h (module 'wimax'): ns3::OfdmDcdChannelEncodings ns3::Dcd::GetChannelEncodings() const [member function]
cls.add_method('GetChannelEncodings',
'ns3::OfdmDcdChannelEncodings',
[],
is_const=True)
## dl-mac-messages.h (module 'wimax'): uint8_t ns3::Dcd::GetConfigurationChangeCount() const [member function]
cls.add_method('GetConfigurationChangeCount',
'uint8_t',
[],
is_const=True)
## dl-mac-messages.h (module 'wimax'): std::vector<ns3::OfdmDlBurstProfile, std::allocator<ns3::OfdmDlBurstProfile> > ns3::Dcd::GetDlBurstProfiles() const [member function]
cls.add_method('GetDlBurstProfiles',
'std::vector< ns3::OfdmDlBurstProfile >',
[],
is_const=True)
## dl-mac-messages.h (module 'wimax'): ns3::TypeId ns3::Dcd::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## dl-mac-messages.h (module 'wimax'): std::string ns3::Dcd::GetName() const [member function]
cls.add_method('GetName',
'std::string',
[],
is_const=True)
## dl-mac-messages.h (module 'wimax'): uint8_t ns3::Dcd::GetNrDlBurstProfiles() const [member function]
cls.add_method('GetNrDlBurstProfiles',
'uint8_t',
[],
is_const=True)
## dl-mac-messages.h (module 'wimax'): uint32_t ns3::Dcd::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True, is_virtual=True)
## dl-mac-messages.h (module 'wimax'): static ns3::TypeId ns3::Dcd::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## dl-mac-messages.h (module 'wimax'): void ns3::Dcd::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_virtual=True)
## dl-mac-messages.h (module 'wimax'): void ns3::Dcd::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_const=True, is_virtual=True)
## dl-mac-messages.h (module 'wimax'): void ns3::Dcd::SetChannelEncodings(ns3::OfdmDcdChannelEncodings channelEncodings) [member function]
cls.add_method('SetChannelEncodings',
'void',
[param('ns3::OfdmDcdChannelEncodings', 'channelEncodings')])
## dl-mac-messages.h (module 'wimax'): void ns3::Dcd::SetConfigurationChangeCount(uint8_t configurationChangeCount) [member function]
cls.add_method('SetConfigurationChangeCount',
'void',
[param('uint8_t', 'configurationChangeCount')])
## dl-mac-messages.h (module 'wimax'): void ns3::Dcd::SetNrDlBurstProfiles(uint8_t nrDlBurstProfiles) [member function]
cls.add_method('SetNrDlBurstProfiles',
'void',
[param('uint8_t', 'nrDlBurstProfiles')])
return
def register_Ns3DeterministicRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::DeterministicRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::DeterministicRandomVariable::DeterministicRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): void ns3::DeterministicRandomVariable::SetValueArray(double * values, std::size_t length) [member function]
cls.add_method('SetValueArray',
'void',
[param('double *', 'values'), param('std::size_t', 'length')])
## random-variable-stream.h (module 'core'): double ns3::DeterministicRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::DeterministicRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3DlMap_methods(root_module, cls):
## dl-mac-messages.h (module 'wimax'): ns3::DlMap::DlMap(ns3::DlMap const & arg0) [constructor]
cls.add_constructor([param('ns3::DlMap const &', 'arg0')])
## dl-mac-messages.h (module 'wimax'): ns3::DlMap::DlMap() [constructor]
cls.add_constructor([])
## dl-mac-messages.h (module 'wimax'): void ns3::DlMap::AddDlMapElement(ns3::OfdmDlMapIe dlMapElement) [member function]
cls.add_method('AddDlMapElement',
'void',
[param('ns3::OfdmDlMapIe', 'dlMapElement')])
## dl-mac-messages.h (module 'wimax'): uint32_t ns3::DlMap::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_virtual=True)
## dl-mac-messages.h (module 'wimax'): ns3::Mac48Address ns3::DlMap::GetBaseStationId() const [member function]
cls.add_method('GetBaseStationId',
'ns3::Mac48Address',
[],
is_const=True)
## dl-mac-messages.h (module 'wimax'): uint8_t ns3::DlMap::GetDcdCount() const [member function]
cls.add_method('GetDcdCount',
'uint8_t',
[],
is_const=True)
## dl-mac-messages.h (module 'wimax'): std::list<ns3::OfdmDlMapIe, std::allocator<ns3::OfdmDlMapIe> > ns3::DlMap::GetDlMapElements() const [member function]
cls.add_method('GetDlMapElements',
'std::list< ns3::OfdmDlMapIe >',
[],
is_const=True)
## dl-mac-messages.h (module 'wimax'): ns3::TypeId ns3::DlMap::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## dl-mac-messages.h (module 'wimax'): std::string ns3::DlMap::GetName() const [member function]
cls.add_method('GetName',
'std::string',
[],
is_const=True)
## dl-mac-messages.h (module 'wimax'): uint32_t ns3::DlMap::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True, is_virtual=True)
## dl-mac-messages.h (module 'wimax'): static ns3::TypeId ns3::DlMap::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## dl-mac-messages.h (module 'wimax'): void ns3::DlMap::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_virtual=True)
## dl-mac-messages.h (module 'wimax'): void ns3::DlMap::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_const=True, is_virtual=True)
## dl-mac-messages.h (module 'wimax'): void ns3::DlMap::SetBaseStationId(ns3::Mac48Address baseStationID) [member function]
cls.add_method('SetBaseStationId',
'void',
[param('ns3::Mac48Address', 'baseStationID')])
## dl-mac-messages.h (module 'wimax'): void ns3::DlMap::SetDcdCount(uint8_t dcdCount) [member function]
cls.add_method('SetDcdCount',
'void',
[param('uint8_t', 'dcdCount')])
return
def register_Ns3DsaAck_methods(root_module, cls):
## mac-messages.h (module 'wimax'): ns3::DsaAck::DsaAck(ns3::DsaAck const & arg0) [constructor]
cls.add_constructor([param('ns3::DsaAck const &', 'arg0')])
## mac-messages.h (module 'wimax'): ns3::DsaAck::DsaAck() [constructor]
cls.add_constructor([])
## mac-messages.h (module 'wimax'): uint32_t ns3::DsaAck::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_virtual=True)
## mac-messages.h (module 'wimax'): uint16_t ns3::DsaAck::GetConfirmationCode() const [member function]
cls.add_method('GetConfirmationCode',
'uint16_t',
[],
is_const=True)
## mac-messages.h (module 'wimax'): ns3::TypeId ns3::DsaAck::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## mac-messages.h (module 'wimax'): std::string ns3::DsaAck::GetName() const [member function]
cls.add_method('GetName',
'std::string',
[],
is_const=True)
## mac-messages.h (module 'wimax'): uint32_t ns3::DsaAck::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True, is_virtual=True)
## mac-messages.h (module 'wimax'): uint16_t ns3::DsaAck::GetTransactionId() const [member function]
cls.add_method('GetTransactionId',
'uint16_t',
[],
is_const=True)
## mac-messages.h (module 'wimax'): static ns3::TypeId ns3::DsaAck::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## mac-messages.h (module 'wimax'): void ns3::DsaAck::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_virtual=True)
## mac-messages.h (module 'wimax'): void ns3::DsaAck::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_const=True, is_virtual=True)
## mac-messages.h (module 'wimax'): void ns3::DsaAck::SetConfirmationCode(uint16_t confirmationCode) [member function]
cls.add_method('SetConfirmationCode',
'void',
[param('uint16_t', 'confirmationCode')])
## mac-messages.h (module 'wimax'): void ns3::DsaAck::SetTransactionId(uint16_t transactionId) [member function]
cls.add_method('SetTransactionId',
'void',
[param('uint16_t', 'transactionId')])
return
def register_Ns3DsaReq_methods(root_module, cls):
## mac-messages.h (module 'wimax'): ns3::DsaReq::DsaReq(ns3::DsaReq const & arg0) [constructor]
cls.add_constructor([param('ns3::DsaReq const &', 'arg0')])
## mac-messages.h (module 'wimax'): ns3::DsaReq::DsaReq() [constructor]
cls.add_constructor([])
## mac-messages.h (module 'wimax'): ns3::DsaReq::DsaReq(ns3::ServiceFlow sf) [constructor]
cls.add_constructor([param('ns3::ServiceFlow', 'sf')])
## mac-messages.h (module 'wimax'): uint32_t ns3::DsaReq::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_virtual=True)
## mac-messages.h (module 'wimax'): ns3::Cid ns3::DsaReq::GetCid() const [member function]
cls.add_method('GetCid',
'ns3::Cid',
[],
is_const=True)
## mac-messages.h (module 'wimax'): ns3::TypeId ns3::DsaReq::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## mac-messages.h (module 'wimax'): std::string ns3::DsaReq::GetName() const [member function]
cls.add_method('GetName',
'std::string',
[],
is_const=True)
## mac-messages.h (module 'wimax'): uint32_t ns3::DsaReq::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True, is_virtual=True)
## mac-messages.h (module 'wimax'): ns3::ServiceFlow ns3::DsaReq::GetServiceFlow() const [member function]
cls.add_method('GetServiceFlow',
'ns3::ServiceFlow',
[],
is_const=True)
## mac-messages.h (module 'wimax'): uint32_t ns3::DsaReq::GetSfid() const [member function]
cls.add_method('GetSfid',
'uint32_t',
[],
is_const=True)
## mac-messages.h (module 'wimax'): uint16_t ns3::DsaReq::GetTransactionId() const [member function]
cls.add_method('GetTransactionId',
'uint16_t',
[],
is_const=True)
## mac-messages.h (module 'wimax'): static ns3::TypeId ns3::DsaReq::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## mac-messages.h (module 'wimax'): void ns3::DsaReq::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_virtual=True)
## mac-messages.h (module 'wimax'): void ns3::DsaReq::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_const=True, is_virtual=True)
## mac-messages.h (module 'wimax'): void ns3::DsaReq::SetCid(ns3::Cid cid) [member function]
cls.add_method('SetCid',
'void',
[param('ns3::Cid', 'cid')])
## mac-messages.h (module 'wimax'): void ns3::DsaReq::SetServiceFlow(ns3::ServiceFlow sf) [member function]
cls.add_method('SetServiceFlow',
'void',
[param('ns3::ServiceFlow', 'sf')])
## mac-messages.h (module 'wimax'): void ns3::DsaReq::SetSfid(uint32_t sfid) [member function]
cls.add_method('SetSfid',
'void',
[param('uint32_t', 'sfid')])
## mac-messages.h (module 'wimax'): void ns3::DsaReq::SetTransactionId(uint16_t transactionId) [member function]
cls.add_method('SetTransactionId',
'void',
[param('uint16_t', 'transactionId')])
return
def register_Ns3DsaRsp_methods(root_module, cls):
## mac-messages.h (module 'wimax'): ns3::DsaRsp::DsaRsp(ns3::DsaRsp const & arg0) [constructor]
cls.add_constructor([param('ns3::DsaRsp const &', 'arg0')])
## mac-messages.h (module 'wimax'): ns3::DsaRsp::DsaRsp() [constructor]
cls.add_constructor([])
## mac-messages.h (module 'wimax'): uint32_t ns3::DsaRsp::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_virtual=True)
## mac-messages.h (module 'wimax'): ns3::Cid ns3::DsaRsp::GetCid() const [member function]
cls.add_method('GetCid',
'ns3::Cid',
[],
is_const=True)
## mac-messages.h (module 'wimax'): uint16_t ns3::DsaRsp::GetConfirmationCode() const [member function]
cls.add_method('GetConfirmationCode',
'uint16_t',
[],
is_const=True)
## mac-messages.h (module 'wimax'): ns3::TypeId ns3::DsaRsp::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## mac-messages.h (module 'wimax'): std::string ns3::DsaRsp::GetName() const [member function]
cls.add_method('GetName',
'std::string',
[],
is_const=True)
## mac-messages.h (module 'wimax'): uint32_t ns3::DsaRsp::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True, is_virtual=True)
## mac-messages.h (module 'wimax'): ns3::ServiceFlow ns3::DsaRsp::GetServiceFlow() const [member function]
cls.add_method('GetServiceFlow',
'ns3::ServiceFlow',
[],
is_const=True)
## mac-messages.h (module 'wimax'): uint32_t ns3::DsaRsp::GetSfid() const [member function]
cls.add_method('GetSfid',
'uint32_t',
[],
is_const=True)
## mac-messages.h (module 'wimax'): uint16_t ns3::DsaRsp::GetTransactionId() const [member function]
cls.add_method('GetTransactionId',
'uint16_t',
[],
is_const=True)
## mac-messages.h (module 'wimax'): static ns3::TypeId ns3::DsaRsp::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## mac-messages.h (module 'wimax'): void ns3::DsaRsp::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_virtual=True)
## mac-messages.h (module 'wimax'): void ns3::DsaRsp::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_const=True, is_virtual=True)
## mac-messages.h (module 'wimax'): void ns3::DsaRsp::SetCid(ns3::Cid cid) [member function]
cls.add_method('SetCid',
'void',
[param('ns3::Cid', 'cid')])
## mac-messages.h (module 'wimax'): void ns3::DsaRsp::SetConfirmationCode(uint16_t confirmationCode) [member function]
cls.add_method('SetConfirmationCode',
'void',
[param('uint16_t', 'confirmationCode')])
## mac-messages.h (module 'wimax'): void ns3::DsaRsp::SetServiceFlow(ns3::ServiceFlow sf) [member function]
cls.add_method('SetServiceFlow',
'void',
[param('ns3::ServiceFlow', 'sf')])
## mac-messages.h (module 'wimax'): void ns3::DsaRsp::SetSfid(uint32_t sfid) [member function]
cls.add_method('SetSfid',
'void',
[param('uint32_t', 'sfid')])
## mac-messages.h (module 'wimax'): void ns3::DsaRsp::SetTransactionId(uint16_t transactionId) [member function]
cls.add_method('SetTransactionId',
'void',
[param('uint16_t', 'transactionId')])
return
def register_Ns3EmpiricalRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): ns3::EmpiricalRandomVariable::EmpiricalRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): void ns3::EmpiricalRandomVariable::CDF(double v, double c) [member function]
cls.add_method('CDF',
'void',
[param('double', 'v'), param('double', 'c')])
## random-variable-stream.h (module 'core'): uint32_t ns3::EmpiricalRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::EmpiricalRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): double ns3::EmpiricalRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): double ns3::EmpiricalRandomVariable::Interpolate(double c1, double c2, double v1, double v2, double r) [member function]
cls.add_method('Interpolate',
'double',
[param('double', 'c1'), param('double', 'c2'), param('double', 'v1'), param('double', 'v2'), param('double', 'r')],
is_virtual=True, visibility='private')
## random-variable-stream.h (module 'core'): void ns3::EmpiricalRandomVariable::Validate() [member function]
cls.add_method('Validate',
'void',
[],
is_virtual=True, visibility='private')
return
def register_Ns3EmptyAttributeAccessor_methods(root_module, cls):
## attribute.h (module 'core'): ns3::EmptyAttributeAccessor::EmptyAttributeAccessor(ns3::EmptyAttributeAccessor const & arg0) [constructor]
cls.add_constructor([param('ns3::EmptyAttributeAccessor const &', 'arg0')])
## attribute.h (module 'core'): ns3::EmptyAttributeAccessor::EmptyAttributeAccessor() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
cls.add_method('Get',
'bool',
[param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::HasGetter() const [member function]
cls.add_method('HasGetter',
'bool',
[],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::HasSetter() const [member function]
cls.add_method('HasSetter',
'bool',
[],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
cls.add_method('Set',
'bool',
[param('ns3::ObjectBase *', 'object'), param('ns3::AttributeValue const &', 'value')],
is_const=True, is_virtual=True)
return
def register_Ns3EmptyAttributeChecker_methods(root_module, cls):
## attribute.h (module 'core'): ns3::EmptyAttributeChecker::EmptyAttributeChecker(ns3::EmptyAttributeChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::EmptyAttributeChecker const &', 'arg0')])
## attribute.h (module 'core'): ns3::EmptyAttributeChecker::EmptyAttributeChecker() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::EmptyAttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
cls.add_method('Check',
'bool',
[param('ns3::AttributeValue const &', 'value')],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
cls.add_method('Copy',
'bool',
[param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeChecker::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::EmptyAttributeChecker::GetUnderlyingTypeInformation() const [member function]
cls.add_method('GetUnderlyingTypeInformation',
'std::string',
[],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::EmptyAttributeChecker::GetValueTypeName() const [member function]
cls.add_method('GetValueTypeName',
'std::string',
[],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeChecker::HasUnderlyingTypeInformation() const [member function]
cls.add_method('HasUnderlyingTypeInformation',
'bool',
[],
is_const=True, is_virtual=True)
return
def register_Ns3EmptyAttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & arg0) [constructor]
cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True, visibility='private')
## attribute.h (module 'core'): bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True, visibility='private')
## attribute.h (module 'core'): std::string ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True, visibility='private')
return
def register_Ns3ErlangRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ErlangRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::ErlangRandomVariable::ErlangRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): uint32_t ns3::ErlangRandomVariable::GetK() const [member function]
cls.add_method('GetK',
'uint32_t',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ErlangRandomVariable::GetLambda() const [member function]
cls.add_method('GetLambda',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ErlangRandomVariable::GetValue(uint32_t k, double lambda) [member function]
cls.add_method('GetValue',
'double',
[param('uint32_t', 'k'), param('double', 'lambda')])
## random-variable-stream.h (module 'core'): uint32_t ns3::ErlangRandomVariable::GetInteger(uint32_t k, uint32_t lambda) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'k'), param('uint32_t', 'lambda')])
## random-variable-stream.h (module 'core'): double ns3::ErlangRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::ErlangRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3EventImpl_methods(root_module, cls):
## event-impl.h (module 'core'): ns3::EventImpl::EventImpl(ns3::EventImpl const & arg0) [constructor]
cls.add_constructor([param('ns3::EventImpl const &', 'arg0')])
## event-impl.h (module 'core'): ns3::EventImpl::EventImpl() [constructor]
cls.add_constructor([])
## event-impl.h (module 'core'): void ns3::EventImpl::Cancel() [member function]
cls.add_method('Cancel',
'void',
[])
## event-impl.h (module 'core'): void ns3::EventImpl::Invoke() [member function]
cls.add_method('Invoke',
'void',
[])
## event-impl.h (module 'core'): bool ns3::EventImpl::IsCancelled() [member function]
cls.add_method('IsCancelled',
'bool',
[])
## event-impl.h (module 'core'): void ns3::EventImpl::Notify() [member function]
cls.add_method('Notify',
'void',
[],
is_virtual=True, is_pure_virtual=True, visibility='protected')
return
def register_Ns3ExponentialRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ExponentialRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::ExponentialRandomVariable::ExponentialRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::ExponentialRandomVariable::GetMean() const [member function]
cls.add_method('GetMean',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ExponentialRandomVariable::GetBound() const [member function]
cls.add_method('GetBound',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ExponentialRandomVariable::GetValue(double mean, double bound) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'mean'), param('double', 'bound')])
## random-variable-stream.h (module 'core'): uint32_t ns3::ExponentialRandomVariable::GetInteger(uint32_t mean, uint32_t bound) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'mean'), param('uint32_t', 'bound')])
## random-variable-stream.h (module 'core'): double ns3::ExponentialRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::ExponentialRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3FixedRssLossModel_methods(root_module, cls):
## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::FixedRssLossModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## propagation-loss-model.h (module 'propagation'): ns3::FixedRssLossModel::FixedRssLossModel() [constructor]
cls.add_constructor([])
## propagation-loss-model.h (module 'propagation'): void ns3::FixedRssLossModel::SetRss(double rss) [member function]
cls.add_method('SetRss',
'void',
[param('double', 'rss')])
## propagation-loss-model.h (module 'propagation'): double ns3::FixedRssLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('DoCalcRxPower',
'double',
[param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True, is_virtual=True, visibility='private')
## propagation-loss-model.h (module 'propagation'): int64_t ns3::FixedRssLossModel::DoAssignStreams(int64_t stream) [member function]
cls.add_method('DoAssignStreams',
'int64_t',
[param('int64_t', 'stream')],
is_virtual=True, visibility='private')
return
def register_Ns3FragmentationSubheader_methods(root_module, cls):
## wimax-mac-header.h (module 'wimax'): ns3::FragmentationSubheader::FragmentationSubheader(ns3::FragmentationSubheader const & arg0) [constructor]
cls.add_constructor([param('ns3::FragmentationSubheader const &', 'arg0')])
## wimax-mac-header.h (module 'wimax'): ns3::FragmentationSubheader::FragmentationSubheader() [constructor]
cls.add_constructor([])
## wimax-mac-header.h (module 'wimax'): uint32_t ns3::FragmentationSubheader::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_virtual=True)
## wimax-mac-header.h (module 'wimax'): uint8_t ns3::FragmentationSubheader::GetFc() const [member function]
cls.add_method('GetFc',
'uint8_t',
[],
is_const=True)
## wimax-mac-header.h (module 'wimax'): uint8_t ns3::FragmentationSubheader::GetFsn() const [member function]
cls.add_method('GetFsn',
'uint8_t',
[],
is_const=True)
## wimax-mac-header.h (module 'wimax'): ns3::TypeId ns3::FragmentationSubheader::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## wimax-mac-header.h (module 'wimax'): std::string ns3::FragmentationSubheader::GetName() const [member function]
cls.add_method('GetName',
'std::string',
[],
is_const=True)
## wimax-mac-header.h (module 'wimax'): uint32_t ns3::FragmentationSubheader::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True, is_virtual=True)
## wimax-mac-header.h (module 'wimax'): static ns3::TypeId ns3::FragmentationSubheader::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## wimax-mac-header.h (module 'wimax'): void ns3::FragmentationSubheader::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_virtual=True)
## wimax-mac-header.h (module 'wimax'): void ns3::FragmentationSubheader::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_const=True, is_virtual=True)
## wimax-mac-header.h (module 'wimax'): void ns3::FragmentationSubheader::SetFc(uint8_t fc) [member function]
cls.add_method('SetFc',
'void',
[param('uint8_t', 'fc')])
## wimax-mac-header.h (module 'wimax'): void ns3::FragmentationSubheader::SetFsn(uint8_t fsn) [member function]
cls.add_method('SetFsn',
'void',
[param('uint8_t', 'fsn')])
return
def register_Ns3FriisPropagationLossModel_methods(root_module, cls):
## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::FriisPropagationLossModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## propagation-loss-model.h (module 'propagation'): ns3::FriisPropagationLossModel::FriisPropagationLossModel() [constructor]
cls.add_constructor([])
## propagation-loss-model.h (module 'propagation'): void ns3::FriisPropagationLossModel::SetFrequency(double frequency) [member function]
cls.add_method('SetFrequency',
'void',
[param('double', 'frequency')])
## propagation-loss-model.h (module 'propagation'): void ns3::FriisPropagationLossModel::SetSystemLoss(double systemLoss) [member function]
cls.add_method('SetSystemLoss',
'void',
[param('double', 'systemLoss')])
## propagation-loss-model.h (module 'propagation'): void ns3::FriisPropagationLossModel::SetMinLoss(double minLoss) [member function]
cls.add_method('SetMinLoss',
'void',
[param('double', 'minLoss')])
## propagation-loss-model.h (module 'propagation'): double ns3::FriisPropagationLossModel::GetMinLoss() const [member function]
cls.add_method('GetMinLoss',
'double',
[],
is_const=True)
## propagation-loss-model.h (module 'propagation'): double ns3::FriisPropagationLossModel::GetFrequency() const [member function]
cls.add_method('GetFrequency',
'double',
[],
is_const=True)
## propagation-loss-model.h (module 'propagation'): double ns3::FriisPropagationLossModel::GetSystemLoss() const [member function]
cls.add_method('GetSystemLoss',
'double',
[],
is_const=True)
## propagation-loss-model.h (module 'propagation'): double ns3::FriisPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('DoCalcRxPower',
'double',
[param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True, is_virtual=True, visibility='private')
## propagation-loss-model.h (module 'propagation'): int64_t ns3::FriisPropagationLossModel::DoAssignStreams(int64_t stream) [member function]
cls.add_method('DoAssignStreams',
'int64_t',
[param('int64_t', 'stream')],
is_virtual=True, visibility='private')
return
def register_Ns3GammaRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::GammaRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::GammaRandomVariable::GammaRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::GammaRandomVariable::GetAlpha() const [member function]
cls.add_method('GetAlpha',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::GammaRandomVariable::GetBeta() const [member function]
cls.add_method('GetBeta',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::GammaRandomVariable::GetValue(double alpha, double beta) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'alpha'), param('double', 'beta')])
## random-variable-stream.h (module 'core'): uint32_t ns3::GammaRandomVariable::GetInteger(uint32_t alpha, uint32_t beta) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'alpha'), param('uint32_t', 'beta')])
## random-variable-stream.h (module 'core'): double ns3::GammaRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::GammaRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3GenericMacHeader_methods(root_module, cls):
## wimax-mac-header.h (module 'wimax'): ns3::GenericMacHeader::GenericMacHeader(ns3::GenericMacHeader const & arg0) [constructor]
cls.add_constructor([param('ns3::GenericMacHeader const &', 'arg0')])
## wimax-mac-header.h (module 'wimax'): ns3::GenericMacHeader::GenericMacHeader() [constructor]
cls.add_constructor([])
## wimax-mac-header.h (module 'wimax'): uint32_t ns3::GenericMacHeader::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_virtual=True)
## wimax-mac-header.h (module 'wimax'): uint8_t ns3::GenericMacHeader::GetCi() const [member function]
cls.add_method('GetCi',
'uint8_t',
[],
is_const=True)
## wimax-mac-header.h (module 'wimax'): ns3::Cid ns3::GenericMacHeader::GetCid() const [member function]
cls.add_method('GetCid',
'ns3::Cid',
[],
is_const=True)
## wimax-mac-header.h (module 'wimax'): uint8_t ns3::GenericMacHeader::GetEc() const [member function]
cls.add_method('GetEc',
'uint8_t',
[],
is_const=True)
## wimax-mac-header.h (module 'wimax'): uint8_t ns3::GenericMacHeader::GetEks() const [member function]
cls.add_method('GetEks',
'uint8_t',
[],
is_const=True)
## wimax-mac-header.h (module 'wimax'): uint8_t ns3::GenericMacHeader::GetHcs() const [member function]
cls.add_method('GetHcs',
'uint8_t',
[],
is_const=True)
## wimax-mac-header.h (module 'wimax'): uint8_t ns3::GenericMacHeader::GetHt() const [member function]
cls.add_method('GetHt',
'uint8_t',
[],
is_const=True)
## wimax-mac-header.h (module 'wimax'): ns3::TypeId ns3::GenericMacHeader::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## wimax-mac-header.h (module 'wimax'): uint16_t ns3::GenericMacHeader::GetLen() const [member function]
cls.add_method('GetLen',
'uint16_t',
[],
is_const=True)
## wimax-mac-header.h (module 'wimax'): std::string ns3::GenericMacHeader::GetName() const [member function]
cls.add_method('GetName',
'std::string',
[],
is_const=True)
## wimax-mac-header.h (module 'wimax'): uint32_t ns3::GenericMacHeader::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True, is_virtual=True)
## wimax-mac-header.h (module 'wimax'): uint8_t ns3::GenericMacHeader::GetType() const [member function]
cls.add_method('GetType',
'uint8_t',
[],
is_const=True)
## wimax-mac-header.h (module 'wimax'): static ns3::TypeId ns3::GenericMacHeader::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## wimax-mac-header.h (module 'wimax'): void ns3::GenericMacHeader::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_virtual=True)
## wimax-mac-header.h (module 'wimax'): void ns3::GenericMacHeader::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_const=True, is_virtual=True)
## wimax-mac-header.h (module 'wimax'): void ns3::GenericMacHeader::SetCi(uint8_t ci) [member function]
cls.add_method('SetCi',
'void',
[param('uint8_t', 'ci')])
## wimax-mac-header.h (module 'wimax'): void ns3::GenericMacHeader::SetCid(ns3::Cid cid) [member function]
cls.add_method('SetCid',
'void',
[param('ns3::Cid', 'cid')])
## wimax-mac-header.h (module 'wimax'): void ns3::GenericMacHeader::SetEc(uint8_t ec) [member function]
cls.add_method('SetEc',
'void',
[param('uint8_t', 'ec')])
## wimax-mac-header.h (module 'wimax'): void ns3::GenericMacHeader::SetEks(uint8_t eks) [member function]
cls.add_method('SetEks',
'void',
[param('uint8_t', 'eks')])
## wimax-mac-header.h (module 'wimax'): void ns3::GenericMacHeader::SetHcs(uint8_t hcs) [member function]
cls.add_method('SetHcs',
'void',
[param('uint8_t', 'hcs')])
## wimax-mac-header.h (module 'wimax'): void ns3::GenericMacHeader::SetHt(uint8_t ht) [member function]
cls.add_method('SetHt',
'void',
[param('uint8_t', 'ht')])
## wimax-mac-header.h (module 'wimax'): void ns3::GenericMacHeader::SetLen(uint16_t len) [member function]
cls.add_method('SetLen',
'void',
[param('uint16_t', 'len')])
## wimax-mac-header.h (module 'wimax'): void ns3::GenericMacHeader::SetType(uint8_t type) [member function]
cls.add_method('SetType',
'void',
[param('uint8_t', 'type')])
## wimax-mac-header.h (module 'wimax'): bool ns3::GenericMacHeader::check_hcs() const [member function]
cls.add_method('check_hcs',
'bool',
[],
is_const=True)
return
def register_Ns3GrantManagementSubheader_methods(root_module, cls):
## wimax-mac-header.h (module 'wimax'): ns3::GrantManagementSubheader::GrantManagementSubheader(ns3::GrantManagementSubheader const & arg0) [constructor]
cls.add_constructor([param('ns3::GrantManagementSubheader const &', 'arg0')])
## wimax-mac-header.h (module 'wimax'): ns3::GrantManagementSubheader::GrantManagementSubheader() [constructor]
cls.add_constructor([])
## wimax-mac-header.h (module 'wimax'): uint32_t ns3::GrantManagementSubheader::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_virtual=True)
## wimax-mac-header.h (module 'wimax'): ns3::TypeId ns3::GrantManagementSubheader::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## wimax-mac-header.h (module 'wimax'): std::string ns3::GrantManagementSubheader::GetName() const [member function]
cls.add_method('GetName',
'std::string',
[],
is_const=True)
## wimax-mac-header.h (module 'wimax'): uint16_t ns3::GrantManagementSubheader::GetPbr() const [member function]
cls.add_method('GetPbr',
'uint16_t',
[],
is_const=True)
## wimax-mac-header.h (module 'wimax'): uint8_t ns3::GrantManagementSubheader::GetPm() const [member function]
cls.add_method('GetPm',
'uint8_t',
[],
is_const=True)
## wimax-mac-header.h (module 'wimax'): uint32_t ns3::GrantManagementSubheader::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True, is_virtual=True)
## wimax-mac-header.h (module 'wimax'): uint8_t ns3::GrantManagementSubheader::GetSi() const [member function]
cls.add_method('GetSi',
'uint8_t',
[],
is_const=True)
## wimax-mac-header.h (module 'wimax'): static ns3::TypeId ns3::GrantManagementSubheader::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## wimax-mac-header.h (module 'wimax'): void ns3::GrantManagementSubheader::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_virtual=True)
## wimax-mac-header.h (module 'wimax'): void ns3::GrantManagementSubheader::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_const=True, is_virtual=True)
## wimax-mac-header.h (module 'wimax'): void ns3::GrantManagementSubheader::SetPbr(uint16_t pbr) [member function]
cls.add_method('SetPbr',
'void',
[param('uint16_t', 'pbr')])
## wimax-mac-header.h (module 'wimax'): void ns3::GrantManagementSubheader::SetPm(uint8_t pm) [member function]
cls.add_method('SetPm',
'void',
[param('uint8_t', 'pm')])
## wimax-mac-header.h (module 'wimax'): void ns3::GrantManagementSubheader::SetSi(uint8_t si) [member function]
cls.add_method('SetSi',
'void',
[param('uint8_t', 'si')])
return
def register_Ns3IpcsClassifier_methods(root_module, cls):
## ipcs-classifier.h (module 'wimax'): ns3::IpcsClassifier::IpcsClassifier(ns3::IpcsClassifier const & arg0) [constructor]
cls.add_constructor([param('ns3::IpcsClassifier const &', 'arg0')])
## ipcs-classifier.h (module 'wimax'): ns3::IpcsClassifier::IpcsClassifier() [constructor]
cls.add_constructor([])
## ipcs-classifier.h (module 'wimax'): ns3::ServiceFlow * ns3::IpcsClassifier::Classify(ns3::Ptr<const ns3::Packet> packet, ns3::Ptr<ns3::ServiceFlowManager> sfm, ns3::ServiceFlow::Direction dir) [member function]
cls.add_method('Classify',
'ns3::ServiceFlow *',
[param('ns3::Ptr< ns3::Packet const >', 'packet'), param('ns3::Ptr< ns3::ServiceFlowManager >', 'sfm'), param('ns3::ServiceFlow::Direction', 'dir')])
## ipcs-classifier.h (module 'wimax'): static ns3::TypeId ns3::IpcsClassifier::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
return
def register_Ns3Ipv4AddressChecker_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker(ns3::Ipv4AddressChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::Ipv4AddressChecker const &', 'arg0')])
return
def register_Ns3Ipv4AddressValue_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4Address const & value) [constructor]
cls.add_constructor([param('ns3::Ipv4Address const &', 'value')])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4AddressValue const & arg0) [constructor]
cls.add_constructor([param('ns3::Ipv4AddressValue const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4AddressValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4AddressValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4AddressValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv4Address',
[],
is_const=True)
## ipv4-address.h (module 'network'): std::string ns3::Ipv4AddressValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4AddressValue::Set(ns3::Ipv4Address const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv4Address const &', 'value')])
return
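# Illustrative sketch (assuming an 'ns.network' import): the value class registered here,
# and the analogous Ipv4Mask/Ipv6Address/Ipv6Prefix/Mac48Address value classes below,
# all follow the same AttributeValue pattern of construct / Get / Set.
#
#   import ns.network
#
#   v = ns.network.Ipv4AddressValue(ns.network.Ipv4Address("10.1.1.1"))
#   addr = v.Get()                               # back to an ns3::Ipv4Address
#   v.Set(ns.network.Ipv4Address("10.1.1.2"))    # replace the wrapped value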
def register_Ns3Ipv4MaskChecker_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker(ns3::Ipv4MaskChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::Ipv4MaskChecker const &', 'arg0')])
return
def register_Ns3Ipv4MaskValue_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4Mask const & value) [constructor]
cls.add_constructor([param('ns3::Ipv4Mask const &', 'value')])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4MaskValue const & arg0) [constructor]
cls.add_constructor([param('ns3::Ipv4MaskValue const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4MaskValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4MaskValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv4-address.h (module 'network'): ns3::Ipv4Mask ns3::Ipv4MaskValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv4Mask',
[],
is_const=True)
## ipv4-address.h (module 'network'): std::string ns3::Ipv4MaskValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4MaskValue::Set(ns3::Ipv4Mask const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv4Mask const &', 'value')])
return
def register_Ns3Ipv6AddressChecker_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker(ns3::Ipv6AddressChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::Ipv6AddressChecker const &', 'arg0')])
return
def register_Ns3Ipv6AddressValue_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6Address const & value) [constructor]
cls.add_constructor([param('ns3::Ipv6Address const &', 'value')])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6AddressValue const & arg0) [constructor]
cls.add_constructor([param('ns3::Ipv6AddressValue const &', 'arg0')])
## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6AddressValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6AddressValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6AddressValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv6Address',
[],
is_const=True)
## ipv6-address.h (module 'network'): std::string ns3::Ipv6AddressValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6AddressValue::Set(ns3::Ipv6Address const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv6Address const &', 'value')])
return
def register_Ns3Ipv6PrefixChecker_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker(ns3::Ipv6PrefixChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::Ipv6PrefixChecker const &', 'arg0')])
return
def register_Ns3Ipv6PrefixValue_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6Prefix const & value) [constructor]
cls.add_constructor([param('ns3::Ipv6Prefix const &', 'value')])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6PrefixValue const & arg0) [constructor]
cls.add_constructor([param('ns3::Ipv6PrefixValue const &', 'arg0')])
## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6PrefixValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6PrefixValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix ns3::Ipv6PrefixValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv6Prefix',
[],
is_const=True)
## ipv6-address.h (module 'network'): std::string ns3::Ipv6PrefixValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6PrefixValue::Set(ns3::Ipv6Prefix const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv6Prefix const &', 'value')])
return
def register_Ns3LogDistancePropagationLossModel_methods(root_module, cls):
## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::LogDistancePropagationLossModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## propagation-loss-model.h (module 'propagation'): ns3::LogDistancePropagationLossModel::LogDistancePropagationLossModel() [constructor]
cls.add_constructor([])
## propagation-loss-model.h (module 'propagation'): void ns3::LogDistancePropagationLossModel::SetPathLossExponent(double n) [member function]
cls.add_method('SetPathLossExponent',
'void',
[param('double', 'n')])
## propagation-loss-model.h (module 'propagation'): double ns3::LogDistancePropagationLossModel::GetPathLossExponent() const [member function]
cls.add_method('GetPathLossExponent',
'double',
[],
is_const=True)
## propagation-loss-model.h (module 'propagation'): void ns3::LogDistancePropagationLossModel::SetReference(double referenceDistance, double referenceLoss) [member function]
cls.add_method('SetReference',
'void',
[param('double', 'referenceDistance'), param('double', 'referenceLoss')])
## propagation-loss-model.h (module 'propagation'): double ns3::LogDistancePropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('DoCalcRxPower',
'double',
[param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True, is_virtual=True, visibility='private')
## propagation-loss-model.h (module 'propagation'): int64_t ns3::LogDistancePropagationLossModel::DoAssignStreams(int64_t stream) [member function]
cls.add_method('DoAssignStreams',
'int64_t',
[param('int64_t', 'stream')],
is_virtual=True, visibility='private')
return
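# Illustrative sketch (assuming an 'ns.propagation' import, example values): the
# log-distance model registered above computes L = L0 + 10 n log10(d / d0) and is
# configured through the two public setters bound here.
#
#   import ns.propagation
#
#   loss = ns.propagation.LogDistancePropagationLossModel()
#   loss.SetPathLossExponent(3.0)      # the exponent n
#   loss.SetReference(1.0, 46.6777)    # reference distance d0 (m) and loss L0 (dB)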
def register_Ns3LogNormalRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::LogNormalRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::LogNormalRandomVariable::LogNormalRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetMu() const [member function]
cls.add_method('GetMu',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetSigma() const [member function]
cls.add_method('GetSigma',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetValue(double mu, double sigma) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'mu'), param('double', 'sigma')])
## random-variable-stream.h (module 'core'): uint32_t ns3::LogNormalRandomVariable::GetInteger(uint32_t mu, uint32_t sigma) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'mu'), param('uint32_t', 'sigma')])
## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::LogNormalRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3Mac48AddressChecker_methods(root_module, cls):
## mac48-address.h (module 'network'): ns3::Mac48AddressChecker::Mac48AddressChecker() [constructor]
cls.add_constructor([])
## mac48-address.h (module 'network'): ns3::Mac48AddressChecker::Mac48AddressChecker(ns3::Mac48AddressChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::Mac48AddressChecker const &', 'arg0')])
return
def register_Ns3Mac48AddressValue_methods(root_module, cls):
## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue() [constructor]
cls.add_constructor([])
## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue(ns3::Mac48Address const & value) [constructor]
cls.add_constructor([param('ns3::Mac48Address const &', 'value')])
## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue(ns3::Mac48AddressValue const & arg0) [constructor]
cls.add_constructor([param('ns3::Mac48AddressValue const &', 'arg0')])
## mac48-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Mac48AddressValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## mac48-address.h (module 'network'): bool ns3::Mac48AddressValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## mac48-address.h (module 'network'): ns3::Mac48Address ns3::Mac48AddressValue::Get() const [member function]
cls.add_method('Get',
'ns3::Mac48Address',
[],
is_const=True)
## mac48-address.h (module 'network'): std::string ns3::Mac48AddressValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## mac48-address.h (module 'network'): void ns3::Mac48AddressValue::Set(ns3::Mac48Address const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Mac48Address const &', 'value')])
return
def register_Ns3MatrixPropagationLossModel_methods(root_module, cls):
## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::MatrixPropagationLossModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## propagation-loss-model.h (module 'propagation'): ns3::MatrixPropagationLossModel::MatrixPropagationLossModel() [constructor]
cls.add_constructor([])
## propagation-loss-model.h (module 'propagation'): void ns3::MatrixPropagationLossModel::SetLoss(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b, double loss, bool symmetric=true) [member function]
cls.add_method('SetLoss',
'void',
[param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b'), param('double', 'loss'), param('bool', 'symmetric', default_value='true')])
## propagation-loss-model.h (module 'propagation'): void ns3::MatrixPropagationLossModel::SetDefaultLoss(double defaultLoss) [member function]
cls.add_method('SetDefaultLoss',
'void',
[param('double', 'defaultLoss')])
## propagation-loss-model.h (module 'propagation'): double ns3::MatrixPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('DoCalcRxPower',
'double',
[param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True, is_virtual=True, visibility='private')
## propagation-loss-model.h (module 'propagation'): int64_t ns3::MatrixPropagationLossModel::DoAssignStreams(int64_t stream) [member function]
cls.add_method('DoAssignStreams',
'int64_t',
[param('int64_t', 'stream')],
is_virtual=True, visibility='private')
return
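# Illustrative sketch (assumed 'ns.mobility' / 'ns.propagation' imports and example
# values): the matrix model above stores an explicit loss per pair of mobility models.
#
#   import ns.mobility
#   import ns.propagation
#
#   a = ns.mobility.ConstantPositionMobilityModel()
#   b = ns.mobility.ConstantPositionMobilityModel()
#   m = ns.propagation.MatrixPropagationLossModel()
#   m.SetDefaultLoss(200.0)     # dB; effectively disconnects unlisted pairs
#   m.SetLoss(a, b, 50.0)       # 50 dB between a and b, symmetric by default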
def register_Ns3NakagamiPropagationLossModel_methods(root_module, cls):
## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::NakagamiPropagationLossModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## propagation-loss-model.h (module 'propagation'): ns3::NakagamiPropagationLossModel::NakagamiPropagationLossModel() [constructor]
cls.add_constructor([])
## propagation-loss-model.h (module 'propagation'): double ns3::NakagamiPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('DoCalcRxPower',
'double',
[param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True, is_virtual=True, visibility='private')
## propagation-loss-model.h (module 'propagation'): int64_t ns3::NakagamiPropagationLossModel::DoAssignStreams(int64_t stream) [member function]
cls.add_method('DoAssignStreams',
'int64_t',
[param('int64_t', 'stream')],
is_virtual=True, visibility='private')
return
def register_Ns3NetDevice_methods(root_module, cls):
## net-device.h (module 'network'): ns3::NetDevice::NetDevice() [constructor]
cls.add_constructor([])
## net-device.h (module 'network'): ns3::NetDevice::NetDevice(ns3::NetDevice const & arg0) [constructor]
cls.add_constructor([param('ns3::NetDevice const &', 'arg0')])
## net-device.h (module 'network'): void ns3::NetDevice::AddLinkChangeCallback(ns3::Callback<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [member function]
cls.add_method('AddLinkChangeCallback',
'void',
[param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')],
is_virtual=True, is_pure_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetAddress() const [member function]
cls.add_method('GetAddress',
'ns3::Address',
[],
is_const=True, is_virtual=True, is_pure_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetBroadcast() const [member function]
cls.add_method('GetBroadcast',
'ns3::Address',
[],
is_const=True, is_virtual=True, is_pure_virtual=True)
## net-device.h (module 'network'): ns3::Ptr<ns3::Channel> ns3::NetDevice::GetChannel() const [member function]
cls.add_method('GetChannel',
'ns3::Ptr< ns3::Channel >',
[],
is_const=True, is_virtual=True, is_pure_virtual=True)
## net-device.h (module 'network'): uint32_t ns3::NetDevice::GetIfIndex() const [member function]
cls.add_method('GetIfIndex',
'uint32_t',
[],
is_const=True, is_virtual=True, is_pure_virtual=True)
## net-device.h (module 'network'): uint16_t ns3::NetDevice::GetMtu() const [member function]
cls.add_method('GetMtu',
'uint16_t',
[],
is_const=True, is_virtual=True, is_pure_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function]
cls.add_method('GetMulticast',
'ns3::Address',
[param('ns3::Ipv4Address', 'multicastGroup')],
is_const=True, is_virtual=True, is_pure_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function]
cls.add_method('GetMulticast',
'ns3::Address',
[param('ns3::Ipv6Address', 'addr')],
is_const=True, is_virtual=True, is_pure_virtual=True)
## net-device.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NetDevice::GetNode() const [member function]
cls.add_method('GetNode',
'ns3::Ptr< ns3::Node >',
[],
is_const=True, is_virtual=True, is_pure_virtual=True)
## net-device.h (module 'network'): static ns3::TypeId ns3::NetDevice::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsBridge() const [member function]
cls.add_method('IsBridge',
'bool',
[],
is_const=True, is_virtual=True, is_pure_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsBroadcast() const [member function]
cls.add_method('IsBroadcast',
'bool',
[],
is_const=True, is_virtual=True, is_pure_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsLinkUp() const [member function]
cls.add_method('IsLinkUp',
'bool',
[],
is_const=True, is_virtual=True, is_pure_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_const=True, is_virtual=True, is_pure_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsPointToPoint() const [member function]
cls.add_method('IsPointToPoint',
'bool',
[],
is_const=True, is_virtual=True, is_pure_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::NeedsArp() const [member function]
cls.add_method('NeedsArp',
'bool',
[],
is_const=True, is_virtual=True, is_pure_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('Send',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_virtual=True, is_pure_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::SendFrom(ns3::Ptr<ns3::Packet> packet, ns3::Address const & source, ns3::Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('SendFrom',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_virtual=True, is_pure_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetAddress(ns3::Address address) [member function]
cls.add_method('SetAddress',
'void',
[param('ns3::Address', 'address')],
is_virtual=True, is_pure_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetIfIndex(uint32_t const index) [member function]
cls.add_method('SetIfIndex',
'void',
[param('uint32_t const', 'index')],
is_virtual=True, is_pure_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::SetMtu(uint16_t const mtu) [member function]
cls.add_method('SetMtu',
'bool',
[param('uint16_t const', 'mtu')],
is_virtual=True, is_pure_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetNode(ns3::Ptr<ns3::Node> node) [member function]
cls.add_method('SetNode',
'void',
[param('ns3::Ptr< ns3::Node >', 'node')],
is_virtual=True, is_pure_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetPromiscReceiveCallback(ns3::NetDevice::PromiscReceiveCallback cb) [member function]
cls.add_method('SetPromiscReceiveCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_virtual=True, is_pure_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetReceiveCallback(ns3::NetDevice::ReceiveCallback cb) [member function]
cls.add_method('SetReceiveCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_virtual=True, is_pure_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::SupportsSendFrom() const [member function]
cls.add_method('SupportsSendFrom',
'bool',
[],
is_const=True, is_virtual=True, is_pure_virtual=True)
return
def register_Ns3NixVector_methods(root_module, cls):
cls.add_output_stream_operator()
## nix-vector.h (module 'network'): ns3::NixVector::NixVector() [constructor]
cls.add_constructor([])
## nix-vector.h (module 'network'): ns3::NixVector::NixVector(ns3::NixVector const & o) [constructor]
cls.add_constructor([param('ns3::NixVector const &', 'o')])
## nix-vector.h (module 'network'): void ns3::NixVector::AddNeighborIndex(uint32_t newBits, uint32_t numberOfBits) [member function]
cls.add_method('AddNeighborIndex',
'void',
[param('uint32_t', 'newBits'), param('uint32_t', 'numberOfBits')])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::BitCount(uint32_t numberOfNeighbors) const [member function]
cls.add_method('BitCount',
'uint32_t',
[param('uint32_t', 'numberOfNeighbors')],
is_const=True)
## nix-vector.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::NixVector::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::NixVector >',
[],
is_const=True)
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Deserialize(uint32_t const * buffer, uint32_t size) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('uint32_t const *', 'buffer'), param('uint32_t', 'size')])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::ExtractNeighborIndex(uint32_t numberOfBits) [member function]
cls.add_method('ExtractNeighborIndex',
'uint32_t',
[param('uint32_t', 'numberOfBits')])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetRemainingBits() [member function]
cls.add_method('GetRemainingBits',
'uint32_t',
[])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Serialize(uint32_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint32_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
return
def register_Ns3Node_methods(root_module, cls):
## node.h (module 'network'): ns3::Node::Node(ns3::Node const & arg0) [constructor]
cls.add_constructor([param('ns3::Node const &', 'arg0')])
## node.h (module 'network'): ns3::Node::Node() [constructor]
cls.add_constructor([])
## node.h (module 'network'): ns3::Node::Node(uint32_t systemId) [constructor]
cls.add_constructor([param('uint32_t', 'systemId')])
## node.h (module 'network'): uint32_t ns3::Node::AddApplication(ns3::Ptr<ns3::Application> application) [member function]
cls.add_method('AddApplication',
'uint32_t',
[param('ns3::Ptr< ns3::Application >', 'application')])
## node.h (module 'network'): uint32_t ns3::Node::AddDevice(ns3::Ptr<ns3::NetDevice> device) [member function]
cls.add_method('AddDevice',
'uint32_t',
[param('ns3::Ptr< ns3::NetDevice >', 'device')])
## node.h (module 'network'): static bool ns3::Node::ChecksumEnabled() [member function]
cls.add_method('ChecksumEnabled',
'bool',
[],
is_static=True)
## node.h (module 'network'): ns3::Ptr<ns3::Application> ns3::Node::GetApplication(uint32_t index) const [member function]
cls.add_method('GetApplication',
'ns3::Ptr< ns3::Application >',
[param('uint32_t', 'index')],
is_const=True)
## node.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::Node::GetDevice(uint32_t index) const [member function]
cls.add_method('GetDevice',
'ns3::Ptr< ns3::NetDevice >',
[param('uint32_t', 'index')],
is_const=True)
## node.h (module 'network'): uint32_t ns3::Node::GetId() const [member function]
cls.add_method('GetId',
'uint32_t',
[],
is_const=True)
## node.h (module 'network'): ns3::Time ns3::Node::GetLocalTime() const [member function]
cls.add_method('GetLocalTime',
'ns3::Time',
[],
is_const=True)
## node.h (module 'network'): uint32_t ns3::Node::GetNApplications() const [member function]
cls.add_method('GetNApplications',
'uint32_t',
[],
is_const=True)
## node.h (module 'network'): uint32_t ns3::Node::GetNDevices() const [member function]
cls.add_method('GetNDevices',
'uint32_t',
[],
is_const=True)
## node.h (module 'network'): uint32_t ns3::Node::GetSystemId() const [member function]
cls.add_method('GetSystemId',
'uint32_t',
[],
is_const=True)
## node.h (module 'network'): static ns3::TypeId ns3::Node::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## node.h (module 'network'): void ns3::Node::RegisterDeviceAdditionListener(ns3::Node::DeviceAdditionListener listener) [member function]
cls.add_method('RegisterDeviceAdditionListener',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')])
## node.h (module 'network'): void ns3::Node::RegisterProtocolHandler(ns3::Node::ProtocolHandler handler, uint16_t protocolType, ns3::Ptr<ns3::NetDevice> device, bool promiscuous=false) [member function]
cls.add_method('RegisterProtocolHandler',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler'), param('uint16_t', 'protocolType'), param('ns3::Ptr< ns3::NetDevice >', 'device'), param('bool', 'promiscuous', default_value='false')])
## node.h (module 'network'): void ns3::Node::UnregisterDeviceAdditionListener(ns3::Node::DeviceAdditionListener listener) [member function]
cls.add_method('UnregisterDeviceAdditionListener',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')])
## node.h (module 'network'): void ns3::Node::UnregisterProtocolHandler(ns3::Node::ProtocolHandler handler) [member function]
cls.add_method('UnregisterProtocolHandler',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler')])
## node.h (module 'network'): void ns3::Node::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
is_virtual=True, visibility='protected')
## node.h (module 'network'): void ns3::Node::DoInitialize() [member function]
cls.add_method('DoInitialize',
'void',
[],
is_virtual=True, visibility='protected')
return
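# Illustrative sketch (assumed 'ns.network' import; SimpleNetDevice is just one
# convenient concrete NetDevice and is not required by these bindings):
#
#   import ns.network
#
#   node = ns.network.Node()
#   dev = ns.network.SimpleNetDevice()
#   ifindex = node.AddDevice(dev)       # device gets its node and interface index
#   assert node.GetNDevices() == 1
#   assert node.GetDevice(ifindex).GetNode().GetId() == node.GetId()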
def register_Ns3NormalRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): ns3::NormalRandomVariable::INFINITE_VALUE [variable]
cls.add_static_attribute('INFINITE_VALUE', 'double const', is_const=True)
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::NormalRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::NormalRandomVariable::NormalRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetMean() const [member function]
cls.add_method('GetMean',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetVariance() const [member function]
cls.add_method('GetVariance',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetBound() const [member function]
cls.add_method('GetBound',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetValue(double mean, double variance, double bound=ns3::NormalRandomVariable::INFINITE_VALUE) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'mean'), param('double', 'variance'), param('double', 'bound', default_value='ns3::NormalRandomVariable::INFINITE_VALUE')])
## random-variable-stream.h (module 'core'): uint32_t ns3::NormalRandomVariable::GetInteger(uint32_t mean, uint32_t variance, uint32_t bound) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'mean'), param('uint32_t', 'variance'), param('uint32_t', 'bound')])
## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::NormalRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
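# Illustrative sketch (assumed 'ns.core' import): the normal variate registered above
# can be parameterised per call or through its "Mean"/"Variance" attributes.
#
#   import ns.core
#
#   rv = ns.core.NormalRandomVariable()
#   x = rv.GetValue(5.0, 2.0)          # mean 5, variance 2, unbounded by default
#   rv.SetAttribute("Mean", ns.core.DoubleValue(5.0))
#   rv.SetAttribute("Variance", ns.core.DoubleValue(2.0))
#   y = rv.GetValue()                  # draws using the configured attributes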
def register_Ns3ObjectFactoryChecker_methods(root_module, cls):
## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker() [constructor]
cls.add_constructor([])
## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker(ns3::ObjectFactoryChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::ObjectFactoryChecker const &', 'arg0')])
return
def register_Ns3ObjectFactoryValue_methods(root_module, cls):
## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue() [constructor]
cls.add_constructor([])
## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactory const & value) [constructor]
cls.add_constructor([param('ns3::ObjectFactory const &', 'value')])
## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactoryValue const & arg0) [constructor]
cls.add_constructor([param('ns3::ObjectFactoryValue const &', 'arg0')])
## object-factory.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::ObjectFactoryValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## object-factory.h (module 'core'): bool ns3::ObjectFactoryValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## object-factory.h (module 'core'): ns3::ObjectFactory ns3::ObjectFactoryValue::Get() const [member function]
cls.add_method('Get',
'ns3::ObjectFactory',
[],
is_const=True)
## object-factory.h (module 'core'): std::string ns3::ObjectFactoryValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## object-factory.h (module 'core'): void ns3::ObjectFactoryValue::Set(ns3::ObjectFactory const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::ObjectFactory const &', 'value')])
return
def register_Ns3OutputStreamWrapper_methods(root_module, cls):
## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper::OutputStreamWrapper(ns3::OutputStreamWrapper const & arg0) [constructor]
cls.add_constructor([param('ns3::OutputStreamWrapper const &', 'arg0')])
## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper::OutputStreamWrapper(std::string filename, std::ios_base::openmode filemode) [constructor]
cls.add_constructor([param('std::string', 'filename'), param('std::ios_base::openmode', 'filemode')])
## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper::OutputStreamWrapper(std::ostream * os) [constructor]
cls.add_constructor([param('std::ostream *', 'os')])
## output-stream-wrapper.h (module 'network'): std::ostream * ns3::OutputStreamWrapper::GetStream() [member function]
cls.add_method('GetStream',
'std::ostream *',
[])
return
def register_Ns3Packet_methods(root_module, cls):
cls.add_output_stream_operator()
## packet.h (module 'network'): ns3::Packet::Packet() [constructor]
cls.add_constructor([])
## packet.h (module 'network'): ns3::Packet::Packet(ns3::Packet const & o) [constructor]
cls.add_constructor([param('ns3::Packet const &', 'o')])
## packet.h (module 'network'): ns3::Packet::Packet(uint32_t size) [constructor]
cls.add_constructor([param('uint32_t', 'size')])
## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size, bool magic) [constructor]
cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size'), param('bool', 'magic')])
## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size) [constructor]
cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## packet.h (module 'network'): void ns3::Packet::AddAtEnd(ns3::Ptr<const ns3::Packet> packet) [member function]
cls.add_method('AddAtEnd',
'void',
[param('ns3::Ptr< ns3::Packet const >', 'packet')])
## packet.h (module 'network'): void ns3::Packet::AddByteTag(ns3::Tag const & tag) const [member function]
cls.add_method('AddByteTag',
'void',
[param('ns3::Tag const &', 'tag')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::AddByteTag(ns3::Tag const & tag, uint32_t start, uint32_t end) const [member function]
cls.add_method('AddByteTag',
'void',
[param('ns3::Tag const &', 'tag'), param('uint32_t', 'start'), param('uint32_t', 'end')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::AddHeader(ns3::Header const & header) [member function]
cls.add_method('AddHeader',
'void',
[param('ns3::Header const &', 'header')])
## packet.h (module 'network'): void ns3::Packet::AddPacketTag(ns3::Tag const & tag) const [member function]
cls.add_method('AddPacketTag',
'void',
[param('ns3::Tag const &', 'tag')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::AddPaddingAtEnd(uint32_t size) [member function]
cls.add_method('AddPaddingAtEnd',
'void',
[param('uint32_t', 'size')])
## packet.h (module 'network'): void ns3::Packet::AddTrailer(ns3::Trailer const & trailer) [member function]
cls.add_method('AddTrailer',
'void',
[param('ns3::Trailer const &', 'trailer')])
## packet.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::Packet::BeginItem() const [member function]
cls.add_method('BeginItem',
'ns3::PacketMetadata::ItemIterator',
[],
is_const=True)
## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::Packet >',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::CopyData(uint8_t * buffer, uint32_t size) const [member function]
cls.add_method('CopyData',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::CopyData(std::ostream * os, uint32_t size) const [member function]
cls.add_method('CopyData',
'void',
[param('std::ostream *', 'os'), param('uint32_t', 'size')],
is_const=True)
## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::CreateFragment(uint32_t start, uint32_t length) const [member function]
cls.add_method('CreateFragment',
'ns3::Ptr< ns3::Packet >',
[param('uint32_t', 'start'), param('uint32_t', 'length')],
is_const=True)
## packet.h (module 'network'): static void ns3::Packet::EnableChecking() [member function]
cls.add_method('EnableChecking',
'void',
[],
is_static=True)
## packet.h (module 'network'): static void ns3::Packet::EnablePrinting() [member function]
cls.add_method('EnablePrinting',
'void',
[],
is_static=True)
## packet.h (module 'network'): bool ns3::Packet::FindFirstMatchingByteTag(ns3::Tag & tag) const [member function]
cls.add_method('FindFirstMatchingByteTag',
'bool',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): ns3::ByteTagIterator ns3::Packet::GetByteTagIterator() const [member function]
cls.add_method('GetByteTagIterator',
'ns3::ByteTagIterator',
[],
is_const=True)
## packet.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::Packet::GetNixVector() const [member function]
cls.add_method('GetNixVector',
'ns3::Ptr< ns3::NixVector >',
[],
is_const=True)
## packet.h (module 'network'): ns3::PacketTagIterator ns3::Packet::GetPacketTagIterator() const [member function]
cls.add_method('GetPacketTagIterator',
'ns3::PacketTagIterator',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::GetSize() const [member function]
cls.add_method('GetSize',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): uint64_t ns3::Packet::GetUid() const [member function]
cls.add_method('GetUid',
'uint64_t',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::PeekHeader(ns3::Header & header) const [member function]
cls.add_method('PeekHeader',
'uint32_t',
[param('ns3::Header &', 'header')],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::PeekHeader(ns3::Header & header, uint32_t size) const [member function]
cls.add_method('PeekHeader',
'uint32_t',
[param('ns3::Header &', 'header'), param('uint32_t', 'size')],
is_const=True)
## packet.h (module 'network'): bool ns3::Packet::PeekPacketTag(ns3::Tag & tag) const [member function]
cls.add_method('PeekPacketTag',
'bool',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::PeekTrailer(ns3::Trailer & trailer) [member function]
cls.add_method('PeekTrailer',
'uint32_t',
[param('ns3::Trailer &', 'trailer')])
## packet.h (module 'network'): void ns3::Packet::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::PrintByteTags(std::ostream & os) const [member function]
cls.add_method('PrintByteTags',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::PrintPacketTags(std::ostream & os) const [member function]
cls.add_method('PrintPacketTags',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::RemoveAllByteTags() [member function]
cls.add_method('RemoveAllByteTags',
'void',
[])
## packet.h (module 'network'): void ns3::Packet::RemoveAllPacketTags() [member function]
cls.add_method('RemoveAllPacketTags',
'void',
[])
## packet.h (module 'network'): void ns3::Packet::RemoveAtEnd(uint32_t size) [member function]
cls.add_method('RemoveAtEnd',
'void',
[param('uint32_t', 'size')])
## packet.h (module 'network'): void ns3::Packet::RemoveAtStart(uint32_t size) [member function]
cls.add_method('RemoveAtStart',
'void',
[param('uint32_t', 'size')])
## packet.h (module 'network'): uint32_t ns3::Packet::RemoveHeader(ns3::Header & header) [member function]
cls.add_method('RemoveHeader',
'uint32_t',
[param('ns3::Header &', 'header')])
## packet.h (module 'network'): uint32_t ns3::Packet::RemoveHeader(ns3::Header & header, uint32_t size) [member function]
cls.add_method('RemoveHeader',
'uint32_t',
[param('ns3::Header &', 'header'), param('uint32_t', 'size')])
## packet.h (module 'network'): bool ns3::Packet::RemovePacketTag(ns3::Tag & tag) [member function]
cls.add_method('RemovePacketTag',
'bool',
[param('ns3::Tag &', 'tag')])
## packet.h (module 'network'): uint32_t ns3::Packet::RemoveTrailer(ns3::Trailer & trailer) [member function]
cls.add_method('RemoveTrailer',
'uint32_t',
[param('ns3::Trailer &', 'trailer')])
## packet.h (module 'network'): bool ns3::Packet::ReplacePacketTag(ns3::Tag & tag) [member function]
cls.add_method('ReplacePacketTag',
'bool',
[param('ns3::Tag &', 'tag')])
## packet.h (module 'network'): uint32_t ns3::Packet::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::SetNixVector(ns3::Ptr<ns3::NixVector> nixVector) [member function]
cls.add_method('SetNixVector',
'void',
[param('ns3::Ptr< ns3::NixVector >', 'nixVector')])
## packet.h (module 'network'): std::string ns3::Packet::ToString() const [member function]
cls.add_method('ToString',
'std::string',
[],
is_const=True)
return
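# Illustrative sketch (assumed 'ns.network' import) of the Packet API bound above:
#
#   import ns.network
#
#   p = ns.network.Packet(1024)        # 1024 zero-filled payload bytes
#   size, uid = p.GetSize(), p.GetUid()
#   frag = p.CreateFragment(0, 512)    # copy-on-write fragment of the first half
#   p.RemoveAtStart(512)               # p keeps only the remaining 512 bytes
#   assert p.GetSize() == 512 and frag.GetSize() == 512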
def register_Ns3ParetoRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ParetoRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::ParetoRandomVariable::ParetoRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetMean() const [member function]
cls.add_method('GetMean',
'double',
[],
is_const=True, deprecated=True)
## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetScale() const [member function]
cls.add_method('GetScale',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetShape() const [member function]
cls.add_method('GetShape',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetBound() const [member function]
cls.add_method('GetBound',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetValue(double scale, double shape, double bound) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'scale'), param('double', 'shape'), param('double', 'bound')])
## random-variable-stream.h (module 'core'): uint32_t ns3::ParetoRandomVariable::GetInteger(uint32_t scale, uint32_t shape, uint32_t bound) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'scale'), param('uint32_t', 'shape'), param('uint32_t', 'bound')])
## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::ParetoRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3SimpleOfdmWimaxPhy_methods(root_module, cls):
## simple-ofdm-wimax-phy.h (module 'wimax'): ns3::SimpleOfdmWimaxPhy::SimpleOfdmWimaxPhy(ns3::SimpleOfdmWimaxPhy const & arg0) [constructor]
cls.add_constructor([param('ns3::SimpleOfdmWimaxPhy const &', 'arg0')])
## simple-ofdm-wimax-phy.h (module 'wimax'): ns3::SimpleOfdmWimaxPhy::SimpleOfdmWimaxPhy() [constructor]
cls.add_constructor([])
## simple-ofdm-wimax-phy.h (module 'wimax'): ns3::SimpleOfdmWimaxPhy::SimpleOfdmWimaxPhy(char * tracesPath) [constructor]
cls.add_constructor([param('char *', 'tracesPath')])
## simple-ofdm-wimax-phy.h (module 'wimax'): void ns3::SimpleOfdmWimaxPhy::ActivateLoss(bool loss) [member function]
cls.add_method('ActivateLoss',
'void',
[param('bool', 'loss')])
## simple-ofdm-wimax-phy.h (module 'wimax'): int64_t ns3::SimpleOfdmWimaxPhy::AssignStreams(int64_t stream) [member function]
cls.add_method('AssignStreams',
'int64_t',
[param('int64_t', 'stream')],
is_virtual=True)
## simple-ofdm-wimax-phy.h (module 'wimax'): void ns3::SimpleOfdmWimaxPhy::DoAttach(ns3::Ptr<ns3::WimaxChannel> channel) [member function]
cls.add_method('DoAttach',
'void',
[param('ns3::Ptr< ns3::WimaxChannel >', 'channel')],
is_virtual=True)
## simple-ofdm-wimax-phy.h (module 'wimax'): uint32_t ns3::SimpleOfdmWimaxPhy::GetBandwidth() const [member function]
cls.add_method('GetBandwidth',
'uint32_t',
[],
is_const=True)
## simple-ofdm-wimax-phy.h (module 'wimax'): double ns3::SimpleOfdmWimaxPhy::GetNoiseFigure() const [member function]
cls.add_method('GetNoiseFigure',
'double',
[],
is_const=True)
## simple-ofdm-wimax-phy.h (module 'wimax'): ns3::WimaxPhy::PhyType ns3::SimpleOfdmWimaxPhy::GetPhyType() const [member function]
cls.add_method('GetPhyType',
'ns3::WimaxPhy::PhyType',
[],
is_const=True, is_virtual=True)
## simple-ofdm-wimax-phy.h (module 'wimax'): double ns3::SimpleOfdmWimaxPhy::GetTxPower() const [member function]
cls.add_method('GetTxPower',
'double',
[],
is_const=True)
## simple-ofdm-wimax-phy.h (module 'wimax'): static ns3::TypeId ns3::SimpleOfdmWimaxPhy::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## simple-ofdm-wimax-phy.h (module 'wimax'): void ns3::SimpleOfdmWimaxPhy::NotifyRxBegin(ns3::Ptr<ns3::PacketBurst> burst) [member function]
cls.add_method('NotifyRxBegin',
'void',
[param('ns3::Ptr< ns3::PacketBurst >', 'burst')])
## simple-ofdm-wimax-phy.h (module 'wimax'): void ns3::SimpleOfdmWimaxPhy::NotifyRxDrop(ns3::Ptr<ns3::PacketBurst> burst) [member function]
cls.add_method('NotifyRxDrop',
'void',
[param('ns3::Ptr< ns3::PacketBurst >', 'burst')])
## simple-ofdm-wimax-phy.h (module 'wimax'): void ns3::SimpleOfdmWimaxPhy::NotifyRxEnd(ns3::Ptr<ns3::PacketBurst> burst) [member function]
cls.add_method('NotifyRxEnd',
'void',
[param('ns3::Ptr< ns3::PacketBurst >', 'burst')])
## simple-ofdm-wimax-phy.h (module 'wimax'): void ns3::SimpleOfdmWimaxPhy::NotifyTxBegin(ns3::Ptr<ns3::PacketBurst> burst) [member function]
cls.add_method('NotifyTxBegin',
'void',
[param('ns3::Ptr< ns3::PacketBurst >', 'burst')])
## simple-ofdm-wimax-phy.h (module 'wimax'): void ns3::SimpleOfdmWimaxPhy::NotifyTxDrop(ns3::Ptr<ns3::PacketBurst> burst) [member function]
cls.add_method('NotifyTxDrop',
'void',
[param('ns3::Ptr< ns3::PacketBurst >', 'burst')])
## simple-ofdm-wimax-phy.h (module 'wimax'): void ns3::SimpleOfdmWimaxPhy::NotifyTxEnd(ns3::Ptr<ns3::PacketBurst> burst) [member function]
cls.add_method('NotifyTxEnd',
'void',
[param('ns3::Ptr< ns3::PacketBurst >', 'burst')])
## simple-ofdm-wimax-phy.h (module 'wimax'): void ns3::SimpleOfdmWimaxPhy::Send(ns3::Ptr<ns3::PacketBurst> burst, ns3::WimaxPhy::ModulationType modulationType, uint8_t direction) [member function]
cls.add_method('Send',
'void',
[param('ns3::Ptr< ns3::PacketBurst >', 'burst'), param('ns3::WimaxPhy::ModulationType', 'modulationType'), param('uint8_t', 'direction')])
## simple-ofdm-wimax-phy.h (module 'wimax'): void ns3::SimpleOfdmWimaxPhy::Send(ns3::SendParams * params) [member function]
cls.add_method('Send',
'void',
[param('ns3::SendParams *', 'params')],
is_virtual=True)
## simple-ofdm-wimax-phy.h (module 'wimax'): void ns3::SimpleOfdmWimaxPhy::SetBandwidth(uint32_t BW) [member function]
cls.add_method('SetBandwidth',
'void',
[param('uint32_t', 'BW')])
## simple-ofdm-wimax-phy.h (module 'wimax'): void ns3::SimpleOfdmWimaxPhy::SetNoiseFigure(double nf) [member function]
cls.add_method('SetNoiseFigure',
'void',
[param('double', 'nf')])
## simple-ofdm-wimax-phy.h (module 'wimax'): void ns3::SimpleOfdmWimaxPhy::SetReceiveCallback(ns3::Callback<void, ns3::Ptr<ns3::PacketBurst>, ns3::Ptr<ns3::WimaxConnection>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [member function]
cls.add_method('SetReceiveCallback',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::PacketBurst >, ns3::Ptr< ns3::WimaxConnection >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')])
## simple-ofdm-wimax-phy.h (module 'wimax'): void ns3::SimpleOfdmWimaxPhy::SetSNRToBlockErrorRateTracesPath(char * tracesPath) [member function]
cls.add_method('SetSNRToBlockErrorRateTracesPath',
'void',
[param('char *', 'tracesPath')])
## simple-ofdm-wimax-phy.h (module 'wimax'): void ns3::SimpleOfdmWimaxPhy::SetTxPower(double txPower) [member function]
cls.add_method('SetTxPower',
'void',
[param('double', 'txPower')])
## simple-ofdm-wimax-phy.h (module 'wimax'): void ns3::SimpleOfdmWimaxPhy::StartReceive(uint32_t burstSize, bool isFirstBlock, uint64_t frequency, ns3::WimaxPhy::ModulationType modulationType, uint8_t direction, double rxPower, ns3::Ptr<ns3::PacketBurst> burst) [member function]
cls.add_method('StartReceive',
'void',
[param('uint32_t', 'burstSize'), param('bool', 'isFirstBlock'), param('uint64_t', 'frequency'), param('ns3::WimaxPhy::ModulationType', 'modulationType'), param('uint8_t', 'direction'), param('double', 'rxPower'), param('ns3::Ptr< ns3::PacketBurst >', 'burst')])
## simple-ofdm-wimax-phy.h (module 'wimax'): void ns3::SimpleOfdmWimaxPhy::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
is_virtual=True, visibility='private')
## simple-ofdm-wimax-phy.h (module 'wimax'): uint32_t ns3::SimpleOfdmWimaxPhy::DoGetDataRate(ns3::WimaxPhy::ModulationType modulationType) const [member function]
cls.add_method('DoGetDataRate',
'uint32_t',
[param('ns3::WimaxPhy::ModulationType', 'modulationType')],
is_const=True, is_virtual=True, visibility='private')
## simple-ofdm-wimax-phy.h (module 'wimax'): ns3::Time ns3::SimpleOfdmWimaxPhy::DoGetFrameDuration(uint8_t frameDurationCode) const [member function]
cls.add_method('DoGetFrameDuration',
'ns3::Time',
[param('uint8_t', 'frameDurationCode')],
is_const=True, is_virtual=True, visibility='private')
## simple-ofdm-wimax-phy.h (module 'wimax'): uint8_t ns3::SimpleOfdmWimaxPhy::DoGetFrameDurationCode() const [member function]
cls.add_method('DoGetFrameDurationCode',
'uint8_t',
[],
is_const=True, is_virtual=True, visibility='private')
## simple-ofdm-wimax-phy.h (module 'wimax'): double ns3::SimpleOfdmWimaxPhy::DoGetGValue() const [member function]
cls.add_method('DoGetGValue',
'double',
[],
is_const=True, is_virtual=True, visibility='private')
## simple-ofdm-wimax-phy.h (module 'wimax'): uint16_t ns3::SimpleOfdmWimaxPhy::DoGetNfft() const [member function]
cls.add_method('DoGetNfft',
'uint16_t',
[],
is_const=True, is_virtual=True, visibility='private')
## simple-ofdm-wimax-phy.h (module 'wimax'): uint64_t ns3::SimpleOfdmWimaxPhy::DoGetNrBytes(uint32_t symbols, ns3::WimaxPhy::ModulationType modulationType) const [member function]
cls.add_method('DoGetNrBytes',
'uint64_t',
[param('uint32_t', 'symbols'), param('ns3::WimaxPhy::ModulationType', 'modulationType')],
is_const=True, is_virtual=True, visibility='private')
## simple-ofdm-wimax-phy.h (module 'wimax'): uint64_t ns3::SimpleOfdmWimaxPhy::DoGetNrSymbols(uint32_t size, ns3::WimaxPhy::ModulationType modulationType) const [member function]
cls.add_method('DoGetNrSymbols',
'uint64_t',
[param('uint32_t', 'size'), param('ns3::WimaxPhy::ModulationType', 'modulationType')],
is_const=True, is_virtual=True, visibility='private')
## simple-ofdm-wimax-phy.h (module 'wimax'): uint16_t ns3::SimpleOfdmWimaxPhy::DoGetRtg() const [member function]
cls.add_method('DoGetRtg',
'uint16_t',
[],
is_const=True, is_virtual=True, visibility='private')
## simple-ofdm-wimax-phy.h (module 'wimax'): double ns3::SimpleOfdmWimaxPhy::DoGetSamplingFactor() const [member function]
cls.add_method('DoGetSamplingFactor',
'double',
[],
is_const=True, is_virtual=True, visibility='private')
## simple-ofdm-wimax-phy.h (module 'wimax'): double ns3::SimpleOfdmWimaxPhy::DoGetSamplingFrequency() const [member function]
cls.add_method('DoGetSamplingFrequency',
'double',
[],
is_const=True, is_virtual=True, visibility='private')
## simple-ofdm-wimax-phy.h (module 'wimax'): ns3::Time ns3::SimpleOfdmWimaxPhy::DoGetTransmissionTime(uint32_t size, ns3::WimaxPhy::ModulationType modulationType) const [member function]
cls.add_method('DoGetTransmissionTime',
'ns3::Time',
[param('uint32_t', 'size'), param('ns3::WimaxPhy::ModulationType', 'modulationType')],
is_const=True, is_virtual=True, visibility='private')
## simple-ofdm-wimax-phy.h (module 'wimax'): uint16_t ns3::SimpleOfdmWimaxPhy::DoGetTtg() const [member function]
cls.add_method('DoGetTtg',
'uint16_t',
[],
is_const=True, is_virtual=True, visibility='private')
## simple-ofdm-wimax-phy.h (module 'wimax'): void ns3::SimpleOfdmWimaxPhy::DoSetDataRates() [member function]
cls.add_method('DoSetDataRates',
'void',
[],
is_virtual=True, visibility='private')
## simple-ofdm-wimax-phy.h (module 'wimax'): void ns3::SimpleOfdmWimaxPhy::DoSetPhyParameters() [member function]
cls.add_method('DoSetPhyParameters',
'void',
[],
is_virtual=True, visibility='private')
return
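# NOTE: the register_* functions in this part of the (auto-generated, pybindgen-style)
# module all follow the same pattern: each receives the module root and a pre-declared
# class wrapper and attaches constructors, methods and attributes to it via
# cls.add_constructor()/cls.add_method()/param(). The next few functions wrap ns-3
# attribute-value helpers (TimeValue, TypeIdValue, UintegerValue, and AddressValue
# further below), which all expose the same Copy/Get/Set/SerializeToString/
# DeserializeFromString interface.
#
# Hypothetical usage sketch (not part of this file; import paths vary by ns-3
# version and build, so treat the names below as assumptions):
#   import ns.core
#   t = ns.core.TimeValue(ns.core.Seconds(1.0))  # construct the wrapped attribute value
#   t.Get()                                      # -> ns3::Time held by the value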
def register_Ns3TimeValue_methods(root_module, cls):
## nstime.h (module 'core'): ns3::TimeValue::TimeValue() [constructor]
cls.add_constructor([])
## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::Time const & value) [constructor]
cls.add_constructor([param('ns3::Time const &', 'value')])
## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::TimeValue const & arg0) [constructor]
cls.add_constructor([param('ns3::TimeValue const &', 'arg0')])
## nstime.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TimeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## nstime.h (module 'core'): bool ns3::TimeValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## nstime.h (module 'core'): ns3::Time ns3::TimeValue::Get() const [member function]
cls.add_method('Get',
'ns3::Time',
[],
is_const=True)
## nstime.h (module 'core'): std::string ns3::TimeValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## nstime.h (module 'core'): void ns3::TimeValue::Set(ns3::Time const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Time const &', 'value')])
return
def register_Ns3TypeIdChecker_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')])
return
def register_Ns3TypeIdValue_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor]
cls.add_constructor([param('ns3::TypeId const &', 'value')])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [constructor]
cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')])
## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function]
cls.add_method('Get',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::TypeId const &', 'value')])
return
def register_Ns3UintegerValue_methods(root_module, cls):
## uinteger.h (module 'core'): ns3::UintegerValue::UintegerValue() [constructor]
cls.add_constructor([])
## uinteger.h (module 'core'): ns3::UintegerValue::UintegerValue(uint64_t const & value) [constructor]
cls.add_constructor([param('uint64_t const &', 'value')])
## uinteger.h (module 'core'): ns3::UintegerValue::UintegerValue(ns3::UintegerValue const & arg0) [constructor]
cls.add_constructor([param('ns3::UintegerValue const &', 'arg0')])
## uinteger.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::UintegerValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## uinteger.h (module 'core'): bool ns3::UintegerValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## uinteger.h (module 'core'): uint64_t ns3::UintegerValue::Get() const [member function]
cls.add_method('Get',
'uint64_t',
[],
is_const=True)
## uinteger.h (module 'core'): std::string ns3::UintegerValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## uinteger.h (module 'core'): void ns3::UintegerValue::Set(uint64_t const & value) [member function]
cls.add_method('Set',
'void',
[param('uint64_t const &', 'value')])
return
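# ns3::WimaxChannel below illustrates the non-virtual-interface idiom used by several
# wimax classes: the public Attach/GetDevice/GetNDevices methods are registered
# alongside private pure-virtual DoAttach/DoGetDevice/DoGetNDevices hooks
# (visibility='private', is_pure_virtual=True) that concrete channel subclasses
# are expected to override.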
def register_Ns3WimaxChannel_methods(root_module, cls):
## wimax-channel.h (module 'wimax'): ns3::WimaxChannel::WimaxChannel(ns3::WimaxChannel const & arg0) [constructor]
cls.add_constructor([param('ns3::WimaxChannel const &', 'arg0')])
## wimax-channel.h (module 'wimax'): ns3::WimaxChannel::WimaxChannel() [constructor]
cls.add_constructor([])
## wimax-channel.h (module 'wimax'): int64_t ns3::WimaxChannel::AssignStreams(int64_t stream) [member function]
cls.add_method('AssignStreams',
'int64_t',
[param('int64_t', 'stream')],
is_virtual=True, is_pure_virtual=True)
## wimax-channel.h (module 'wimax'): void ns3::WimaxChannel::Attach(ns3::Ptr<ns3::WimaxPhy> phy) [member function]
cls.add_method('Attach',
'void',
[param('ns3::Ptr< ns3::WimaxPhy >', 'phy')])
## wimax-channel.h (module 'wimax'): ns3::Ptr<ns3::NetDevice> ns3::WimaxChannel::GetDevice(std::size_t i) const [member function]
cls.add_method('GetDevice',
'ns3::Ptr< ns3::NetDevice >',
[param('std::size_t', 'i')],
is_const=True, is_virtual=True)
## wimax-channel.h (module 'wimax'): std::size_t ns3::WimaxChannel::GetNDevices() const [member function]
cls.add_method('GetNDevices',
'std::size_t',
[],
is_const=True, is_virtual=True)
## wimax-channel.h (module 'wimax'): static ns3::TypeId ns3::WimaxChannel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## wimax-channel.h (module 'wimax'): void ns3::WimaxChannel::DoAttach(ns3::Ptr<ns3::WimaxPhy> phy) [member function]
cls.add_method('DoAttach',
'void',
[param('ns3::Ptr< ns3::WimaxPhy >', 'phy')],
is_virtual=True, is_pure_virtual=True, visibility='private')
## wimax-channel.h (module 'wimax'): ns3::Ptr<ns3::NetDevice> ns3::WimaxChannel::DoGetDevice(std::size_t i) const [member function]
cls.add_method('DoGetDevice',
'ns3::Ptr< ns3::NetDevice >',
[param('std::size_t', 'i')],
is_const=True, is_virtual=True, is_pure_virtual=True, visibility='private')
## wimax-channel.h (module 'wimax'): std::size_t ns3::WimaxChannel::DoGetNDevices() const [member function]
cls.add_method('DoGetNDevices',
'std::size_t',
[],
is_const=True, is_virtual=True, is_pure_virtual=True, visibility='private')
return
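# ns3::WimaxNetDevice is the largest wrapper in this section: it re-exports the
# generic ns3::NetDevice interface (SetIfIndex, GetMtu, Send, SendFrom,
# SetReceiveCallback, link-change callbacks, ...) together with WiMAX-specific state
# (TTG/RTG, current DCD/UCD, connection/burst-profile/bandwidth managers) and the
# two static attributes m_direction and m_frameStartTime.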
def register_Ns3WimaxNetDevice_methods(root_module, cls):
## wimax-net-device.h (module 'wimax'): static ns3::TypeId ns3::WimaxNetDevice::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## wimax-net-device.h (module 'wimax'): ns3::WimaxNetDevice::WimaxNetDevice() [constructor]
cls.add_constructor([])
## wimax-net-device.h (module 'wimax'): void ns3::WimaxNetDevice::SetTtg(uint16_t ttg) [member function]
cls.add_method('SetTtg',
'void',
[param('uint16_t', 'ttg')])
## wimax-net-device.h (module 'wimax'): uint16_t ns3::WimaxNetDevice::GetTtg() const [member function]
cls.add_method('GetTtg',
'uint16_t',
[],
is_const=True)
## wimax-net-device.h (module 'wimax'): void ns3::WimaxNetDevice::SetRtg(uint16_t rtg) [member function]
cls.add_method('SetRtg',
'void',
[param('uint16_t', 'rtg')])
## wimax-net-device.h (module 'wimax'): uint16_t ns3::WimaxNetDevice::GetRtg() const [member function]
cls.add_method('GetRtg',
'uint16_t',
[],
is_const=True)
## wimax-net-device.h (module 'wimax'): void ns3::WimaxNetDevice::Attach(ns3::Ptr<ns3::WimaxChannel> channel) [member function]
cls.add_method('Attach',
'void',
[param('ns3::Ptr< ns3::WimaxChannel >', 'channel')])
## wimax-net-device.h (module 'wimax'): void ns3::WimaxNetDevice::SetPhy(ns3::Ptr<ns3::WimaxPhy> phy) [member function]
cls.add_method('SetPhy',
'void',
[param('ns3::Ptr< ns3::WimaxPhy >', 'phy')])
## wimax-net-device.h (module 'wimax'): ns3::Ptr<ns3::WimaxPhy> ns3::WimaxNetDevice::GetPhy() const [member function]
cls.add_method('GetPhy',
'ns3::Ptr< ns3::WimaxPhy >',
[],
is_const=True)
## wimax-net-device.h (module 'wimax'): void ns3::WimaxNetDevice::SetChannel(ns3::Ptr<ns3::WimaxChannel> wimaxChannel) [member function]
cls.add_method('SetChannel',
'void',
[param('ns3::Ptr< ns3::WimaxChannel >', 'wimaxChannel')])
## wimax-net-device.h (module 'wimax'): uint64_t ns3::WimaxNetDevice::GetChannel(uint8_t index) const [member function]
cls.add_method('GetChannel',
'uint64_t',
[param('uint8_t', 'index')],
is_const=True)
## wimax-net-device.h (module 'wimax'): void ns3::WimaxNetDevice::SetNrFrames(uint32_t nrFrames) [member function]
cls.add_method('SetNrFrames',
'void',
[param('uint32_t', 'nrFrames')])
## wimax-net-device.h (module 'wimax'): uint32_t ns3::WimaxNetDevice::GetNrFrames() const [member function]
cls.add_method('GetNrFrames',
'uint32_t',
[],
is_const=True)
## wimax-net-device.h (module 'wimax'): void ns3::WimaxNetDevice::SetMacAddress(ns3::Mac48Address address) [member function]
cls.add_method('SetMacAddress',
'void',
[param('ns3::Mac48Address', 'address')])
## wimax-net-device.h (module 'wimax'): ns3::Mac48Address ns3::WimaxNetDevice::GetMacAddress() const [member function]
cls.add_method('GetMacAddress',
'ns3::Mac48Address',
[],
is_const=True)
## wimax-net-device.h (module 'wimax'): void ns3::WimaxNetDevice::SetState(uint8_t state) [member function]
cls.add_method('SetState',
'void',
[param('uint8_t', 'state')])
## wimax-net-device.h (module 'wimax'): uint8_t ns3::WimaxNetDevice::GetState() const [member function]
cls.add_method('GetState',
'uint8_t',
[],
is_const=True)
## wimax-net-device.h (module 'wimax'): ns3::Ptr<ns3::WimaxConnection> ns3::WimaxNetDevice::GetInitialRangingConnection() const [member function]
cls.add_method('GetInitialRangingConnection',
'ns3::Ptr< ns3::WimaxConnection >',
[],
is_const=True)
## wimax-net-device.h (module 'wimax'): ns3::Ptr<ns3::WimaxConnection> ns3::WimaxNetDevice::GetBroadcastConnection() const [member function]
cls.add_method('GetBroadcastConnection',
'ns3::Ptr< ns3::WimaxConnection >',
[],
is_const=True)
## wimax-net-device.h (module 'wimax'): void ns3::WimaxNetDevice::SetCurrentDcd(ns3::Dcd dcd) [member function]
cls.add_method('SetCurrentDcd',
'void',
[param('ns3::Dcd', 'dcd')])
## wimax-net-device.h (module 'wimax'): ns3::Dcd ns3::WimaxNetDevice::GetCurrentDcd() const [member function]
cls.add_method('GetCurrentDcd',
'ns3::Dcd',
[],
is_const=True)
## wimax-net-device.h (module 'wimax'): void ns3::WimaxNetDevice::SetCurrentUcd(ns3::Ucd ucd) [member function]
cls.add_method('SetCurrentUcd',
'void',
[param('ns3::Ucd', 'ucd')])
## wimax-net-device.h (module 'wimax'): ns3::Ucd ns3::WimaxNetDevice::GetCurrentUcd() const [member function]
cls.add_method('GetCurrentUcd',
'ns3::Ucd',
[],
is_const=True)
## wimax-net-device.h (module 'wimax'): ns3::Ptr<ns3::ConnectionManager> ns3::WimaxNetDevice::GetConnectionManager() const [member function]
cls.add_method('GetConnectionManager',
'ns3::Ptr< ns3::ConnectionManager >',
[],
is_const=True)
## wimax-net-device.h (module 'wimax'): void ns3::WimaxNetDevice::SetConnectionManager(ns3::Ptr<ns3::ConnectionManager> connectionManager) [member function]
cls.add_method('SetConnectionManager',
'void',
[param('ns3::Ptr< ns3::ConnectionManager >', 'connectionManager')],
is_virtual=True)
## wimax-net-device.h (module 'wimax'): ns3::Ptr<ns3::BurstProfileManager> ns3::WimaxNetDevice::GetBurstProfileManager() const [member function]
cls.add_method('GetBurstProfileManager',
'ns3::Ptr< ns3::BurstProfileManager >',
[],
is_const=True)
## wimax-net-device.h (module 'wimax'): void ns3::WimaxNetDevice::SetBurstProfileManager(ns3::Ptr<ns3::BurstProfileManager> burstProfileManager) [member function]
cls.add_method('SetBurstProfileManager',
'void',
[param('ns3::Ptr< ns3::BurstProfileManager >', 'burstProfileManager')])
## wimax-net-device.h (module 'wimax'): ns3::Ptr<ns3::BandwidthManager> ns3::WimaxNetDevice::GetBandwidthManager() const [member function]
cls.add_method('GetBandwidthManager',
'ns3::Ptr< ns3::BandwidthManager >',
[],
is_const=True)
## wimax-net-device.h (module 'wimax'): void ns3::WimaxNetDevice::SetBandwidthManager(ns3::Ptr<ns3::BandwidthManager> bandwidthManager) [member function]
cls.add_method('SetBandwidthManager',
'void',
[param('ns3::Ptr< ns3::BandwidthManager >', 'bandwidthManager')])
## wimax-net-device.h (module 'wimax'): void ns3::WimaxNetDevice::CreateDefaultConnections() [member function]
cls.add_method('CreateDefaultConnections',
'void',
[])
## wimax-net-device.h (module 'wimax'): void ns3::WimaxNetDevice::Start() [member function]
cls.add_method('Start',
'void',
[],
is_virtual=True, is_pure_virtual=True)
## wimax-net-device.h (module 'wimax'): void ns3::WimaxNetDevice::Stop() [member function]
cls.add_method('Stop',
'void',
[],
is_virtual=True, is_pure_virtual=True)
## wimax-net-device.h (module 'wimax'): void ns3::WimaxNetDevice::SetReceiveCallback() [member function]
cls.add_method('SetReceiveCallback',
'void',
[])
## wimax-net-device.h (module 'wimax'): void ns3::WimaxNetDevice::ForwardUp(ns3::Ptr<ns3::Packet> packet, ns3::Mac48Address const & source, ns3::Mac48Address const & dest) [member function]
cls.add_method('ForwardUp',
'void',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Mac48Address const &', 'source'), param('ns3::Mac48Address const &', 'dest')])
## wimax-net-device.h (module 'wimax'): bool ns3::WimaxNetDevice::Enqueue(ns3::Ptr<ns3::Packet> packet, ns3::MacHeaderType const & hdrType, ns3::Ptr<ns3::WimaxConnection> connection) [member function]
cls.add_method('Enqueue',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::MacHeaderType const &', 'hdrType'), param('ns3::Ptr< ns3::WimaxConnection >', 'connection')],
is_virtual=True, is_pure_virtual=True)
## wimax-net-device.h (module 'wimax'): void ns3::WimaxNetDevice::ForwardDown(ns3::Ptr<ns3::PacketBurst> burst, ns3::WimaxPhy::ModulationType modulationType) [member function]
cls.add_method('ForwardDown',
'void',
[param('ns3::Ptr< ns3::PacketBurst >', 'burst'), param('ns3::WimaxPhy::ModulationType', 'modulationType')])
## wimax-net-device.h (module 'wimax'): ns3::WimaxNetDevice::m_direction [variable]
cls.add_static_attribute('m_direction', 'uint8_t', is_const=False)
## wimax-net-device.h (module 'wimax'): ns3::WimaxNetDevice::m_frameStartTime [variable]
cls.add_static_attribute('m_frameStartTime', 'ns3::Time', is_const=False)
## wimax-net-device.h (module 'wimax'): void ns3::WimaxNetDevice::SetName(std::string const name) [member function]
cls.add_method('SetName',
'void',
[param('std::string const', 'name')],
is_virtual=True)
## wimax-net-device.h (module 'wimax'): std::string ns3::WimaxNetDevice::GetName() const [member function]
cls.add_method('GetName',
'std::string',
[],
is_const=True, is_virtual=True)
## wimax-net-device.h (module 'wimax'): void ns3::WimaxNetDevice::SetIfIndex(uint32_t const index) [member function]
cls.add_method('SetIfIndex',
'void',
[param('uint32_t const', 'index')],
is_virtual=True)
## wimax-net-device.h (module 'wimax'): uint32_t ns3::WimaxNetDevice::GetIfIndex() const [member function]
cls.add_method('GetIfIndex',
'uint32_t',
[],
is_const=True, is_virtual=True)
## wimax-net-device.h (module 'wimax'): ns3::Ptr<ns3::Channel> ns3::WimaxNetDevice::GetPhyChannel() const [member function]
cls.add_method('GetPhyChannel',
'ns3::Ptr< ns3::Channel >',
[],
is_const=True, is_virtual=True)
## wimax-net-device.h (module 'wimax'): ns3::Ptr<ns3::Channel> ns3::WimaxNetDevice::GetChannel() const [member function]
cls.add_method('GetChannel',
'ns3::Ptr< ns3::Channel >',
[],
is_const=True, is_virtual=True)
## wimax-net-device.h (module 'wimax'): void ns3::WimaxNetDevice::SetAddress(ns3::Address address) [member function]
cls.add_method('SetAddress',
'void',
[param('ns3::Address', 'address')],
is_virtual=True)
## wimax-net-device.h (module 'wimax'): ns3::Address ns3::WimaxNetDevice::GetAddress() const [member function]
cls.add_method('GetAddress',
'ns3::Address',
[],
is_const=True, is_virtual=True)
## wimax-net-device.h (module 'wimax'): bool ns3::WimaxNetDevice::SetMtu(uint16_t const mtu) [member function]
cls.add_method('SetMtu',
'bool',
[param('uint16_t const', 'mtu')],
is_virtual=True)
## wimax-net-device.h (module 'wimax'): uint16_t ns3::WimaxNetDevice::GetMtu() const [member function]
cls.add_method('GetMtu',
'uint16_t',
[],
is_const=True, is_virtual=True)
## wimax-net-device.h (module 'wimax'): bool ns3::WimaxNetDevice::IsLinkUp() const [member function]
cls.add_method('IsLinkUp',
'bool',
[],
is_const=True, is_virtual=True)
## wimax-net-device.h (module 'wimax'): void ns3::WimaxNetDevice::SetLinkChangeCallback(ns3::Callback<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [member function]
cls.add_method('SetLinkChangeCallback',
'void',
[param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')],
is_virtual=True)
## wimax-net-device.h (module 'wimax'): bool ns3::WimaxNetDevice::IsBroadcast() const [member function]
cls.add_method('IsBroadcast',
'bool',
[],
is_const=True, is_virtual=True)
## wimax-net-device.h (module 'wimax'): ns3::Address ns3::WimaxNetDevice::GetBroadcast() const [member function]
cls.add_method('GetBroadcast',
'ns3::Address',
[],
is_const=True, is_virtual=True)
## wimax-net-device.h (module 'wimax'): bool ns3::WimaxNetDevice::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_const=True, is_virtual=True)
## wimax-net-device.h (module 'wimax'): ns3::Address ns3::WimaxNetDevice::GetMulticast() const [member function]
cls.add_method('GetMulticast',
'ns3::Address',
[],
is_const=True, is_virtual=True)
## wimax-net-device.h (module 'wimax'): ns3::Address ns3::WimaxNetDevice::MakeMulticastAddress(ns3::Ipv4Address multicastGroup) const [member function]
cls.add_method('MakeMulticastAddress',
'ns3::Address',
[param('ns3::Ipv4Address', 'multicastGroup')],
is_const=True, is_virtual=True)
## wimax-net-device.h (module 'wimax'): bool ns3::WimaxNetDevice::IsPointToPoint() const [member function]
cls.add_method('IsPointToPoint',
'bool',
[],
is_const=True, is_virtual=True)
## wimax-net-device.h (module 'wimax'): bool ns3::WimaxNetDevice::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('Send',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_virtual=True)
## wimax-net-device.h (module 'wimax'): void ns3::WimaxNetDevice::SetNode(ns3::Ptr<ns3::Node> node) [member function]
cls.add_method('SetNode',
'void',
[param('ns3::Ptr< ns3::Node >', 'node')],
is_virtual=True)
## wimax-net-device.h (module 'wimax'): ns3::Ptr<ns3::Node> ns3::WimaxNetDevice::GetNode() const [member function]
cls.add_method('GetNode',
'ns3::Ptr< ns3::Node >',
[],
is_const=True, is_virtual=True)
## wimax-net-device.h (module 'wimax'): bool ns3::WimaxNetDevice::NeedsArp() const [member function]
cls.add_method('NeedsArp',
'bool',
[],
is_const=True, is_virtual=True)
## wimax-net-device.h (module 'wimax'): void ns3::WimaxNetDevice::SetReceiveCallback(ns3::NetDevice::ReceiveCallback cb) [member function]
cls.add_method('SetReceiveCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_virtual=True)
## wimax-net-device.h (module 'wimax'): void ns3::WimaxNetDevice::AddLinkChangeCallback(ns3::Callback<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [member function]
cls.add_method('AddLinkChangeCallback',
'void',
[param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')],
is_virtual=True)
## wimax-net-device.h (module 'wimax'): bool ns3::WimaxNetDevice::SendFrom(ns3::Ptr<ns3::Packet> packet, ns3::Address const & source, ns3::Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('SendFrom',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_virtual=True)
## wimax-net-device.h (module 'wimax'): void ns3::WimaxNetDevice::SetPromiscReceiveCallback(ns3::NetDevice::PromiscReceiveCallback cb) [member function]
cls.add_method('SetPromiscReceiveCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_virtual=True)
## wimax-net-device.h (module 'wimax'): ns3::NetDevice::PromiscReceiveCallback ns3::WimaxNetDevice::GetPromiscReceiveCallback() [member function]
cls.add_method('GetPromiscReceiveCallback',
'ns3::NetDevice::PromiscReceiveCallback',
[])
## wimax-net-device.h (module 'wimax'): bool ns3::WimaxNetDevice::SupportsSendFrom() const [member function]
cls.add_method('SupportsSendFrom',
'bool',
[],
is_const=True, is_virtual=True)
## wimax-net-device.h (module 'wimax'): ns3::WimaxNetDevice::m_traceRx [variable]
cls.add_instance_attribute('m_traceRx', 'ns3::TracedCallback< ns3::Ptr< ns3::Packet const >, ns3::Mac48Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', is_const=False)
## wimax-net-device.h (module 'wimax'): ns3::WimaxNetDevice::m_traceTx [variable]
cls.add_instance_attribute('m_traceTx', 'ns3::TracedCallback< ns3::Ptr< ns3::Packet const >, ns3::Mac48Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', is_const=False)
## wimax-net-device.h (module 'wimax'): void ns3::WimaxNetDevice::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
is_virtual=True)
## wimax-net-device.h (module 'wimax'): ns3::Address ns3::WimaxNetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function]
cls.add_method('GetMulticast',
'ns3::Address',
[param('ns3::Ipv6Address', 'addr')],
is_const=True, is_virtual=True)
## wimax-net-device.h (module 'wimax'): ns3::Address ns3::WimaxNetDevice::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function]
cls.add_method('GetMulticast',
'ns3::Address',
[param('ns3::Ipv4Address', 'multicastGroup')],
is_const=True, is_virtual=True)
## wimax-net-device.h (module 'wimax'): bool ns3::WimaxNetDevice::IsBridge() const [member function]
cls.add_method('IsBridge',
'bool',
[],
is_const=True, is_virtual=True)
## wimax-net-device.h (module 'wimax'): bool ns3::WimaxNetDevice::IsPromisc() [member function]
cls.add_method('IsPromisc',
'bool',
[])
## wimax-net-device.h (module 'wimax'): void ns3::WimaxNetDevice::NotifyPromiscTrace(ns3::Ptr<ns3::Packet> p) [member function]
cls.add_method('NotifyPromiscTrace',
'void',
[param('ns3::Ptr< ns3::Packet >', 'p')])
## wimax-net-device.h (module 'wimax'): bool ns3::WimaxNetDevice::DoSend(ns3::Ptr<ns3::Packet> packet, ns3::Mac48Address const & source, ns3::Mac48Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('DoSend',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Mac48Address const &', 'source'), param('ns3::Mac48Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_virtual=True, is_pure_virtual=True, visibility='private')
## wimax-net-device.h (module 'wimax'): void ns3::WimaxNetDevice::DoReceive(ns3::Ptr<ns3::Packet> packet) [member function]
cls.add_method('DoReceive',
'void',
[param('ns3::Ptr< ns3::Packet >', 'packet')],
is_virtual=True, is_pure_virtual=True, visibility='private')
## wimax-net-device.h (module 'wimax'): ns3::Ptr<ns3::WimaxChannel> ns3::WimaxNetDevice::DoGetChannel() const [member function]
cls.add_method('DoGetChannel',
'ns3::Ptr< ns3::WimaxChannel >',
[],
is_const=True, is_virtual=True, visibility='private')
return
def register_Ns3AddressChecker_methods(root_module, cls):
## address.h (module 'network'): ns3::AddressChecker::AddressChecker() [constructor]
cls.add_constructor([])
## address.h (module 'network'): ns3::AddressChecker::AddressChecker(ns3::AddressChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::AddressChecker const &', 'arg0')])
return
def register_Ns3AddressValue_methods(root_module, cls):
## address.h (module 'network'): ns3::AddressValue::AddressValue() [constructor]
cls.add_constructor([])
## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::Address const & value) [constructor]
cls.add_constructor([param('ns3::Address const &', 'value')])
## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::AddressValue const & arg0) [constructor]
cls.add_constructor([param('ns3::AddressValue const &', 'arg0')])
## address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::AddressValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## address.h (module 'network'): bool ns3::AddressValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## address.h (module 'network'): ns3::Address ns3::AddressValue::Get() const [member function]
cls.add_method('Get',
'ns3::Address',
[],
is_const=True)
## address.h (module 'network'): std::string ns3::AddressValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## address.h (module 'network'): void ns3::AddressValue::Set(ns3::Address const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Address const &', 'value')])
return
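# ns3::BaseStationNetDevice specializes WimaxNetDevice: besides scheduler and manager
# accessors (uplink scheduler, BS scheduler, link manager, SS manager, service-flow
# manager) it overrides Start/Stop/Enqueue and the private DoSend/DoReceive hooks
# declared pure-virtual on the base class above.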
def register_Ns3BaseStationNetDevice_methods(root_module, cls):
## bs-net-device.h (module 'wimax'): static ns3::TypeId ns3::BaseStationNetDevice::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## bs-net-device.h (module 'wimax'): ns3::BaseStationNetDevice::BaseStationNetDevice() [constructor]
cls.add_constructor([])
## bs-net-device.h (module 'wimax'): ns3::BaseStationNetDevice::BaseStationNetDevice(ns3::Ptr<ns3::Node> node, ns3::Ptr<ns3::WimaxPhy> phy) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::Node >', 'node'), param('ns3::Ptr< ns3::WimaxPhy >', 'phy')])
## bs-net-device.h (module 'wimax'): ns3::BaseStationNetDevice::BaseStationNetDevice(ns3::Ptr<ns3::Node> node, ns3::Ptr<ns3::WimaxPhy> phy, ns3::Ptr<ns3::UplinkScheduler> uplinkScheduler, ns3::Ptr<ns3::BSScheduler> bsScheduler) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::Node >', 'node'), param('ns3::Ptr< ns3::WimaxPhy >', 'phy'), param('ns3::Ptr< ns3::UplinkScheduler >', 'uplinkScheduler'), param('ns3::Ptr< ns3::BSScheduler >', 'bsScheduler')])
## bs-net-device.h (module 'wimax'): void ns3::BaseStationNetDevice::SetInitialRangingInterval(ns3::Time initialRangInterval) [member function]
cls.add_method('SetInitialRangingInterval',
'void',
[param('ns3::Time', 'initialRangInterval')])
## bs-net-device.h (module 'wimax'): void ns3::BaseStationNetDevice::InitBaseStationNetDevice() [member function]
cls.add_method('InitBaseStationNetDevice',
'void',
[])
## bs-net-device.h (module 'wimax'): ns3::Time ns3::BaseStationNetDevice::GetInitialRangingInterval() const [member function]
cls.add_method('GetInitialRangingInterval',
'ns3::Time',
[],
is_const=True)
## bs-net-device.h (module 'wimax'): void ns3::BaseStationNetDevice::SetDcdInterval(ns3::Time dcdInterval) [member function]
cls.add_method('SetDcdInterval',
'void',
[param('ns3::Time', 'dcdInterval')])
## bs-net-device.h (module 'wimax'): ns3::Time ns3::BaseStationNetDevice::GetDcdInterval() const [member function]
cls.add_method('GetDcdInterval',
'ns3::Time',
[],
is_const=True)
## bs-net-device.h (module 'wimax'): void ns3::BaseStationNetDevice::SetUcdInterval(ns3::Time ucdInterval) [member function]
cls.add_method('SetUcdInterval',
'void',
[param('ns3::Time', 'ucdInterval')])
## bs-net-device.h (module 'wimax'): ns3::Time ns3::BaseStationNetDevice::GetUcdInterval() const [member function]
cls.add_method('GetUcdInterval',
'ns3::Time',
[],
is_const=True)
## bs-net-device.h (module 'wimax'): void ns3::BaseStationNetDevice::SetIntervalT8(ns3::Time interval) [member function]
cls.add_method('SetIntervalT8',
'void',
[param('ns3::Time', 'interval')])
## bs-net-device.h (module 'wimax'): ns3::Time ns3::BaseStationNetDevice::GetIntervalT8() const [member function]
cls.add_method('GetIntervalT8',
'ns3::Time',
[],
is_const=True)
## bs-net-device.h (module 'wimax'): void ns3::BaseStationNetDevice::SetMaxRangingCorrectionRetries(uint8_t maxRangCorrectionRetries) [member function]
cls.add_method('SetMaxRangingCorrectionRetries',
'void',
[param('uint8_t', 'maxRangCorrectionRetries')])
## bs-net-device.h (module 'wimax'): uint8_t ns3::BaseStationNetDevice::GetMaxRangingCorrectionRetries() const [member function]
cls.add_method('GetMaxRangingCorrectionRetries',
'uint8_t',
[],
is_const=True)
## bs-net-device.h (module 'wimax'): void ns3::BaseStationNetDevice::SetMaxInvitedRangRetries(uint8_t maxInvitedRangRetries) [member function]
cls.add_method('SetMaxInvitedRangRetries',
'void',
[param('uint8_t', 'maxInvitedRangRetries')])
## bs-net-device.h (module 'wimax'): uint8_t ns3::BaseStationNetDevice::GetMaxInvitedRangRetries() const [member function]
cls.add_method('GetMaxInvitedRangRetries',
'uint8_t',
[],
is_const=True)
## bs-net-device.h (module 'wimax'): void ns3::BaseStationNetDevice::SetRangReqOppSize(uint8_t rangReqOppSize) [member function]
cls.add_method('SetRangReqOppSize',
'void',
[param('uint8_t', 'rangReqOppSize')])
## bs-net-device.h (module 'wimax'): uint8_t ns3::BaseStationNetDevice::GetRangReqOppSize() const [member function]
cls.add_method('GetRangReqOppSize',
'uint8_t',
[],
is_const=True)
## bs-net-device.h (module 'wimax'): void ns3::BaseStationNetDevice::SetBwReqOppSize(uint8_t bwReqOppSize) [member function]
cls.add_method('SetBwReqOppSize',
'void',
[param('uint8_t', 'bwReqOppSize')])
## bs-net-device.h (module 'wimax'): uint8_t ns3::BaseStationNetDevice::GetBwReqOppSize() const [member function]
cls.add_method('GetBwReqOppSize',
'uint8_t',
[],
is_const=True)
## bs-net-device.h (module 'wimax'): void ns3::BaseStationNetDevice::SetNrDlSymbols(uint32_t dlSymbols) [member function]
cls.add_method('SetNrDlSymbols',
'void',
[param('uint32_t', 'dlSymbols')])
## bs-net-device.h (module 'wimax'): uint32_t ns3::BaseStationNetDevice::GetNrDlSymbols() const [member function]
cls.add_method('GetNrDlSymbols',
'uint32_t',
[],
is_const=True)
## bs-net-device.h (module 'wimax'): void ns3::BaseStationNetDevice::SetNrUlSymbols(uint32_t ulSymbols) [member function]
cls.add_method('SetNrUlSymbols',
'void',
[param('uint32_t', 'ulSymbols')])
## bs-net-device.h (module 'wimax'): uint32_t ns3::BaseStationNetDevice::GetNrUlSymbols() const [member function]
cls.add_method('GetNrUlSymbols',
'uint32_t',
[],
is_const=True)
## bs-net-device.h (module 'wimax'): uint32_t ns3::BaseStationNetDevice::GetNrDcdSent() const [member function]
cls.add_method('GetNrDcdSent',
'uint32_t',
[],
is_const=True)
## bs-net-device.h (module 'wimax'): uint32_t ns3::BaseStationNetDevice::GetNrUcdSent() const [member function]
cls.add_method('GetNrUcdSent',
'uint32_t',
[],
is_const=True)
## bs-net-device.h (module 'wimax'): ns3::Time ns3::BaseStationNetDevice::GetDlSubframeStartTime() const [member function]
cls.add_method('GetDlSubframeStartTime',
'ns3::Time',
[],
is_const=True)
## bs-net-device.h (module 'wimax'): ns3::Time ns3::BaseStationNetDevice::GetUlSubframeStartTime() const [member function]
cls.add_method('GetUlSubframeStartTime',
'ns3::Time',
[],
is_const=True)
## bs-net-device.h (module 'wimax'): uint8_t ns3::BaseStationNetDevice::GetRangingOppNumber() const [member function]
cls.add_method('GetRangingOppNumber',
'uint8_t',
[],
is_const=True)
## bs-net-device.h (module 'wimax'): ns3::Ptr<ns3::SSManager> ns3::BaseStationNetDevice::GetSSManager() const [member function]
cls.add_method('GetSSManager',
'ns3::Ptr< ns3::SSManager >',
[],
is_const=True)
## bs-net-device.h (module 'wimax'): void ns3::BaseStationNetDevice::SetSSManager(ns3::Ptr<ns3::SSManager> ssManager) [member function]
cls.add_method('SetSSManager',
'void',
[param('ns3::Ptr< ns3::SSManager >', 'ssManager')])
## bs-net-device.h (module 'wimax'): ns3::Ptr<ns3::UplinkScheduler> ns3::BaseStationNetDevice::GetUplinkScheduler() const [member function]
cls.add_method('GetUplinkScheduler',
'ns3::Ptr< ns3::UplinkScheduler >',
[],
is_const=True)
## bs-net-device.h (module 'wimax'): void ns3::BaseStationNetDevice::SetUplinkScheduler(ns3::Ptr<ns3::UplinkScheduler> ulScheduler) [member function]
cls.add_method('SetUplinkScheduler',
'void',
[param('ns3::Ptr< ns3::UplinkScheduler >', 'ulScheduler')])
## bs-net-device.h (module 'wimax'): ns3::Ptr<ns3::BSLinkManager> ns3::BaseStationNetDevice::GetLinkManager() const [member function]
cls.add_method('GetLinkManager',
'ns3::Ptr< ns3::BSLinkManager >',
[],
is_const=True)
## bs-net-device.h (module 'wimax'): void ns3::BaseStationNetDevice::SetBSScheduler(ns3::Ptr<ns3::BSScheduler> bsSchedule) [member function]
cls.add_method('SetBSScheduler',
'void',
[param('ns3::Ptr< ns3::BSScheduler >', 'bsSchedule')])
## bs-net-device.h (module 'wimax'): ns3::Ptr<ns3::BSScheduler> ns3::BaseStationNetDevice::GetBSScheduler() const [member function]
cls.add_method('GetBSScheduler',
'ns3::Ptr< ns3::BSScheduler >',
[],
is_const=True)
## bs-net-device.h (module 'wimax'): void ns3::BaseStationNetDevice::SetLinkManager(ns3::Ptr<ns3::BSLinkManager> linkManager) [member function]
cls.add_method('SetLinkManager',
'void',
[param('ns3::Ptr< ns3::BSLinkManager >', 'linkManager')])
## bs-net-device.h (module 'wimax'): ns3::Ptr<ns3::IpcsClassifier> ns3::BaseStationNetDevice::GetBsClassifier() const [member function]
cls.add_method('GetBsClassifier',
'ns3::Ptr< ns3::IpcsClassifier >',
[],
is_const=True)
## bs-net-device.h (module 'wimax'): void ns3::BaseStationNetDevice::SetBsClassifier(ns3::Ptr<ns3::IpcsClassifier> classifier) [member function]
cls.add_method('SetBsClassifier',
'void',
[param('ns3::Ptr< ns3::IpcsClassifier >', 'classifier')])
## bs-net-device.h (module 'wimax'): ns3::Time ns3::BaseStationNetDevice::GetPsDuration() const [member function]
cls.add_method('GetPsDuration',
'ns3::Time',
[],
is_const=True)
## bs-net-device.h (module 'wimax'): ns3::Time ns3::BaseStationNetDevice::GetSymbolDuration() const [member function]
cls.add_method('GetSymbolDuration',
'ns3::Time',
[],
is_const=True)
## bs-net-device.h (module 'wimax'): void ns3::BaseStationNetDevice::Start() [member function]
cls.add_method('Start',
'void',
[],
is_virtual=True)
## bs-net-device.h (module 'wimax'): void ns3::BaseStationNetDevice::Stop() [member function]
cls.add_method('Stop',
'void',
[],
is_virtual=True)
## bs-net-device.h (module 'wimax'): bool ns3::BaseStationNetDevice::Enqueue(ns3::Ptr<ns3::Packet> packet, ns3::MacHeaderType const & hdrType, ns3::Ptr<ns3::WimaxConnection> connection) [member function]
cls.add_method('Enqueue',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::MacHeaderType const &', 'hdrType'), param('ns3::Ptr< ns3::WimaxConnection >', 'connection')],
is_virtual=True)
## bs-net-device.h (module 'wimax'): ns3::Ptr<ns3::WimaxConnection> ns3::BaseStationNetDevice::GetConnection(ns3::Cid cid) [member function]
cls.add_method('GetConnection',
'ns3::Ptr< ns3::WimaxConnection >',
[param('ns3::Cid', 'cid')])
## bs-net-device.h (module 'wimax'): void ns3::BaseStationNetDevice::MarkUplinkAllocations() [member function]
cls.add_method('MarkUplinkAllocations',
'void',
[])
## bs-net-device.h (module 'wimax'): void ns3::BaseStationNetDevice::MarkRangingOppStart(ns3::Time rangingOppStartTime) [member function]
cls.add_method('MarkRangingOppStart',
'void',
[param('ns3::Time', 'rangingOppStartTime')])
## bs-net-device.h (module 'wimax'): ns3::Ptr<ns3::BsServiceFlowManager> ns3::BaseStationNetDevice::GetServiceFlowManager() const [member function]
cls.add_method('GetServiceFlowManager',
'ns3::Ptr< ns3::BsServiceFlowManager >',
[],
is_const=True)
## bs-net-device.h (module 'wimax'): void ns3::BaseStationNetDevice::SetServiceFlowManager(ns3::Ptr<ns3::BsServiceFlowManager> arg0) [member function]
cls.add_method('SetServiceFlowManager',
'void',
[param('ns3::Ptr< ns3::BsServiceFlowManager >', 'arg0')])
## bs-net-device.h (module 'wimax'): void ns3::BaseStationNetDevice::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
is_virtual=True, visibility='private')
## bs-net-device.h (module 'wimax'): bool ns3::BaseStationNetDevice::DoSend(ns3::Ptr<ns3::Packet> packet, ns3::Mac48Address const & source, ns3::Mac48Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('DoSend',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Mac48Address const &', 'source'), param('ns3::Mac48Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_virtual=True, visibility='private')
## bs-net-device.h (module 'wimax'): void ns3::BaseStationNetDevice::DoReceive(ns3::Ptr<ns3::Packet> packet) [member function]
cls.add_method('DoReceive',
'void',
[param('ns3::Ptr< ns3::Packet >', 'packet')],
is_virtual=True, visibility='private')
return
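# The register_Ns3CallbackImpl__* functions that follow wrap individual instantiations
# of the ns3::CallbackImpl<R, T1..T9> template; the mangled Python-side function names
# encode the template arguments. Each wrapper exposes DoGetTypeid/GetTypeid and maps
# operator() to Python's __call__ via custom_name='__call__'.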
def register_Ns3CallbackImpl__Ns3ObjectBase___star___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl(ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> const & arg0) [constructor]
cls.add_constructor([param('ns3::CallbackImpl< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty > const &', 'arg0')])
## callback.h (module 'core'): static std::string ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::DoGetTypeid() [member function]
cls.add_method('DoGetTypeid',
'std::string',
[],
is_static=True)
## callback.h (module 'core'): std::string ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::GetTypeid() const [member function]
cls.add_method('GetTypeid',
'std::string',
[],
is_const=True, is_virtual=True)
## callback.h (module 'core'): ns3::ObjectBase * ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::operator()() [member operator]
cls.add_method('operator()',
'ns3::ObjectBase *',
[],
custom_name='__call__', is_virtual=True, is_pure_virtual=True)
return
def register_Ns3CallbackImpl__Void_Bool_Unsigned_long_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackImpl<void, bool, unsigned long, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackImpl<void, bool, unsigned long, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl(ns3::CallbackImpl<void, bool, unsigned long, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> const & arg0) [constructor]
cls.add_constructor([param('ns3::CallbackImpl< void, bool, unsigned long, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty > const &', 'arg0')])
## callback.h (module 'core'): static std::string ns3::CallbackImpl<void, bool, unsigned long, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::DoGetTypeid() [member function]
cls.add_method('DoGetTypeid',
'std::string',
[],
is_static=True)
## callback.h (module 'core'): std::string ns3::CallbackImpl<void, bool, unsigned long, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::GetTypeid() const [member function]
cls.add_method('GetTypeid',
'std::string',
[],
is_const=True, is_virtual=True)
## callback.h (module 'core'): void ns3::CallbackImpl<void, bool, unsigned long, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::operator()(bool arg0, long unsigned int arg1) [member operator]
cls.add_method('operator()',
'void',
[param('bool', 'arg0'), param('long unsigned int', 'arg1')],
custom_name='__call__', is_virtual=True, is_pure_virtual=True)
return
def register_Ns3CallbackImpl__Void_Ns3Ptr__lt__const_ns3Packet__gt___Const_ns3Mac48Address___amp___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackImpl<void, ns3::Ptr<const ns3::Packet>, const ns3::Mac48Address &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackImpl<void, ns3::Ptr<const ns3::Packet>, const ns3::Mac48Address &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl(ns3::CallbackImpl<void, ns3::Ptr<const ns3::Packet>, const ns3::Mac48Address &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> const & arg0) [constructor]
cls.add_constructor([param('ns3::CallbackImpl< void, ns3::Ptr< ns3::Packet const >, ns3::Mac48Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty > const &', 'arg0')])
## callback.h (module 'core'): static std::string ns3::CallbackImpl<void, ns3::Ptr<const ns3::Packet>, const ns3::Mac48Address &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::DoGetTypeid() [member function]
cls.add_method('DoGetTypeid',
'std::string',
[],
is_static=True)
## callback.h (module 'core'): std::string ns3::CallbackImpl<void, ns3::Ptr<const ns3::Packet>, const ns3::Mac48Address &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::GetTypeid() const [member function]
cls.add_method('GetTypeid',
'std::string',
[],
is_const=True, is_virtual=True)
## callback.h (module 'core'): void ns3::CallbackImpl<void, ns3::Ptr<const ns3::Packet>, const ns3::Mac48Address &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::operator()(ns3::Ptr<const ns3::Packet> arg0, ns3::Mac48Address const & arg1) [member operator]
cls.add_method('operator()',
'void',
[param('ns3::Ptr< ns3::Packet const >', 'arg0'), param('ns3::Mac48Address const &', 'arg1')],
custom_name='__call__', is_virtual=True, is_pure_virtual=True)
return
def register_Ns3CallbackImpl__Void_Ns3Ptr__lt__const_ns3Packet__gt___Ns3Mac48Address_Const_ns3Cid___amp___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackImpl<void, ns3::Ptr<const ns3::Packet>, ns3::Mac48Address, const ns3::Cid &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackImpl<void, ns3::Ptr<const ns3::Packet>, ns3::Mac48Address, const ns3::Cid &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl(ns3::CallbackImpl<void, ns3::Ptr<const ns3::Packet>, ns3::Mac48Address, const ns3::Cid &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> const & arg0) [constructor]
cls.add_constructor([param('ns3::CallbackImpl< void, ns3::Ptr< ns3::Packet const >, ns3::Mac48Address, ns3::Cid const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty > const &', 'arg0')])
## callback.h (module 'core'): static std::string ns3::CallbackImpl<void, ns3::Ptr<const ns3::Packet>, ns3::Mac48Address, const ns3::Cid &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::DoGetTypeid() [member function]
cls.add_method('DoGetTypeid',
'std::string',
[],
is_static=True)
## callback.h (module 'core'): std::string ns3::CallbackImpl<void, ns3::Ptr<const ns3::Packet>, ns3::Mac48Address, const ns3::Cid &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::GetTypeid() const [member function]
cls.add_method('GetTypeid',
'std::string',
[],
is_const=True, is_virtual=True)
## callback.h (module 'core'): void ns3::CallbackImpl<void, ns3::Ptr<const ns3::Packet>, ns3::Mac48Address, const ns3::Cid &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::operator()(ns3::Ptr<const ns3::Packet> arg0, ns3::Mac48Address arg1, ns3::Cid const & arg2) [member operator]
cls.add_method('operator()',
'void',
[param('ns3::Ptr< ns3::Packet const >', 'arg0'), param('ns3::Mac48Address', 'arg1'), param('ns3::Cid const &', 'arg2')],
custom_name='__call__', is_virtual=True, is_pure_virtual=True)
return
def register_Ns3CallbackImpl__Void_Ns3Ptr__lt__const_ns3Packet__gt___Ns3Mac48Address_Ns3Cid_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackImpl<void, ns3::Ptr<const ns3::Packet>, ns3::Mac48Address, ns3::Cid, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackImpl<void, ns3::Ptr<const ns3::Packet>, ns3::Mac48Address, ns3::Cid, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl(ns3::CallbackImpl<void, ns3::Ptr<const ns3::Packet>, ns3::Mac48Address, ns3::Cid, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> const & arg0) [constructor]
cls.add_constructor([param('ns3::CallbackImpl< void, ns3::Ptr< ns3::Packet const >, ns3::Mac48Address, ns3::Cid, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty > const &', 'arg0')])
## callback.h (module 'core'): static std::string ns3::CallbackImpl<void, ns3::Ptr<const ns3::Packet>, ns3::Mac48Address, ns3::Cid, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::DoGetTypeid() [member function]
cls.add_method('DoGetTypeid',
'std::string',
[],
is_static=True)
## callback.h (module 'core'): std::string ns3::CallbackImpl<void, ns3::Ptr<const ns3::Packet>, ns3::Mac48Address, ns3::Cid, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::GetTypeid() const [member function]
cls.add_method('GetTypeid',
'std::string',
[],
is_const=True, is_virtual=True)
## callback.h (module 'core'): void ns3::CallbackImpl<void, ns3::Ptr<const ns3::Packet>, ns3::Mac48Address, ns3::Cid, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::operator()(ns3::Ptr<const ns3::Packet> arg0, ns3::Mac48Address arg1, ns3::Cid arg2) [member operator]
cls.add_method('operator()',
'void',
[param('ns3::Ptr< ns3::Packet const >', 'arg0'), param('ns3::Mac48Address', 'arg1'), param('ns3::Cid', 'arg2')],
custom_name='__call__', is_virtual=True, is_pure_virtual=True)
return
def register_Ns3CallbackImpl__Void_Ns3Ptr__lt__const_ns3Packet__gt___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackImpl<void, ns3::Ptr<const ns3::Packet>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackImpl<void, ns3::Ptr<const ns3::Packet>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl(ns3::CallbackImpl<void, ns3::Ptr<const ns3::Packet>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> const & arg0) [constructor]
cls.add_constructor([param('ns3::CallbackImpl< void, ns3::Ptr< ns3::Packet const >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty > const &', 'arg0')])
## callback.h (module 'core'): static std::string ns3::CallbackImpl<void, ns3::Ptr<const ns3::Packet>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::DoGetTypeid() [member function]
cls.add_method('DoGetTypeid',
'std::string',
[],
is_static=True)
## callback.h (module 'core'): std::string ns3::CallbackImpl<void, ns3::Ptr<const ns3::Packet>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::GetTypeid() const [member function]
cls.add_method('GetTypeid',
'std::string',
[],
is_const=True, is_virtual=True)
## callback.h (module 'core'): void ns3::CallbackImpl<void, ns3::Ptr<const ns3::Packet>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::operator()(ns3::Ptr<const ns3::Packet> arg0) [member operator]
cls.add_method('operator()',
'void',
[param('ns3::Ptr< ns3::Packet const >', 'arg0')],
custom_name='__call__', is_virtual=True, is_pure_virtual=True)
return
def register_Ns3CallbackImpl__Void_Ns3Ptr__lt__const_ns3PacketBurst__gt___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackImpl<void, ns3::Ptr<const ns3::PacketBurst>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackImpl<void, ns3::Ptr<const ns3::PacketBurst>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl(ns3::CallbackImpl<void, ns3::Ptr<const ns3::PacketBurst>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> const & arg0) [constructor]
cls.add_constructor([param('ns3::CallbackImpl< void, ns3::Ptr< ns3::PacketBurst const >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty > const &', 'arg0')])
## callback.h (module 'core'): static std::string ns3::CallbackImpl<void, ns3::Ptr<const ns3::PacketBurst>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::DoGetTypeid() [member function]
cls.add_method('DoGetTypeid',
'std::string',
[],
is_static=True)
## callback.h (module 'core'): std::string ns3::CallbackImpl<void, ns3::Ptr<const ns3::PacketBurst>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::GetTypeid() const [member function]
cls.add_method('GetTypeid',
'std::string',
[],
is_const=True, is_virtual=True)
## callback.h (module 'core'): void ns3::CallbackImpl<void, ns3::Ptr<const ns3::PacketBurst>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::operator()(ns3::Ptr<const ns3::PacketBurst> arg0) [member operator]
cls.add_method('operator()',
'void',
[param('ns3::Ptr< ns3::PacketBurst const >', 'arg0')],
custom_name='__call__', is_virtual=True, is_pure_virtual=True)
return
def register_Ns3CallbackImpl__Void_Ns3Ptr__lt__ns3NetDevice__gt___Ns3Ptr__lt__const_ns3Packet__gt___Unsigned_short_Const_ns3Address___amp___Const_ns3Address___amp___Ns3NetDevicePacketType_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackImpl<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<const ns3::Packet>, unsigned short, const ns3::Address &, const ns3::Address &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackImpl<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<const ns3::Packet>, unsigned short, const ns3::Address &, const ns3::Address &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl(ns3::CallbackImpl<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<const ns3::Packet>, unsigned short, const ns3::Address &, const ns3::Address &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> const & arg0) [constructor]
cls.add_constructor([param('ns3::CallbackImpl< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty > const &', 'arg0')])
## callback.h (module 'core'): static std::string ns3::CallbackImpl<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<const ns3::Packet>, unsigned short, const ns3::Address &, const ns3::Address &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty>::DoGetTypeid() [member function]
cls.add_method('DoGetTypeid',
'std::string',
[],
is_static=True)
## callback.h (module 'core'): std::string ns3::CallbackImpl<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<const ns3::Packet>, unsigned short, const ns3::Address &, const ns3::Address &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty>::GetTypeid() const [member function]
cls.add_method('GetTypeid',
'std::string',
[],
is_const=True, is_virtual=True)
## callback.h (module 'core'): void ns3::CallbackImpl<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<const ns3::Packet>, unsigned short, const ns3::Address &, const ns3::Address &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty>::operator()(ns3::Ptr<ns3::NetDevice> arg0, ns3::Ptr<const ns3::Packet> arg1, short unsigned int arg2, ns3::Address const & arg3, ns3::Address const & arg4, ns3::NetDevice::PacketType arg5) [member operator]
cls.add_method('operator()',
'void',
[param('ns3::Ptr< ns3::NetDevice >', 'arg0'), param('ns3::Ptr< ns3::Packet const >', 'arg1'), param('short unsigned int', 'arg2'), param('ns3::Address const &', 'arg3'), param('ns3::Address const &', 'arg4'), param('ns3::NetDevice::PacketType', 'arg5')],
custom_name='__call__', is_virtual=True, is_pure_virtual=True)
return
def register_Ns3CallbackImpl__Void_Ns3Ptr__lt__ns3NetDevice__gt___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackImpl<void, ns3::Ptr<ns3::NetDevice>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackImpl<void, ns3::Ptr<ns3::NetDevice>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl(ns3::CallbackImpl<void, ns3::Ptr<ns3::NetDevice>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> const & arg0) [constructor]
cls.add_constructor([param('ns3::CallbackImpl< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty > const &', 'arg0')])
## callback.h (module 'core'): static std::string ns3::CallbackImpl<void, ns3::Ptr<ns3::NetDevice>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::DoGetTypeid() [member function]
cls.add_method('DoGetTypeid',
'std::string',
[],
is_static=True)
## callback.h (module 'core'): std::string ns3::CallbackImpl<void, ns3::Ptr<ns3::NetDevice>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::GetTypeid() const [member function]
cls.add_method('GetTypeid',
'std::string',
[],
is_const=True, is_virtual=True)
## callback.h (module 'core'): void ns3::CallbackImpl<void, ns3::Ptr<ns3::NetDevice>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::operator()(ns3::Ptr<ns3::NetDevice> arg0) [member operator]
cls.add_method('operator()',
'void',
[param('ns3::Ptr< ns3::NetDevice >', 'arg0')],
custom_name='__call__', is_virtual=True, is_pure_virtual=True)
return
def register_Ns3CallbackImpl__Void_Ns3Ptr__lt__ns3PacketBurst__gt___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackImpl<void, ns3::Ptr<ns3::PacketBurst>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackImpl<void, ns3::Ptr<ns3::PacketBurst>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl(ns3::CallbackImpl<void, ns3::Ptr<ns3::PacketBurst>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> const & arg0) [constructor]
cls.add_constructor([param('ns3::CallbackImpl< void, ns3::Ptr< ns3::PacketBurst >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty > const &', 'arg0')])
## callback.h (module 'core'): static std::string ns3::CallbackImpl<void, ns3::Ptr<ns3::PacketBurst>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::DoGetTypeid() [member function]
cls.add_method('DoGetTypeid',
'std::string',
[],
is_static=True)
## callback.h (module 'core'): std::string ns3::CallbackImpl<void, ns3::Ptr<ns3::PacketBurst>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::GetTypeid() const [member function]
cls.add_method('GetTypeid',
'std::string',
[],
is_const=True, is_virtual=True)
## callback.h (module 'core'): void ns3::CallbackImpl<void, ns3::Ptr<ns3::PacketBurst>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::operator()(ns3::Ptr<ns3::PacketBurst> arg0) [member operator]
cls.add_method('operator()',
'void',
[param('ns3::Ptr< ns3::PacketBurst >', 'arg0')],
custom_name='__call__', is_virtual=True, is_pure_virtual=True)
return
def register_Ns3SimpleOfdmWimaxChannel_methods(root_module, cls):
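    # Registers SimpleOfdmWimaxChannel: constructors (including the PropModel overload),
    # AssignStreams/GetTypeId/Send/SetPropagationModel, and the private virtual Channel
    # hooks DoAttach, DoGetDevice and DoGetNDevices.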
## simple-ofdm-wimax-channel.h (module 'wimax'): ns3::SimpleOfdmWimaxChannel::SimpleOfdmWimaxChannel(ns3::SimpleOfdmWimaxChannel const & arg0) [constructor]
cls.add_constructor([param('ns3::SimpleOfdmWimaxChannel const &', 'arg0')])
## simple-ofdm-wimax-channel.h (module 'wimax'): ns3::SimpleOfdmWimaxChannel::SimpleOfdmWimaxChannel() [constructor]
cls.add_constructor([])
## simple-ofdm-wimax-channel.h (module 'wimax'): ns3::SimpleOfdmWimaxChannel::SimpleOfdmWimaxChannel(ns3::SimpleOfdmWimaxChannel::PropModel propModel) [constructor]
cls.add_constructor([param('ns3::SimpleOfdmWimaxChannel::PropModel', 'propModel')])
## simple-ofdm-wimax-channel.h (module 'wimax'): int64_t ns3::SimpleOfdmWimaxChannel::AssignStreams(int64_t stream) [member function]
cls.add_method('AssignStreams',
'int64_t',
[param('int64_t', 'stream')],
is_virtual=True)
## simple-ofdm-wimax-channel.h (module 'wimax'): static ns3::TypeId ns3::SimpleOfdmWimaxChannel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## simple-ofdm-wimax-channel.h (module 'wimax'): void ns3::SimpleOfdmWimaxChannel::Send(ns3::Time BlockTime, uint32_t burstSize, ns3::Ptr<ns3::WimaxPhy> phy, bool isFirstBlock, bool isLastBlock, uint64_t frequency, ns3::WimaxPhy::ModulationType modulationType, uint8_t direction, double txPowerDbm, ns3::Ptr<ns3::PacketBurst> burst) [member function]
cls.add_method('Send',
'void',
[param('ns3::Time', 'BlockTime'), param('uint32_t', 'burstSize'), param('ns3::Ptr< ns3::WimaxPhy >', 'phy'), param('bool', 'isFirstBlock'), param('bool', 'isLastBlock'), param('uint64_t', 'frequency'), param('ns3::WimaxPhy::ModulationType', 'modulationType'), param('uint8_t', 'direction'), param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::PacketBurst >', 'burst')])
## simple-ofdm-wimax-channel.h (module 'wimax'): void ns3::SimpleOfdmWimaxChannel::SetPropagationModel(ns3::SimpleOfdmWimaxChannel::PropModel propModel) [member function]
cls.add_method('SetPropagationModel',
'void',
[param('ns3::SimpleOfdmWimaxChannel::PropModel', 'propModel')])
## simple-ofdm-wimax-channel.h (module 'wimax'): void ns3::SimpleOfdmWimaxChannel::DoAttach(ns3::Ptr<ns3::WimaxPhy> phy) [member function]
cls.add_method('DoAttach',
'void',
[param('ns3::Ptr< ns3::WimaxPhy >', 'phy')],
is_virtual=True, visibility='private')
## simple-ofdm-wimax-channel.h (module 'wimax'): ns3::Ptr<ns3::NetDevice> ns3::SimpleOfdmWimaxChannel::DoGetDevice(std::size_t i) const [member function]
cls.add_method('DoGetDevice',
'ns3::Ptr< ns3::NetDevice >',
[param('std::size_t', 'i')],
is_const=True, is_virtual=True, visibility='private')
## simple-ofdm-wimax-channel.h (module 'wimax'): std::size_t ns3::SimpleOfdmWimaxChannel::DoGetNDevices() const [member function]
cls.add_method('DoGetNDevices',
'std::size_t',
[],
is_const=True, is_virtual=True, visibility='private')
return
def register_Ns3SubscriberStationNetDevice_methods(root_module, cls):
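    # Registers SubscriberStationNetDevice: constructors, the timer/interval accessors
    # (T1, T2, T3, T7, T12, T20, T21, DCD/UCD and lost-map intervals), connection and
    # service-flow management helpers, and the private DoDispose/DoSend/DoReceive overrides.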
## ss-net-device.h (module 'wimax'): static ns3::TypeId ns3::SubscriberStationNetDevice::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## ss-net-device.h (module 'wimax'): ns3::SubscriberStationNetDevice::SubscriberStationNetDevice() [constructor]
cls.add_constructor([])
## ss-net-device.h (module 'wimax'): ns3::SubscriberStationNetDevice::SubscriberStationNetDevice(ns3::Ptr<ns3::Node> node, ns3::Ptr<ns3::WimaxPhy> phy) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::Node >', 'node'), param('ns3::Ptr< ns3::WimaxPhy >', 'phy')])
## ss-net-device.h (module 'wimax'): void ns3::SubscriberStationNetDevice::InitSubscriberStationNetDevice() [member function]
cls.add_method('InitSubscriberStationNetDevice',
'void',
[])
## ss-net-device.h (module 'wimax'): void ns3::SubscriberStationNetDevice::SetLostDlMapInterval(ns3::Time lostDlMapInterval) [member function]
cls.add_method('SetLostDlMapInterval',
'void',
[param('ns3::Time', 'lostDlMapInterval')])
## ss-net-device.h (module 'wimax'): ns3::Time ns3::SubscriberStationNetDevice::GetLostDlMapInterval() const [member function]
cls.add_method('GetLostDlMapInterval',
'ns3::Time',
[],
is_const=True)
## ss-net-device.h (module 'wimax'): void ns3::SubscriberStationNetDevice::SetLostUlMapInterval(ns3::Time lostUlMapInterval) [member function]
cls.add_method('SetLostUlMapInterval',
'void',
[param('ns3::Time', 'lostUlMapInterval')])
## ss-net-device.h (module 'wimax'): ns3::Time ns3::SubscriberStationNetDevice::GetLostUlMapInterval() const [member function]
cls.add_method('GetLostUlMapInterval',
'ns3::Time',
[],
is_const=True)
## ss-net-device.h (module 'wimax'): void ns3::SubscriberStationNetDevice::SetMaxDcdInterval(ns3::Time maxDcdInterval) [member function]
cls.add_method('SetMaxDcdInterval',
'void',
[param('ns3::Time', 'maxDcdInterval')])
## ss-net-device.h (module 'wimax'): ns3::Time ns3::SubscriberStationNetDevice::GetMaxDcdInterval() const [member function]
cls.add_method('GetMaxDcdInterval',
'ns3::Time',
[],
is_const=True)
## ss-net-device.h (module 'wimax'): void ns3::SubscriberStationNetDevice::SetMaxUcdInterval(ns3::Time maxUcdInterval) [member function]
cls.add_method('SetMaxUcdInterval',
'void',
[param('ns3::Time', 'maxUcdInterval')])
## ss-net-device.h (module 'wimax'): ns3::Time ns3::SubscriberStationNetDevice::GetMaxUcdInterval() const [member function]
cls.add_method('GetMaxUcdInterval',
'ns3::Time',
[],
is_const=True)
## ss-net-device.h (module 'wimax'): void ns3::SubscriberStationNetDevice::SetIntervalT1(ns3::Time interval1) [member function]
cls.add_method('SetIntervalT1',
'void',
[param('ns3::Time', 'interval1')])
## ss-net-device.h (module 'wimax'): ns3::Time ns3::SubscriberStationNetDevice::GetIntervalT1() const [member function]
cls.add_method('GetIntervalT1',
'ns3::Time',
[],
is_const=True)
## ss-net-device.h (module 'wimax'): void ns3::SubscriberStationNetDevice::SetIntervalT2(ns3::Time interval2) [member function]
cls.add_method('SetIntervalT2',
'void',
[param('ns3::Time', 'interval2')])
## ss-net-device.h (module 'wimax'): ns3::Time ns3::SubscriberStationNetDevice::GetIntervalT2() const [member function]
cls.add_method('GetIntervalT2',
'ns3::Time',
[],
is_const=True)
## ss-net-device.h (module 'wimax'): void ns3::SubscriberStationNetDevice::SetIntervalT3(ns3::Time interval3) [member function]
cls.add_method('SetIntervalT3',
'void',
[param('ns3::Time', 'interval3')])
## ss-net-device.h (module 'wimax'): ns3::Time ns3::SubscriberStationNetDevice::GetIntervalT3() const [member function]
cls.add_method('GetIntervalT3',
'ns3::Time',
[],
is_const=True)
## ss-net-device.h (module 'wimax'): void ns3::SubscriberStationNetDevice::SetIntervalT7(ns3::Time interval7) [member function]
cls.add_method('SetIntervalT7',
'void',
[param('ns3::Time', 'interval7')])
## ss-net-device.h (module 'wimax'): ns3::Time ns3::SubscriberStationNetDevice::GetIntervalT7() const [member function]
cls.add_method('GetIntervalT7',
'ns3::Time',
[],
is_const=True)
## ss-net-device.h (module 'wimax'): void ns3::SubscriberStationNetDevice::SetIntervalT12(ns3::Time interval12) [member function]
cls.add_method('SetIntervalT12',
'void',
[param('ns3::Time', 'interval12')])
## ss-net-device.h (module 'wimax'): ns3::Time ns3::SubscriberStationNetDevice::GetIntervalT12() const [member function]
cls.add_method('GetIntervalT12',
'ns3::Time',
[],
is_const=True)
## ss-net-device.h (module 'wimax'): void ns3::SubscriberStationNetDevice::SetIntervalT20(ns3::Time interval20) [member function]
cls.add_method('SetIntervalT20',
'void',
[param('ns3::Time', 'interval20')])
## ss-net-device.h (module 'wimax'): ns3::Time ns3::SubscriberStationNetDevice::GetIntervalT20() const [member function]
cls.add_method('GetIntervalT20',
'ns3::Time',
[],
is_const=True)
## ss-net-device.h (module 'wimax'): void ns3::SubscriberStationNetDevice::SetIntervalT21(ns3::Time interval21) [member function]
cls.add_method('SetIntervalT21',
'void',
[param('ns3::Time', 'interval21')])
## ss-net-device.h (module 'wimax'): ns3::Time ns3::SubscriberStationNetDevice::GetIntervalT21() const [member function]
cls.add_method('GetIntervalT21',
'ns3::Time',
[],
is_const=True)
## ss-net-device.h (module 'wimax'): void ns3::SubscriberStationNetDevice::SetMaxContentionRangingRetries(uint8_t maxContentionRangingRetries) [member function]
cls.add_method('SetMaxContentionRangingRetries',
'void',
[param('uint8_t', 'maxContentionRangingRetries')])
## ss-net-device.h (module 'wimax'): uint8_t ns3::SubscriberStationNetDevice::GetMaxContentionRangingRetries() const [member function]
cls.add_method('GetMaxContentionRangingRetries',
'uint8_t',
[],
is_const=True)
## ss-net-device.h (module 'wimax'): void ns3::SubscriberStationNetDevice::SetBasicConnection(ns3::Ptr<ns3::WimaxConnection> basicConnection) [member function]
cls.add_method('SetBasicConnection',
'void',
[param('ns3::Ptr< ns3::WimaxConnection >', 'basicConnection')])
## ss-net-device.h (module 'wimax'): ns3::Ptr<ns3::WimaxConnection> ns3::SubscriberStationNetDevice::GetBasicConnection() const [member function]
cls.add_method('GetBasicConnection',
'ns3::Ptr< ns3::WimaxConnection >',
[],
is_const=True)
## ss-net-device.h (module 'wimax'): void ns3::SubscriberStationNetDevice::SetPrimaryConnection(ns3::Ptr<ns3::WimaxConnection> primaryConnection) [member function]
cls.add_method('SetPrimaryConnection',
'void',
[param('ns3::Ptr< ns3::WimaxConnection >', 'primaryConnection')])
## ss-net-device.h (module 'wimax'): ns3::Ptr<ns3::WimaxConnection> ns3::SubscriberStationNetDevice::GetPrimaryConnection() const [member function]
cls.add_method('GetPrimaryConnection',
'ns3::Ptr< ns3::WimaxConnection >',
[],
is_const=True)
## ss-net-device.h (module 'wimax'): ns3::Cid ns3::SubscriberStationNetDevice::GetBasicCid() const [member function]
cls.add_method('GetBasicCid',
'ns3::Cid',
[],
is_const=True)
## ss-net-device.h (module 'wimax'): ns3::Cid ns3::SubscriberStationNetDevice::GetPrimaryCid() const [member function]
cls.add_method('GetPrimaryCid',
'ns3::Cid',
[],
is_const=True)
## ss-net-device.h (module 'wimax'): void ns3::SubscriberStationNetDevice::SetModulationType(ns3::WimaxPhy::ModulationType modulationType) [member function]
cls.add_method('SetModulationType',
'void',
[param('ns3::WimaxPhy::ModulationType', 'modulationType')])
## ss-net-device.h (module 'wimax'): ns3::WimaxPhy::ModulationType ns3::SubscriberStationNetDevice::GetModulationType() const [member function]
cls.add_method('GetModulationType',
'ns3::WimaxPhy::ModulationType',
[],
is_const=True)
## ss-net-device.h (module 'wimax'): void ns3::SubscriberStationNetDevice::SetAreManagementConnectionsAllocated(bool areManagementConnectionsAllocated) [member function]
cls.add_method('SetAreManagementConnectionsAllocated',
'void',
[param('bool', 'areManagementConnectionsAllocated')])
## ss-net-device.h (module 'wimax'): bool ns3::SubscriberStationNetDevice::GetAreManagementConnectionsAllocated() const [member function]
cls.add_method('GetAreManagementConnectionsAllocated',
'bool',
[],
is_const=True)
## ss-net-device.h (module 'wimax'): void ns3::SubscriberStationNetDevice::SetAreServiceFlowsAllocated(bool areServiceFlowsAllocated) [member function]
cls.add_method('SetAreServiceFlowsAllocated',
'void',
[param('bool', 'areServiceFlowsAllocated')])
## ss-net-device.h (module 'wimax'): bool ns3::SubscriberStationNetDevice::GetAreServiceFlowsAllocated() const [member function]
cls.add_method('GetAreServiceFlowsAllocated',
'bool',
[],
is_const=True)
## ss-net-device.h (module 'wimax'): ns3::Ptr<ns3::SSScheduler> ns3::SubscriberStationNetDevice::GetScheduler() const [member function]
cls.add_method('GetScheduler',
'ns3::Ptr< ns3::SSScheduler >',
[],
is_const=True)
## ss-net-device.h (module 'wimax'): void ns3::SubscriberStationNetDevice::SetScheduler(ns3::Ptr<ns3::SSScheduler> ssScheduler) [member function]
cls.add_method('SetScheduler',
'void',
[param('ns3::Ptr< ns3::SSScheduler >', 'ssScheduler')])
## ss-net-device.h (module 'wimax'): bool ns3::SubscriberStationNetDevice::HasServiceFlows() const [member function]
cls.add_method('HasServiceFlows',
'bool',
[],
is_const=True)
## ss-net-device.h (module 'wimax'): bool ns3::SubscriberStationNetDevice::Enqueue(ns3::Ptr<ns3::Packet> packet, ns3::MacHeaderType const & hdrType, ns3::Ptr<ns3::WimaxConnection> connection) [member function]
cls.add_method('Enqueue',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::MacHeaderType const &', 'hdrType'), param('ns3::Ptr< ns3::WimaxConnection >', 'connection')],
is_virtual=True)
## ss-net-device.h (module 'wimax'): void ns3::SubscriberStationNetDevice::SendBurst(uint8_t uiuc, uint16_t nrSymbols, ns3::Ptr<ns3::WimaxConnection> connection, ns3::MacHeaderType::HeaderType packetType=::ns3::MacHeaderType::HeaderType::HEADER_TYPE_GENERIC) [member function]
cls.add_method('SendBurst',
'void',
[param('uint8_t', 'uiuc'), param('uint16_t', 'nrSymbols'), param('ns3::Ptr< ns3::WimaxConnection >', 'connection'), param('ns3::MacHeaderType::HeaderType', 'packetType', default_value='::ns3::MacHeaderType::HeaderType::HEADER_TYPE_GENERIC')])
## ss-net-device.h (module 'wimax'): void ns3::SubscriberStationNetDevice::Start() [member function]
cls.add_method('Start',
'void',
[],
is_virtual=True)
## ss-net-device.h (module 'wimax'): void ns3::SubscriberStationNetDevice::Stop() [member function]
cls.add_method('Stop',
'void',
[],
is_virtual=True)
## ss-net-device.h (module 'wimax'): void ns3::SubscriberStationNetDevice::AddServiceFlow(ns3::ServiceFlow * sf) [member function]
cls.add_method('AddServiceFlow',
'void',
[param('ns3::ServiceFlow *', 'sf')])
## ss-net-device.h (module 'wimax'): void ns3::SubscriberStationNetDevice::AddServiceFlow(ns3::ServiceFlow sf) [member function]
cls.add_method('AddServiceFlow',
'void',
[param('ns3::ServiceFlow', 'sf')])
## ss-net-device.h (module 'wimax'): void ns3::SubscriberStationNetDevice::SetTimer(ns3::EventId eventId, ns3::EventId & event) [member function]
cls.add_method('SetTimer',
'void',
[param('ns3::EventId', 'eventId'), param('ns3::EventId &', 'event')])
## ss-net-device.h (module 'wimax'): bool ns3::SubscriberStationNetDevice::IsRegistered() const [member function]
cls.add_method('IsRegistered',
'bool',
[],
is_const=True)
## ss-net-device.h (module 'wimax'): ns3::Time ns3::SubscriberStationNetDevice::GetTimeToAllocation(ns3::Time deferTime) [member function]
cls.add_method('GetTimeToAllocation',
'ns3::Time',
[param('ns3::Time', 'deferTime')])
## ss-net-device.h (module 'wimax'): ns3::SubscriberStationNetDevice::m_linkManager [variable]
cls.add_instance_attribute('m_linkManager', 'ns3::Ptr< ns3::SSLinkManager >', is_const=False)
## ss-net-device.h (module 'wimax'): ns3::Ptr<ns3::IpcsClassifier> ns3::SubscriberStationNetDevice::GetIpcsClassifier() const [member function]
cls.add_method('GetIpcsClassifier',
'ns3::Ptr< ns3::IpcsClassifier >',
[],
is_const=True)
## ss-net-device.h (module 'wimax'): void ns3::SubscriberStationNetDevice::SetIpcsPacketClassifier(ns3::Ptr<ns3::IpcsClassifier> classifier) [member function]
cls.add_method('SetIpcsPacketClassifier',
'void',
[param('ns3::Ptr< ns3::IpcsClassifier >', 'classifier')])
## ss-net-device.h (module 'wimax'): ns3::Ptr<ns3::SSLinkManager> ns3::SubscriberStationNetDevice::GetLinkManager() const [member function]
cls.add_method('GetLinkManager',
'ns3::Ptr< ns3::SSLinkManager >',
[],
is_const=True)
## ss-net-device.h (module 'wimax'): void ns3::SubscriberStationNetDevice::SetLinkManager(ns3::Ptr<ns3::SSLinkManager> linkManager) [member function]
cls.add_method('SetLinkManager',
'void',
[param('ns3::Ptr< ns3::SSLinkManager >', 'linkManager')])
## ss-net-device.h (module 'wimax'): ns3::Ptr<ns3::SsServiceFlowManager> ns3::SubscriberStationNetDevice::GetServiceFlowManager() const [member function]
cls.add_method('GetServiceFlowManager',
'ns3::Ptr< ns3::SsServiceFlowManager >',
[],
is_const=True)
## ss-net-device.h (module 'wimax'): void ns3::SubscriberStationNetDevice::SetServiceFlowManager(ns3::Ptr<ns3::SsServiceFlowManager> sfm) [member function]
cls.add_method('SetServiceFlowManager',
'void',
[param('ns3::Ptr< ns3::SsServiceFlowManager >', 'sfm')])
## ss-net-device.h (module 'wimax'): void ns3::SubscriberStationNetDevice::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
is_virtual=True, visibility='private')
## ss-net-device.h (module 'wimax'): bool ns3::SubscriberStationNetDevice::DoSend(ns3::Ptr<ns3::Packet> packet, ns3::Mac48Address const & source, ns3::Mac48Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('DoSend',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Mac48Address const &', 'source'), param('ns3::Mac48Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_virtual=True, visibility='private')
## ss-net-device.h (module 'wimax'): void ns3::SubscriberStationNetDevice::DoReceive(ns3::Ptr<ns3::Packet> packet) [member function]
cls.add_method('DoReceive',
'void',
[param('ns3::Ptr< ns3::Packet >', 'packet')],
is_virtual=True, visibility='private')
return
def register_Ns3HashImplementation_methods(root_module, cls):
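    # Registers the abstract ns3::Hash::Implementation base class; GetHash32() and
    # clear() are pure virtual here, while GetHash64() is a plain virtual with a
    # default implementation.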
## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation(ns3::Hash::Implementation const & arg0) [constructor]
cls.add_constructor([param('ns3::Hash::Implementation const &', 'arg0')])
## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation() [constructor]
cls.add_constructor([])
## hash-function.h (module 'core'): uint32_t ns3::Hash::Implementation::GetHash32(char const * buffer, std::size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('std::size_t const', 'size')],
is_virtual=True, is_pure_virtual=True)
## hash-function.h (module 'core'): uint64_t ns3::Hash::Implementation::GetHash64(char const * buffer, std::size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('std::size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): void ns3::Hash::Implementation::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True, is_pure_virtual=True)
return
def register_Ns3HashFunctionFnv1a_methods(root_module, cls):
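    # Registers the concrete ns3::Hash::Function::Fnv1a functor; the remaining hash
    # functors below (Hash32, Hash64, Murmur3) follow the same pattern, overriding
    # GetHash32/GetHash64/clear as appropriate.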
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a(ns3::Hash::Function::Fnv1a const & arg0) [constructor]
cls.add_constructor([param('ns3::Hash::Function::Fnv1a const &', 'arg0')])
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a() [constructor]
cls.add_constructor([])
## hash-fnv.h (module 'core'): uint32_t ns3::Hash::Function::Fnv1a::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-fnv.h (module 'core'): uint64_t ns3::Hash::Function::Fnv1a::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-fnv.h (module 'core'): void ns3::Hash::Function::Fnv1a::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3HashFunctionHash32_methods(root_module, cls):
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Function::Hash32 const & arg0) [constructor]
cls.add_constructor([param('ns3::Hash::Function::Hash32 const &', 'arg0')])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Hash32Function_ptr hp) [constructor]
cls.add_constructor([param('ns3::Hash::Hash32Function_ptr', 'hp')])
## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash32::GetHash32(char const * buffer, std::size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('std::size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): void ns3::Hash::Function::Hash32::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3HashFunctionHash64_methods(root_module, cls):
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Function::Hash64 const & arg0) [constructor]
cls.add_constructor([param('ns3::Hash::Function::Hash64 const &', 'arg0')])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Hash64Function_ptr hp) [constructor]
cls.add_constructor([param('ns3::Hash::Hash64Function_ptr', 'hp')])
## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash64::GetHash32(char const * buffer, std::size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('std::size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): uint64_t ns3::Hash::Function::Hash64::GetHash64(char const * buffer, std::size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('std::size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): void ns3::Hash::Function::Hash64::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3HashFunctionMurmur3_methods(root_module, cls):
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3(ns3::Hash::Function::Murmur3 const & arg0) [constructor]
cls.add_constructor([param('ns3::Hash::Function::Murmur3 const &', 'arg0')])
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3() [constructor]
cls.add_constructor([])
## hash-murmur3.h (module 'core'): uint32_t ns3::Hash::Function::Murmur3::GetHash32(char const * buffer, std::size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('std::size_t const', 'size')],
is_virtual=True)
## hash-murmur3.h (module 'core'): uint64_t ns3::Hash::Function::Murmur3::GetHash64(char const * buffer, std::size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('std::size_t const', 'size')],
is_virtual=True)
## hash-murmur3.h (module 'core'): void ns3::Hash::Function::Murmur3::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_functions(root_module):
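    # Registers free functions (CRC8Calculate from crc8.h) and recurses into the
    # nested C++ namespaces (FatalImpl, Hash, TracedValueCallback, internal).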
module = root_module
## crc8.h (module 'wimax'): uint8_t ns3::CRC8Calculate(uint8_t const * data, int length) [free function]
module.add_function('CRC8Calculate',
'uint8_t',
[param('uint8_t const *', 'data'), param('int', 'length')])
register_functions_ns3_FatalImpl(module.add_cpp_namespace('FatalImpl'), root_module)
register_functions_ns3_Hash(module.add_cpp_namespace('Hash'), root_module)
register_functions_ns3_TracedValueCallback(module.add_cpp_namespace('TracedValueCallback'), root_module)
register_functions_ns3_internal(module.add_cpp_namespace('internal'), root_module)
return
def register_functions_ns3_FatalImpl(module, root_module):
return
def register_functions_ns3_Hash(module, root_module):
register_functions_ns3_Hash_Function(module.add_cpp_namespace('Function'), root_module)
return
def register_functions_ns3_Hash_Function(module, root_module):
return
def register_functions_ns3_TracedValueCallback(module, root_module):
return
def register_functions_ns3_internal(module, root_module):
return
def main():
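    # Code-generation entry point: builds the module description, registers all types,
    # methods and free functions, then writes the generated binding source to stdout
    # through a FileCodeSink. Typically invoked by the ns-3 build system when
    # (re)generating the Python bindings.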
out = FileCodeSink(sys.stdout)
root_module = module_init()
register_types(root_module)
register_methods(root_module)
register_functions(root_module)
root_module.generate(out)
if __name__ == '__main__':
main()
| gpl-2.0 |