repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses, 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
demisto/content | Packs/Nmap/Integrations/Nmap/Nmap.py | 1 | 2190 | import demistomock as demisto
from CommonServerPython import *
from libnmap.process import NmapProcess
from libnmap.parser import NmapParser
from libnmap.reportjson import ReportEncoder
if demisto.command() == 'test-module':
demisto.results('ok')
sys.exit(0)
if demisto.command() == 'nmap-scan':
nm = NmapProcess(argToList(demisto.args()['targets']), options=demisto.args()['options'])
rc = nm.run()
if rc != 0:
demisto.results({
'Type': entryTypes['error'],
'ContentsFormat': formats['text'],
'Contents': 'Unable to execute - ' + nm.stderr
})
sys.exit(0)
r = NmapParser.parse(nm.stdout)
md = '## ' + r.summary + '\n'
hosts = []
try:
scan_type = r.scan_type
except KeyError:
scan_type = None
for host in r.hosts:
h = {}
if len(host.hostnames):
tmp_host = host.hostnames.pop()
h['Hostname'] = tmp_host
else:
tmp_host = host.address
h['Address'] = host.address
h['Status'] = host.status
svc = []
md += "### Nmap scan report for {0}".format(tmp_host) + \
(" ({0})\n".format(host.address) if tmp_host != host.address else "\n")
md += "#### Host is {0}.\n".format(host.status)
for serv in host.services:
svc.append({
'Port': serv.port,
'Protocol': serv.protocol,
'State': serv.state,
'Service': serv.service,
'Banner': serv.banner
})
md += tableToMarkdown('Services', svc, ['Port', 'Protocol', 'State', 'Service', 'Banner'])
h['Services'] = svc
hosts.append(h)
scan = {
'Summary': r.summary,
'Version': r.version,
'Started': r.started,
'Ended': r.endtime,
'CommandLine': r.commandline,
'ScanType': scan_type,
'Hosts': hosts}
demisto.results({
'ContentsFormat': formats['json'],
'Type': entryTypes['note'],
'Contents': json.dumps(r, cls=ReportEncoder),
'HumanReadable': md,
'EntryContext': {'NMAP.Scan': scan}
})
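# Illustrative war-room invocation of the 'nmap-scan' command handled above;
# the argument names match the demisto.args() lookups in this file, and the
# target host is only an example:
#
#     !nmap-scan targets="scanme.nmap.org" options="-sS -p 1-1024"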
| mit |
mayankcu/Django-social | social_auth/backends/google.py | 7 | 9621 | """
Google OpenID and OAuth support
OAuth works out of the box using anonymous configurations; the username is
generated by requesting the email from the undocumented googleapis.com
service. Registered applications can define the settings GOOGLE_CONSUMER_KEY
and GOOGLE_CONSUMER_SECRET, and they will be used in the auth process.
The setting GOOGLE_OAUTH_EXTRA_SCOPE can be used to access different
user-related data, like calendar, contacts, docs, etc.
OAuth2 works similarly to OAuth, but the application must be defined on the
Google APIs console https://code.google.com/apis/console/ under the Identity
option.
OpenID also works out of the box; it doesn't need further configuration.
"""
from urllib import urlencode
from urllib2 import Request
from oauth2 import Request as OAuthRequest
from django.utils import simplejson
from social_auth.utils import setting, dsa_urlopen
from social_auth.backends import OpenIdAuth, ConsumerBasedOAuth, BaseOAuth2, \
OAuthBackend, OpenIDBackend, USERNAME
from social_auth.backends.exceptions import AuthFailed
# Google OAuth base configuration
GOOGLE_OAUTH_SERVER = 'www.google.com'
AUTHORIZATION_URL = 'https://www.google.com/accounts/OAuthAuthorizeToken'
REQUEST_TOKEN_URL = 'https://www.google.com/accounts/OAuthGetRequestToken'
ACCESS_TOKEN_URL = 'https://www.google.com/accounts/OAuthGetAccessToken'
# Google OAuth2 base configuration
GOOGLE_OAUTH2_SERVER = 'accounts.google.com'
GOOGLE_OAUTH2_AUTHORIZATION_URL = 'https://accounts.google.com/o/oauth2/auth'
# scope for user email, specify extra scopes in settings, for example:
# GOOGLE_OAUTH_EXTRA_SCOPE = ['https://www.google.com/m8/feeds/']
GOOGLE_OAUTH_SCOPE = ['https://www.googleapis.com/auth/userinfo#email']
GOOGLE_OAUTH2_SCOPE = ['https://www.googleapis.com/auth/userinfo.email',
'https://www.googleapis.com/auth/userinfo.profile']
GOOGLEAPIS_EMAIL = 'https://www.googleapis.com/userinfo/email'
GOOGLEAPIS_PROFILE = 'https://www.googleapis.com/oauth2/v1/userinfo'
GOOGLE_OPENID_URL = 'https://www.google.com/accounts/o8/id'
# Backends
class GoogleOAuthBackend(OAuthBackend):
"""Google OAuth authentication backend"""
name = 'google-oauth'
def get_user_id(self, details, response):
"""Use google email as unique id"""
validate_whitelists(self, details['email'])
return details['email']
def get_user_details(self, response):
"""Return user details from Orkut account"""
email = response.get('email', '')
return {USERNAME: email.split('@', 1)[0],
'email': email,
'fullname': '',
'first_name': '',
'last_name': ''}
class GoogleOAuth2Backend(GoogleOAuthBackend):
"""Google OAuth2 authentication backend"""
name = 'google-oauth2'
EXTRA_DATA = [
('refresh_token', 'refresh_token', True),
('expires_in', setting('SOCIAL_AUTH_EXPIRATION', 'expires'))
]
def get_user_id(self, details, response):
"""Use google email or id as unique id"""
user_id = super(GoogleOAuth2Backend, self).get_user_id(details,
response)
if setting('GOOGLE_OAUTH2_USE_UNIQUE_USER_ID', False):
return response['id']
return user_id
def get_user_details(self, response):
email = response.get('email', '')
return {USERNAME: email.split('@', 1)[0],
'email': email,
'fullname': response.get('name', ''),
'first_name': response.get('given_name', ''),
'last_name': response.get('family_name', '')}
class GoogleBackend(OpenIDBackend):
"""Google OpenID authentication backend"""
name = 'google'
def get_user_id(self, details, response):
"""
Return the unique user id provided by the service. For Google, the user
email is unique enough to identify a single user. Email comes from the schema:
http://axschema.org/contact/email
"""
validate_whitelists(self, details['email'])
return details['email']
# Auth classes
class GoogleAuth(OpenIdAuth):
"""Google OpenID authentication"""
AUTH_BACKEND = GoogleBackend
def openid_url(self):
"""Return Google OpenID service url"""
return GOOGLE_OPENID_URL
class BaseGoogleOAuth(ConsumerBasedOAuth):
"""Base class for Google OAuth mechanism"""
AUTHORIZATION_URL = AUTHORIZATION_URL
REQUEST_TOKEN_URL = REQUEST_TOKEN_URL
ACCESS_TOKEN_URL = ACCESS_TOKEN_URL
SERVER_URL = GOOGLE_OAUTH_SERVER
def user_data(self, access_token, *args, **kwargs):
"""Loads user data from G service"""
raise NotImplementedError('Implement in subclass')
class GoogleOAuth(BaseGoogleOAuth):
"""Google OAuth authorization mechanism"""
AUTH_BACKEND = GoogleOAuthBackend
SETTINGS_KEY_NAME = 'GOOGLE_CONSUMER_KEY'
SETTINGS_SECRET_NAME = 'GOOGLE_CONSUMER_SECRET'
def user_data(self, access_token, *args, **kwargs):
"""Return user data from Google API"""
request = self.oauth_request(access_token, GOOGLEAPIS_EMAIL,
{'alt': 'json'})
url, params = request.to_url().split('?', 1)
return googleapis_email(url, params)
def oauth_authorization_request(self, token):
"""Generate OAuth request to authorize token."""
return OAuthRequest.from_consumer_and_token(self.consumer,
token=token,
http_url=self.AUTHORIZATION_URL)
def oauth_request(self, token, url, extra_params=None):
extra_params = extra_params or {}
scope = GOOGLE_OAUTH_SCOPE + setting('GOOGLE_OAUTH_EXTRA_SCOPE', [])
extra_params.update({
'scope': ' '.join(scope),
})
if not self.registered():
xoauth_displayname = setting('GOOGLE_DISPLAY_NAME', 'Social Auth')
extra_params['xoauth_displayname'] = xoauth_displayname
return super(GoogleOAuth, self).oauth_request(token, url, extra_params)
@classmethod
def get_key_and_secret(cls):
"""Return Google OAuth Consumer Key and Consumer Secret pair, uses
anonymous by default, beware that this marks the application as not
registered and a security badge is displayed on authorization page.
http://code.google.com/apis/accounts/docs/OAuth_ref.html#SigningOAuth
"""
try:
return super(GoogleOAuth, cls).get_key_and_secret()
except AttributeError:
return 'anonymous', 'anonymous'
@classmethod
def enabled(cls):
"""Google OAuth is always enabled because of anonymous access"""
return True
def registered(self):
"""Check if Google OAuth Consumer Key and Consumer Secret are set"""
return self.get_key_and_secret() != ('anonymous', 'anonymous')
# TODO: Remove this setting name check, keep for backward compatibility
_OAUTH2_KEY_NAME = setting('GOOGLE_OAUTH2_CLIENT_ID') and \
'GOOGLE_OAUTH2_CLIENT_ID' or \
'GOOGLE_OAUTH2_CLIENT_KEY'
class GoogleOAuth2(BaseOAuth2):
"""Google OAuth2 support"""
AUTH_BACKEND = GoogleOAuth2Backend
AUTHORIZATION_URL = 'https://accounts.google.com/o/oauth2/auth'
ACCESS_TOKEN_URL = 'https://accounts.google.com/o/oauth2/token'
SETTINGS_KEY_NAME = _OAUTH2_KEY_NAME
SETTINGS_SECRET_NAME = 'GOOGLE_OAUTH2_CLIENT_SECRET'
SCOPE_VAR_NAME = 'GOOGLE_OAUTH_EXTRA_SCOPE'
DEFAULT_SCOPE = GOOGLE_OAUTH2_SCOPE
REDIRECT_STATE = False
def user_data(self, access_token, *args, **kwargs):
"""Return user data from Google API"""
return googleapis_profile(GOOGLEAPIS_PROFILE, access_token)
def googleapis_email(url, params):
"""Loads user data from googleapis service, only email so far as it's
described in http://sites.google.com/site/oauthgoog/Home/emaildisplayscope
Parameters must be passed in queryset and Authorization header as described
on Google OAuth documentation at:
http://groups.google.com/group/oauth/browse_thread/thread/d15add9beb418ebc
and: http://code.google.com/apis/accounts/docs/OAuth2.html#CallingAnAPI
"""
request = Request(url + '?' + params, headers={'Authorization': params})
try:
return simplejson.loads(dsa_urlopen(request).read())['data']
except (ValueError, KeyError, IOError):
return None
def googleapis_profile(url, access_token):
"""
Loads user data from googleapis service, such as name, given_name,
family_name, etc. as it's described in:
https://developers.google.com/accounts/docs/OAuth2Login
"""
data = {'access_token': access_token, 'alt': 'json'}
request = Request(url + '?' + urlencode(data))
try:
return simplejson.loads(dsa_urlopen(request).read())
except (ValueError, KeyError, IOError):
return None
def validate_whitelists(backend, email):
"""
Validates allowed domains and emails against the following settings:
GOOGLE_WHITE_LISTED_DOMAINS
GOOGLE_WHITE_LISTED_EMAILS
All domains and emails are allowed if the corresponding setting is an empty list.
"""
emails = setting('GOOGLE_WHITE_LISTED_EMAILS', [])
domains = setting('GOOGLE_WHITE_LISTED_DOMAINS', [])
if emails and email in emails:
return # you're good
if domains and email.split('@', 1)[1] not in domains:
raise AuthFailed(backend, 'Domain not allowed')
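# Example whitelist configuration consumed by validate_whitelists() above;
# the domain and address are hypothetical:
#
#     GOOGLE_WHITE_LISTED_DOMAINS = ['example.com']
#     GOOGLE_WHITE_LISTED_EMAILS = ['admin@example.org']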
# Backend definition
BACKENDS = {
'google': GoogleAuth,
'google-oauth': GoogleOAuth,
'google-oauth2': GoogleOAuth2,
}
| bsd-3-clause |
MERegistro/meregistro | tests/regressiontests/sites_framework/tests.py | 92 | 1784 | from django.conf import settings
from django.contrib.sites.models import Site
from django.test import TestCase
from models import SyndicatedArticle, ExclusiveArticle, CustomArticle, InvalidArticle, ConfusedArticle
class SitesFrameworkTestCase(TestCase):
def setUp(self):
Site.objects.get_or_create(id=settings.SITE_ID, domain="example.com", name="example.com")
Site.objects.create(id=settings.SITE_ID+1, domain="example2.com", name="example2.com")
def test_site_fk(self):
article = ExclusiveArticle.objects.create(title="Breaking News!", site_id=settings.SITE_ID)
self.assertEqual(ExclusiveArticle.on_site.all().get(), article)
def test_sites_m2m(self):
article = SyndicatedArticle.objects.create(title="Fresh News!")
article.sites.add(Site.objects.get(id=settings.SITE_ID))
article.sites.add(Site.objects.get(id=settings.SITE_ID+1))
article2 = SyndicatedArticle.objects.create(title="More News!")
article2.sites.add(Site.objects.get(id=settings.SITE_ID+1))
self.assertEqual(SyndicatedArticle.on_site.all().get(), article)
def test_custom_named_field(self):
article = CustomArticle.objects.create(title="Tantalizing News!", places_this_article_should_appear_id=settings.SITE_ID)
self.assertEqual(CustomArticle.on_site.all().get(), article)
def test_invalid_name(self):
article = InvalidArticle.objects.create(title="Bad News!", site_id=settings.SITE_ID)
self.assertRaises(ValueError, InvalidArticle.on_site.all)
def test_invalid_field_type(self):
article = ConfusedArticle.objects.create(title="More Bad News!", site=settings.SITE_ID)
self.assertRaises(TypeError, ConfusedArticle.on_site.all)
| bsd-3-clause |
bramwalet/Subliminal.bundle | Contents/Libraries/Shared/subliminal/converters/addic7ed.py | 32 | 1771 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from babelfish import LanguageReverseConverter, language_converters
class Addic7edConverter(LanguageReverseConverter):
def __init__(self):
self.name_converter = language_converters['name']
self.from_addic7ed = {'Català': ('cat',), 'Chinese (Simplified)': ('zho',), 'Chinese (Traditional)': ('zho',),
'Euskera': ('eus',), 'Galego': ('glg',), 'Greek': ('ell',), 'Malay': ('msa',),
'Portuguese (Brazilian)': ('por', 'BR'), 'Serbian (Cyrillic)': ('srp', None, 'Cyrl'),
'Serbian (Latin)': ('srp',), 'Spanish (Latin America)': ('spa',),
'Spanish (Spain)': ('spa',)}
self.to_addic7ed = {('cat',): 'Català', ('zho',): 'Chinese (Simplified)', ('eus',): 'Euskera',
('glg',): 'Galego', ('ell',): 'Greek', ('msa',): 'Malay',
('por', 'BR'): 'Portuguese (Brazilian)', ('srp', None, 'Cyrl'): 'Serbian (Cyrillic)'}
self.codes = self.name_converter.codes | set(self.from_addic7ed.keys())
def convert(self, alpha3, country=None, script=None):
if (alpha3, country, script) in self.to_addic7ed:
return self.to_addic7ed[(alpha3, country, script)]
if (alpha3, country) in self.to_addic7ed:
return self.to_addic7ed[(alpha3, country)]
if (alpha3,) in self.to_addic7ed:
return self.to_addic7ed[(alpha3,)]
return self.name_converter.convert(alpha3, country, script)
def reverse(self, addic7ed):
if addic7ed in self.from_addic7ed:
return self.from_addic7ed[addic7ed]
return self.name_converter.reverse(addic7ed)
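# A usage sketch, assuming the converter is registered with babelfish under
# the name 'addic7ed' (as subliminal does on import):
#
#     from babelfish import Language, language_converters
#     language_converters.register(
#         'addic7ed = subliminal.converters.addic7ed:Addic7edConverter')
#     Language.fromaddic7ed('Galego')   # Language('glg')
#     Language('por', 'BR').addic7ed    # 'Portuguese (Brazilian)'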
| mit |
koding/koding | go/src/vendor/github.com/koding/kite/kitectl/release.py | 24 | 7372 | #!/usr/bin/env python2.7
"""
A script for packaging and releasing kite tool for OS X and Linux platforms.
It can also upload the generated package file to S3 if you provide the --upload flag.
usage: release.py [-h] [--upload]
Run it in the same directory as this script. It will put the generated files
into the current working directory.
On OS X, the brew formula can be installed with the following command:
brew install kite.rb
On Linux, the deb package can be installed with the following command:
dpkg -i kite-0.0.1-linux.deb
"""
import argparse
import hashlib
import os
import shutil
import subprocess
import sys
import tarfile
import tempfile
import boto
from boto.s3.key import Key
BREW_FORMULA = """\
require 'formula'
class Kite < Formula
homepage 'http://kite.koding.com'
# url and sha1 needs to be changed after new binary is uploaded.
url '{url}'
sha1 '{sha1}'
def install
bin.install "kite"
end
def test
system "#{{bin}}/kite", "version"
end
end
"""
DEB_CONTROL = """\
Package: kite
Version: {version}
Section: utils
Priority: optional
Architecture: amd64
Essential: no
Maintainer: Koding Developers <[email protected]>
Description: Kite command-line tool.
"""
def build_osx(binpath, version):
print "Making tar file..."
tarname = "kite-%s-osx.tar.gz" % version
with tarfile.open(tarname, "w:gz") as tar:
tar.add(binpath, arcname="kite")
return tarname
def build_linux(binpath, version):
workdir = tempfile.mkdtemp()
try:
debname = "kite-%s-linux" % version
packagedir = os.path.join(workdir, debname)
os.mkdir(packagedir)
debiandir = os.path.join(packagedir, "DEBIAN")
os.mkdir(debiandir)
controlpath = os.path.join(debiandir, "control")
with open(controlpath, "w") as f:
f.write(DEB_CONTROL.format(version=version))
usrdir = os.path.join(packagedir, "usr")
os.mkdir(usrdir)
bindir = os.path.join(usrdir, "bin")
os.mkdir(bindir)
shutil.move(binpath, bindir)
debfile = "%s.deb" % debname
subprocess.check_call(["fakeroot", "dpkg-deb", "--build",
packagedir, debfile])
return debfile
finally:
shutil.rmtree(workdir)
def postbuild_osx(package_name, args, bucket, package_s3_key):
if args.upload:
url = package_s3_key.generate_url(expires_in=0, query_auth=False)
else:
# For testing "brew install" locally
url = "http://127.0.0.1:8000/%s" % package_name
print "Generating formula..."
sha1 = sha1_file(package_name)
formula_str = BREW_FORMULA.format(url=url, sha1=sha1)
with open("kite.rb", "w") as f:
f.write(formula_str)
if args.upload:
print "Uploading new brew formula..."
formula_key = Key(bucket)
formula_key.key = "kite.rb"
formula_key.set_contents_from_string(formula_str)
formula_key.make_public()
formula_url = formula_key.generate_url(expires_in=0, query_auth=False)
print "kite tool has been uplaoded successfully.\n" \
"Users can install it with:\n " \
"brew install \"%s\"" % formula_url
else:
print "Did not upload to S3. " \
"If you want to upload, run with --upload flag."
def postbuild_linux(package_name, args, bucket, package_s3_key):
if args.upload:
print "Uploading again as kite-latest.linux.deb ..."
latest = Key(bucket)
latest.key = "kite-latest-linux.deb"
latest.set_contents_from_filename(package_name)
latest.make_public()
print "Uploaded:", latest.generate_url(expires_in=0, query_auth=False)
def main():
parser = argparse.ArgumentParser(
description="Compile kite tool and upload to S3.")
parser.add_argument('--upload', action='store_true', help="upload to s3")
parser.add_argument('--overwrite', action='store_true', help="overwrite existing package")
args = parser.parse_args()
if args.upload:
aws_key = os.environ['AWS_KEY']
aws_secret = os.environ['AWS_SECRET']
workdir = tempfile.mkdtemp()
try:
tardir = os.path.join(workdir, "kite") # dir to be tarred
os.mkdir(tardir)
binpath = os.path.join(tardir, "kite")
cmd = "go build -o %s %s" % (binpath, "kite/main.go")
env = os.environ.copy()
env["GOARCH"] = "amd64" # we only build for 64-bit
env["CGO_ENABLED"] = "1" # cgo must be enabled for some functions to run correctly
# Decide on platform (osx, linux, etc.)
if sys.platform.startswith("linux"):
env["GOOS"] = "linux"
platform = "linux"
elif sys.platform.startswith("darwin"):
env["GOOS"] = "darwin"
platform = "osx"
else:
print "%s platform is not supported" % sys.platform
sys.exit(1)
# Compile kite tool source code
print "Building for platform: %s" % platform
try:
subprocess.check_call(cmd.split(), env=env)
except subprocess.CalledProcessError:
print "Cannot compile kite tool. Try manually."
sys.exit(1)
# Get the version number from compiled binary
version = subprocess.check_output([binpath, "version"]).strip()
assert len(version.split(".")) == 3, "Please use 3-digit versioning"
print "Version:", version
# Build platform specific package
build_function = globals()["build_%s" % platform]
package = build_function(binpath, version)
if not os.path.exists(package):
print "Build is unsuccessful."
sys.exit(1)
print "Generated package:", package
# Upload to Amazon S3
bucket = package_key = None
if args.upload:
print "Uploading to Amazon S3..."
s3_connection = boto.connect_s3(aws_key, aws_secret)
bucket = s3_connection.get_bucket('kite-cli')
package_key = Key(bucket)
package_key.key = package
if package_key.exists() and not args.overwrite:
print "This version is already uploaded. " \
"Please do not overwrite the uploaded version, " \
"increment the version number and upload it again. " \
"If you must, you can use --overwrite option."
sys.exit(1)
package_key.set_contents_from_filename(package)
package_key.make_public()
url = package_key.generate_url(expires_in=0, query_auth=False)
print "Package is uploaded to S3:", url
# Run post-build actions
postbuild_function = globals().get("postbuild_%s" % platform)
if postbuild_function:
postbuild_function(package, args, bucket, package_key)
finally:
shutil.rmtree(workdir)
def sha1_file(path):
"""Calculate sha1 of path. Read file in chunks."""
assert os.path.isfile(path)
chunk_size = 1024 * 1024 # 1M
sha1_checksum = hashlib.sha1()
with open(path, "rb") as f:
byte = f.read(chunk_size)
while byte:
sha1_checksum.update(byte)
byte = f.read(chunk_size)
return sha1_checksum.hexdigest()
if __name__ == "__main__":
main()
| agpl-3.0 |
bodi000/odoo | addons/l10n_be_hr_payroll/l10n_be_hr_payroll.py | 379 | 3110 | #-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://openerp.com>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
import openerp.addons.decimal_precision as dp
class hr_contract_be(osv.osv):
_inherit = 'hr.contract'
_columns = {
'travel_reimbursement_amount': fields.float('Reimbursement of travel expenses', digits_compute=dp.get_precision('Payroll')),
'car_company_amount': fields.float('Company car employer', digits_compute=dp.get_precision('Payroll')),
'car_employee_deduction': fields.float('Company Car Deduction for Worker', digits_compute=dp.get_precision('Payroll')),
'misc_onss_deduction': fields.float('Miscellaneous exempt ONSS ', digits_compute=dp.get_precision('Payroll')),
'meal_voucher_amount': fields.float('Check Value Meal ', digits_compute=dp.get_precision('Payroll')),
'meal_voucher_employee_deduction': fields.float('Check Value Meal - by worker ', digits_compute=dp.get_precision('Payroll')),
'insurance_employee_deduction': fields.float('Insurance Group - by worker ', digits_compute=dp.get_precision('Payroll')),
'misc_advantage_amount': fields.float('Benefits of various nature ', digits_compute=dp.get_precision('Payroll')),
'additional_net_amount': fields.float('Net supplements', digits_compute=dp.get_precision('Payroll')),
'retained_net_amount': fields.float('Net retained ', digits_compute=dp.get_precision('Payroll')),
}
class hr_employee_be(osv.osv):
_inherit = 'hr.employee'
_columns = {
'spouse_fiscal_status': fields.selection([('without income','Without Income'),('with income','With Income')], 'Tax status for spouse'),
'disabled_spouse_bool': fields.boolean('Disabled Spouse', help="if recipient spouse is declared disabled by law"),
'disabled_children_bool': fields.boolean('Disabled Children', help="if recipient children is/are declared disabled by law"),
'resident_bool': fields.boolean('Nonresident', help="if recipient lives in a foreign country"),
'disabled_children_number': fields.integer('Number of disabled children'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
benschmaus/catapult | third_party/gsutil/third_party/boto/tests/unit/ec2/test_reservedinstance.py | 100 | 1670 | from tests.unit import AWSMockServiceTestCase
from boto.ec2.connection import EC2Connection
from boto.ec2.reservedinstance import ReservedInstance
class TestReservedInstancesSet(AWSMockServiceTestCase):
connection_class = EC2Connection
def default_body(self):
return b"""
<reservedInstancesSet>
<item>
<reservedInstancesId>ididididid</reservedInstancesId>
<instanceType>t1.micro</instanceType>
<start>2014-05-03T14:10:10.944Z</start>
<end>2014-05-03T14:10:11.000Z</end>
<duration>64800000</duration>
<fixedPrice>62.5</fixedPrice>
<usagePrice>0.0</usagePrice>
<instanceCount>5</instanceCount>
<productDescription>Linux/UNIX</productDescription>
<state>retired</state>
<instanceTenancy>default</instanceTenancy>
<currencyCode>USD</currencyCode>
<offeringType>Heavy Utilization</offeringType>
<recurringCharges>
<item>
<frequency>Hourly</frequency>
<amount>0.005</amount>
</item>
</recurringCharges>
</item>
</reservedInstancesSet>"""
def test_get_all_reserved_instances(self):
self.set_http_response(status_code=200)
response = self.service_connection.get_all_reserved_instances()
self.assertEqual(len(response), 1)
self.assertTrue(isinstance(response[0], ReservedInstance))
self.assertEquals(response[0].id, 'ididididid')
self.assertEquals(response[0].instance_count, 5)
self.assertEquals(response[0].start, '2014-05-03T14:10:10.944Z')
self.assertEquals(response[0].end, '2014-05-03T14:10:11.000Z')
| bsd-3-clause |
oeeagle/quantum | neutron/plugins/nec/ofc_driver_base.py | 5 | 4998 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Ryota MIBU
# @author: Akihiro MOTOKI
from abc import ABCMeta, abstractmethod
import six
@six.add_metaclass(ABCMeta)
class OFCDriverBase(object):
"""OpenFlow Controller (OFC) Driver Specification.
OFCDriverBase defines the minimum set of methods required by this plugin.
It would be better that other methods like update_* are implemented.
"""
@abstractmethod
def create_tenant(self, description, tenant_id=None):
"""Create a new tenant at OpenFlow Controller.
:param description: A description of this tenant.
:param tenant_id: A hint of OFC tenant ID.
A driver could use this id as a OFC id or ignore it.
:returns: ID of the tenant created at OpenFlow Controller.
:raises: neutron.plugin.nec.common.exceptions.OFCException
"""
pass
@abstractmethod
def delete_tenant(self, ofc_tenant_id):
"""Delete a tenant at OpenFlow Controller.
:raises: neutron.plugin.nec.common.exceptions.OFCException
"""
pass
@abstractmethod
def create_network(self, ofc_tenant_id, description, network_id=None):
"""Create a new network on specified OFC tenant at OpenFlow Controller.
:param ofc_tenant_id: an OFC tenant ID to which the new network belongs.
:param description: A description of this network.
:param network_id: A hint of an ID of OFC network.
:returns: ID of the network created at OpenFlow Controller.
ID returned must be unique in the OpenFlow Controller.
If a network is identified in conjunction with other information
such as a tenant ID, such information should be included in the ID.
:raises: neutron.plugin.nec.common.exceptions.OFCException
"""
pass
@abstractmethod
def delete_network(self, ofc_network_id):
"""Delete a netwrok at OpenFlow Controller.
:raises: neutron.plugin.nec.common.exceptions.OFCException
"""
pass
@abstractmethod
def create_port(self, ofc_network_id, portinfo,
port_id=None):
"""Create a new port on specified network at OFC.
:param ofc_network_id: an OFC network ID to which the new port belongs.
:param portinfo: OpenFlow information of this port.
{'datapath_id': ID of the switch the port is connected to.
'port_no': port number the port uses on that switch.
'vlan_id': VLAN ID the port is tagged with.
'mac': MAC address.
}
:param port_id: A hint of an ID of OFC port.
ID returned must be unique in the OpenFlow Controller.
If a port is identified in combination with a network or
a tenant, such information should be included in the ID.
:returns: ID of the port created at OpenFlow Controller.
:raises: neutron.plugin.nec.common.exceptions.OFCException
"""
pass
@abstractmethod
def delete_port(self, ofc_port_id):
"""Delete a port at OpenFlow Controller.
:raises: neutron.plugin.nec.common.exceptions.OFCException
"""
pass
@abstractmethod
def convert_ofc_tenant_id(self, context, ofc_tenant_id):
"""Convert old-style ofc tenand id to new-style one.
:param context: neutron context object
:param ofc_tenant_id: ofc_tenant_id to be converted
"""
pass
@abstractmethod
def convert_ofc_network_id(self, context, ofc_network_id,
tenant_id):
"""Convert old-style ofc network id to new-style one.
:param context: neutron context object
:param ofc_network_id: ofc_network_id to be converted
:param tenant_id: neutron tenant_id of the network
"""
pass
@abstractmethod
def convert_ofc_port_id(self, context, ofc_port_id,
tenant_id, network_id):
"""Convert old-style ofc port id to new-style one.
:param context: neutron context object
:param ofc_port_id: ofc_port_id to be converted
:param tenant_id: neutron tenant_id of the port
:param network_id: neutron network_id of the port
"""
pass
| apache-2.0 |
nju520/pyspider | pyspider/message_queue/kombu_queue.py | 60 | 3271 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<[email protected]>
# http://binux.me
# Created on 2015-05-22 20:54:01
import time
import umsgpack
from kombu import Connection, enable_insecure_serializers
from kombu.serialization import register
from kombu.exceptions import ChannelError
from six.moves import queue as BaseQueue
register('umsgpack', umsgpack.packb, umsgpack.unpackb, 'application/x-msgpack')
enable_insecure_serializers(['umsgpack'])
class KombuQueue(object):
"""
kombu is a high-level interface for multiple message queue backends.
KombuQueue is built on top of kombu API.
"""
Empty = BaseQueue.Empty
Full = BaseQueue.Full
max_timeout = 0.3
def __init__(self, name, url="amqp://", maxsize=0, lazy_limit=True):
"""
Constructor for KombuQueue
url: http://kombu.readthedocs.org/en/latest/userguide/connections.html#urls
maxsize: an integer that sets the upperbound limit on the number of
items that can be placed in the queue.
"""
self.name = name
self.conn = Connection(url)
self.queue = self.conn.SimpleQueue(self.name, no_ack=True, serializer='umsgpack')
self.maxsize = maxsize
self.lazy_limit = lazy_limit
if self.lazy_limit and self.maxsize:
self.qsize_diff_limit = int(self.maxsize * 0.1)
else:
self.qsize_diff_limit = 0
self.qsize_diff = 0
def qsize(self):
try:
return self.queue.qsize()
except ChannelError:
return 0
def empty(self):
if self.qsize() == 0:
return True
else:
return False
def full(self):
if self.maxsize and self.qsize() >= self.maxsize:
return True
else:
return False
def put(self, obj, block=True, timeout=None):
if not block:
return self.put_nowait(obj)
start_time = time.time()
while True:
try:
return self.put_nowait(obj)
except BaseQueue.Full:
if timeout:
lasted = time.time() - start_time
if timeout > lasted:
time.sleep(min(self.max_timeout, timeout - lasted))
else:
raise
else:
time.sleep(self.max_timeout)
def put_nowait(self, obj):
if self.lazy_limit and self.qsize_diff < self.qsize_diff_limit:
pass
elif self.full():
raise BaseQueue.Full
else:
self.qsize_diff = 0
self.qsize_diff += 1  # count puts since the last real full() check
return self.queue.put(obj)
def get(self, block=True, timeout=None):
try:
ret = self.queue.get(block, timeout)
return ret.payload
except self.queue.Empty:
raise BaseQueue.Empty
def get_nowait(self):
try:
ret = self.queue.get_nowait()
return ret.payload
except self.queue.Empty:
raise BaseQueue.Empty
def delete(self):
self.queue.queue.delete()
def __del__(self):
self.queue.close()
Queue = KombuQueue
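# A minimal usage sketch, assuming a broker reachable at the default
# amqp:// URL (e.g. a local RabbitMQ):
#
#     q = KombuQueue('demo', url='amqp://', maxsize=10)
#     q.put({'taskid': 1})
#     msg = q.get(timeout=1.0)  # {'taskid': 1}
#     q.delete()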
| apache-2.0 |
S01780/python-social-auth | social/backends/appsfuel.py | 83 | 1502 | """
Appsfuel OAuth2 backend (with sandbox mode support), docs at:
http://psa.matiasaguirre.net/docs/backends/appsfuel.html
"""
from social.backends.oauth import BaseOAuth2
class AppsfuelOAuth2(BaseOAuth2):
name = 'appsfuel'
ID_KEY = 'user_id'
AUTHORIZATION_URL = 'http://app.appsfuel.com/content/permission'
ACCESS_TOKEN_URL = 'https://api.appsfuel.com/v1/live/oauth/token'
ACCESS_TOKEN_METHOD = 'POST'
USER_DETAILS_URL = 'https://api.appsfuel.com/v1/live/user'
def get_user_details(self, response):
"""Return user details from Appsfuel account"""
email = response.get('email', '')
username = email.split('@')[0] if email else ''
fullname, first_name, last_name = self.get_user_names(
response.get('display_name', '')
)
return {
'username': username,
'fullname': fullname,
'first_name': first_name,
'last_name': last_name,
'email': email
}
def user_data(self, access_token, *args, **kwargs):
"""Loads user data from service"""
return self.get_json(self.USER_DETAILS_URL, params={
'access_token': access_token
})
class AppsfuelOAuth2Sandbox(AppsfuelOAuth2):
name = 'appsfuel-sandbox'
AUTHORIZATION_URL = 'https://api.appsfuel.com/v1/sandbox/choose'
ACCESS_TOKEN_URL = 'https://api.appsfuel.com/v1/sandbox/oauth/token'
USER_DETAILS_URL = 'https://api.appsfuel.com/v1/sandbox/user'
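# A hypothetical Django settings sketch for enabling this backend; the
# setting names follow python-social-auth's <BACKEND>_KEY/SECRET naming
# convention and the values are placeholders:
#
#     AUTHENTICATION_BACKENDS = (
#         'social.backends.appsfuel.AppsfuelOAuth2',
#     )
#     SOCIAL_AUTH_APPSFUEL_KEY = 'app-key'
#     SOCIAL_AUTH_APPSFUEL_SECRET = 'app-secret'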
| bsd-3-clause |
dacjames/scrapy | tests/test_engine.py | 92 | 8653 | """
Scrapy engine tests
This starts a testing web server (using twisted.server.Site) and then crawls it
with the Scrapy crawler.
To view the testing web server in a browser you can start it by running this
module with the ``runserver`` argument::
python test_engine.py runserver
"""
from __future__ import print_function
import sys, os, re
from six.moves.urllib.parse import urlparse
from twisted.internet import reactor, defer
from twisted.web import server, static, util
from twisted.trial import unittest
from scrapy import signals
from scrapy.utils.test import get_crawler
from pydispatch import dispatcher
from tests import tests_datadir
from scrapy.spiders import Spider
from scrapy.item import Item, Field
from scrapy.linkextractors import LinkExtractor
from scrapy.http import Request
from scrapy.utils.signal import disconnect_all
class TestItem(Item):
name = Field()
url = Field()
price = Field()
class TestSpider(Spider):
name = "scrapytest.org"
allowed_domains = ["scrapytest.org", "localhost"]
itemurl_re = re.compile("item\d+.html")
name_re = re.compile("<h1>(.*?)</h1>", re.M)
price_re = re.compile(">Price: \$(.*?)<", re.M)
item_cls = TestItem
def parse(self, response):
xlink = LinkExtractor()
itemre = re.compile(self.itemurl_re)
for link in xlink.extract_links(response):
if itemre.search(link.url):
yield Request(url=link.url, callback=self.parse_item)
def parse_item(self, response):
item = self.item_cls()
m = self.name_re.search(response.body)
if m:
item['name'] = m.group(1)
item['url'] = response.url
m = self.price_re.search(response.body)
if m:
item['price'] = m.group(1)
return item
class TestDupeFilterSpider(TestSpider):
def make_requests_from_url(self, url):
return Request(url) # dont_filter=False
class DictItemsSpider(TestSpider):
item_cls = dict
def start_test_site(debug=False):
root_dir = os.path.join(tests_datadir, "test_site")
r = static.File(root_dir)
r.putChild("redirect", util.Redirect("/redirected"))
r.putChild("redirected", static.Data("Redirected here", "text/plain"))
port = reactor.listenTCP(0, server.Site(r), interface="127.0.0.1")
if debug:
print("Test server running at http://localhost:%d/ - hit Ctrl-C to finish." \
% port.getHost().port)
return port
class CrawlerRun(object):
"""A class to run the crawler and keep track of events occurred"""
def __init__(self, spider_class):
self.spider = None
self.respplug = []
self.reqplug = []
self.reqdropped = []
self.itemresp = []
self.signals_catched = {}
self.spider_class = spider_class
def run(self):
self.port = start_test_site()
self.portno = self.port.getHost().port
start_urls = [self.geturl("/"), self.geturl("/redirect"),
self.geturl("/redirect")] # a duplicate
for name, signal in vars(signals).items():
if not name.startswith('_'):
dispatcher.connect(self.record_signal, signal)
self.crawler = get_crawler(self.spider_class)
self.crawler.signals.connect(self.item_scraped, signals.item_scraped)
self.crawler.signals.connect(self.request_scheduled, signals.request_scheduled)
self.crawler.signals.connect(self.request_dropped, signals.request_dropped)
self.crawler.signals.connect(self.response_downloaded, signals.response_downloaded)
self.crawler.crawl(start_urls=start_urls)
self.spider = self.crawler.spider
self.deferred = defer.Deferred()
dispatcher.connect(self.stop, signals.engine_stopped)
return self.deferred
def stop(self):
self.port.stopListening()
for name, signal in vars(signals).items():
if not name.startswith('_'):
disconnect_all(signal)
self.deferred.callback(None)
def geturl(self, path):
return "http://localhost:%s%s" % (self.portno, path)
def getpath(self, url):
u = urlparse(url)
return u.path
def item_scraped(self, item, spider, response):
self.itemresp.append((item, response))
def request_scheduled(self, request, spider):
self.reqplug.append((request, spider))
def request_dropped(self, request, spider):
self.reqdropped.append((request, spider))
def response_downloaded(self, response, spider):
self.respplug.append((response, spider))
def record_signal(self, *args, **kwargs):
"""Record a signal and its parameters"""
signalargs = kwargs.copy()
sig = signalargs.pop('signal')
signalargs.pop('sender', None)
self.signals_catched[sig] = signalargs
class EngineTest(unittest.TestCase):
@defer.inlineCallbacks
def test_crawler(self):
for spider in TestSpider, DictItemsSpider:
self.run = CrawlerRun(spider)
yield self.run.run()
self._assert_visited_urls()
self._assert_scheduled_requests(urls_to_visit=8)
self._assert_downloaded_responses()
self._assert_scraped_items()
self._assert_signals_catched()
self.run = CrawlerRun(TestDupeFilterSpider)
yield self.run.run()
self._assert_scheduled_requests(urls_to_visit=7)
self._assert_dropped_requests()
def _assert_visited_urls(self):
must_be_visited = ["/", "/redirect", "/redirected",
"/item1.html", "/item2.html", "/item999.html"]
urls_visited = set([rp[0].url for rp in self.run.respplug])
urls_expected = set([self.run.geturl(p) for p in must_be_visited])
assert urls_expected <= urls_visited, "URLs not visited: %s" % list(urls_expected - urls_visited)
def _assert_scheduled_requests(self, urls_to_visit=None):
self.assertEqual(urls_to_visit, len(self.run.reqplug))
paths_expected = ['/item999.html', '/item2.html', '/item1.html']
urls_requested = set([rq[0].url for rq in self.run.reqplug])
urls_expected = set([self.run.geturl(p) for p in paths_expected])
assert urls_expected <= urls_requested
scheduled_requests_count = len(self.run.reqplug)
dropped_requests_count = len(self.run.reqdropped)
responses_count = len(self.run.respplug)
self.assertEqual(scheduled_requests_count,
dropped_requests_count + responses_count)
def _assert_dropped_requests(self):
self.assertEqual(len(self.run.reqdropped), 1)
def _assert_downloaded_responses(self):
# response tests
self.assertEqual(8, len(self.run.respplug))
for response, _ in self.run.respplug:
if self.run.getpath(response.url) == '/item999.html':
self.assertEqual(404, response.status)
if self.run.getpath(response.url) == '/redirect':
self.assertEqual(302, response.status)
def _assert_scraped_items(self):
self.assertEqual(2, len(self.run.itemresp))
for item, response in self.run.itemresp:
self.assertEqual(item['url'], response.url)
if 'item1.html' in item['url']:
self.assertEqual('Item 1 name', item['name'])
self.assertEqual('100', item['price'])
if 'item2.html' in item['url']:
self.assertEqual('Item 2 name', item['name'])
self.assertEqual('200', item['price'])
def _assert_signals_catched(self):
assert signals.engine_started in self.run.signals_catched
assert signals.engine_stopped in self.run.signals_catched
assert signals.spider_opened in self.run.signals_catched
assert signals.spider_idle in self.run.signals_catched
assert signals.spider_closed in self.run.signals_catched
self.assertEqual({'spider': self.run.spider},
self.run.signals_catched[signals.spider_opened])
self.assertEqual({'spider': self.run.spider},
self.run.signals_catched[signals.spider_idle])
self.run.signals_catched[signals.spider_closed].pop('spider_stats', None) # XXX: remove for scrapy 0.17
self.assertEqual({'spider': self.run.spider, 'reason': 'finished'},
self.run.signals_catched[signals.spider_closed])
if __name__ == "__main__":
if len(sys.argv) > 1 and sys.argv[1] == 'runserver':
start_test_site(debug=True)
reactor.run()
| bsd-3-clause |
shreyasp/erpnext | erpnext/config/accounts.py | 1 | 9631 | from __future__ import unicode_literals
from frappe import _
def get_data():
return [
{
"label": _("Billing"),
"items": [
{
"type": "doctype",
"name": "Sales Invoice",
"description": _("Bills raised to Customers.")
},
{
"type": "doctype",
"name": "Purchase Invoice",
"description": _("Bills raised by Suppliers.")
},
{
"type": "doctype",
"name": "Payment Request",
"description": _("Payment Request")
},
{
"type": "doctype",
"name": "Payment Entry",
"description": _("Bank/Cash transactions against party or for internal transfer")
},
{
"type": "page",
"name": "pos",
"label": _("POS"),
"description": _("Point of Sale")
},
{
"type": "report",
"name": "Accounts Receivable",
"doctype": "Sales Invoice",
"is_query_report": True
},
{
"type": "report",
"name": "Accounts Payable",
"doctype": "Purchase Invoice",
"is_query_report": True
},
]
},
{
"label": _("Company and Accounts"),
"items": [
{
"type": "doctype",
"name": "Company",
"description": _("Company (not Customer or Supplier) master.")
},
{
"type": "doctype",
"name": "Journal Entry",
"description": _("Accounting journal entries.")
},
{
"type": "doctype",
"name": "Account",
"icon": "icon-sitemap",
"label": _("Chart of Accounts"),
"route": "Tree/Account",
"description": _("Tree of financial accounts."),
},
{
"type": "report",
"name":"General Ledger",
"doctype": "GL Entry",
"is_query_report": True,
},
]
},
{
"label": _("Masters"),
"items": [
{
"type": "doctype",
"name": "Customer",
"description": _("Customer database.")
},
{
"type": "doctype",
"name": "Supplier",
"description": _("Supplier database.")
},
{
"type": "doctype",
"name": "Item",
},
{
"type": "doctype",
"name": "Asset",
},
{
"type": "doctype",
"name": "Asset Category",
}
]
},
{
"label": _("Accounting Statements"),
"items": [
{
"type": "report",
"name": "Trial Balance",
"doctype": "GL Entry",
"is_query_report": True,
},
{
"type": "report",
"name": "Balance Sheet",
"doctype": "GL Entry",
"is_query_report": True
},
{
"type": "report",
"name": "Cash Flow",
"doctype": "GL Entry",
"is_query_report": True
},
{
"type": "report",
"name": "Profit and Loss Statement",
"doctype": "GL Entry",
"is_query_report": True
},
]
},
{
"label": _("Banking and Payments"),
"items": [
{
"type": "doctype",
"label": _("Update Bank Transaction Dates"),
"name": "Bank Reconciliation",
"description": _("Update bank payment dates with journals.")
},
{
"type": "doctype",
"label": _("Match Payments with Invoices"),
"name": "Payment Reconciliation",
"description": _("Match non-linked Invoices and Payments.")
},
{
"type": "report",
"name": "Bank Reconciliation Statement",
"is_query_report": True,
"doctype": "Journal Entry"
},
{
"type": "report",
"name": "Bank Clearance Summary",
"is_query_report": True,
"doctype": "Journal Entry"
},
]
},
{
"label": _("Taxes"),
"items": [
{
"type": "doctype",
"name": "Sales Taxes and Charges Template",
"description": _("Tax template for selling transactions.")
},
{
"type": "doctype",
"name": "Purchase Taxes and Charges Template",
"description": _("Tax template for buying transactions.")
},
{
"type": "doctype",
"name": "Tax Rule",
"description": _("Tax Rule for transactions.")
},
{
"type": "report",
"name": "Sales Register",
"doctype": "Sales Invoice",
"is_query_report": True
},
{
"type": "report",
"name": "Purchase Register",
"doctype": "Purchase Invoice",
"is_query_report": True
},
]
},
{
"label": _("Budget and Cost Center"),
"items": [
{
"type": "doctype",
"name": "Cost Center",
"icon": "icon-sitemap",
"label": _("Chart of Cost Centers"),
"route": "Tree/Cost Center",
"description": _("Tree of financial Cost Centers."),
},
{
"type": "doctype",
"name": "Budget",
"description": _("Define budget for a financial year.")
},
{
"type": "report",
"name": "Budget Variance Report",
"is_query_report": True,
"doctype": "Cost Center"
},
{
"type":"doctype",
"name": "Monthly Distribution",
"description": _("Seasonality for setting budgets, targets etc.")
},
]
},
{
"label": _("Tools"),
"items": [
{
"type": "doctype",
"name": "Period Closing Voucher",
"description": _("Close Balance Sheet and book Profit or Loss.")
},
{
"type": "doctype",
"name": "Asset Movement",
"description": _("Transfer an asset from one warehouse to another")
},
{
"type": "doctype",
"name": "Cheque Print Template",
"description": _("Setup cheque dimensions for printing")
},
]
},
{
"label": _("Setup"),
"icon": "icon-cog",
"items": [
{
"type": "doctype",
"name": "Accounts Settings",
"description": _("Default settings for accounting transactions.")
},
{
"type": "doctype",
"name": "Fiscal Year",
"description": _("Financial / accounting year.")
},
{
"type": "doctype",
"name": "Currency",
"description": _("Enable / disable currencies.")
},
{
"type": "doctype",
"name": "Currency Exchange",
"description": _("Currency exchange rate master.")
},
{
"type": "doctype",
"name": "Payment Gateway Account",
"description": _("Setup Gateway accounts.")
},
{
"type": "doctype",
"name": "POS Profile",
"label": _("Point-of-Sale Profile"),
"description": _("Rules to calculate shipping amount for a sale")
},
{
"type": "doctype",
"name":"Terms and Conditions",
"label": _("Terms and Conditions Template"),
"description": _("Template of terms or contract.")
},
{
"type": "doctype",
"name":"Mode of Payment",
"description": _("e.g. Bank, Cash, Credit Card")
},
{
"type": "doctype",
"name":"C-Form",
"description": _("C-Form records"),
"country": "India"
}
]
},
{
"label": _("To Bill"),
"items": [
{
"type": "report",
"name": "Ordered Items To Be Billed",
"is_query_report": True,
"doctype": "Sales Invoice"
},
{
"type": "report",
"name": "Delivered Items To Be Billed",
"is_query_report": True,
"doctype": "Sales Invoice"
},
{
"type": "report",
"name": "Purchase Order Items To Be Billed",
"is_query_report": True,
"doctype": "Purchase Invoice"
},
{
"type": "report",
"name": "Received Items To Be Billed",
"is_query_report": True,
"doctype": "Purchase Invoice"
},
]
},
{
"label": _("Analytics"),
"items": [
{
"type": "report",
"name": "Gross Profit",
"doctype": "Sales Invoice",
"is_query_report": True
},
{
"type": "report",
"name": "Purchase Invoice Trends",
"is_query_report": True,
"doctype": "Purchase Invoice"
},
{
"type": "report",
"name": "Sales Invoice Trends",
"is_query_report": True,
"doctype": "Sales Invoice"
},
]
},
{
"label": _("Other Reports"),
"icon": "icon-table",
"items": [
{
"type": "report",
"name": "Asset Depreciation Ledger",
"doctype": "Asset",
"is_query_report": True,
},
{
"type": "report",
"name": "Asset Depreciations and Balances",
"doctype": "Asset",
"is_query_report": True,
},
{
"type": "report",
"name": "Trial Balance for Party",
"doctype": "GL Entry",
"is_query_report": True,
},
{
"type": "report",
"name": "Payment Period Based On Invoice Date",
"is_query_report": True,
"doctype": "Journal Entry"
},
{
"type": "report",
"name": "Sales Partners Commission",
"is_query_report": True,
"doctype": "Sales Invoice"
},
{
"type": "report",
"name": "Item-wise Sales Register",
"is_query_report": True,
"doctype": "Sales Invoice"
},
{
"type": "report",
"name": "Item-wise Purchase Register",
"is_query_report": True,
"doctype": "Purchase Invoice"
},
{
"type": "report",
"name": "Accounts Receivable Summary",
"doctype": "Sales Invoice",
"is_query_report": True
},
{
"type": "report",
"name": "Accounts Payable Summary",
"doctype": "Purchase Invoice",
"is_query_report": True
},
{
"type": "report",
"is_query_report": True,
"name": "Customer Credit Balance",
"doctype": "Customer"
},
]
},
{
"label": _("Help"),
"icon": "icon-facetime-video",
"items": [
{
"type": "help",
"label": _("Chart of Accounts"),
"youtube_id": "DyR-DST-PyA"
},
{
"type": "help",
"label": _("Opening Accounting Balance"),
"youtube_id": "kdgM20Q-q68"
},
{
"type": "help",
"label": _("Setting up Taxes"),
"youtube_id": "nQ1zZdPgdaQ"
}
]
}
]
| gpl-3.0 |
pubs/pubs | pubs/commands/rename_cmd.py | 1 | 1161 | from __future__ import unicode_literals
from ..uis import get_ui
from .. import color
from .. import repo
from ..utils import resolve_citekey
from ..completion import CiteKeyCompletion
def parser(subparsers, conf):
parser = subparsers.add_parser('rename',
help='rename the citekey of a repository')
parser.add_argument('citekey', help='current citekey'
).completer = CiteKeyCompletion(conf)
parser.add_argument('new_citekey', help='new citekey')
return parser
def command(conf, args):
"""
Rename the citekey of an existing paper.
:param conf: configuration object
:param args: parsed arguments providing `citekey` and `new_citekey`
"""
ui = get_ui()
rp = repo.Repository(conf)
# TODO: here should be a test whether the new citekey is valid
key = resolve_citekey(rp, conf, args.citekey, ui=ui, exit_on_fail=True)
paper = rp.pull_paper(key)
rp.rename_paper(paper, args.new_citekey)
ui.message("The '{}' citekey has been renamed into '{}'".format(
color.dye_out(args.citekey, 'citekey'),
color.dye_out(args.new_citekey, 'citekey')))
rp.close()
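# Example invocation (hypothetical citekeys):
#
#     pubs rename Doe2009 Doe2010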
| lgpl-3.0 |
rxuriguera/bibtexIndexMaker | src/bibim/util/tests/test_beautifulsoup.py | 1 | 1810 |
# Copyright 2010 Ramon Xuriguera
#
# This file is part of BibtexIndexMaker.
#
# BibtexIndexMaker is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# BibtexIndexMaker is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BibtexIndexMaker. If not, see <http://www.gnu.org/licenses/>.
import unittest #@UnresolvedImport
from bibim.util.beautifulsoup import BeautifulSoup
class Test(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_create_soup_from_None(self):
self.failUnlessRaises(TypeError, BeautifulSoup, None)
def test_create_soup_from_empty_string(self):
try:
soup = BeautifulSoup('')
self.failIf(soup is None)
except:
self.fail("Soup of empty string shouldn't raise an exception")
def test_get_text_from_non_leaf(self):
soup = BeautifulSoup('<html><body>'
'<div>'
'<span>Text 01</span>'
'<span>Text 02</span>'
'</div>'
'</body></html>')
text = soup.findAll('div', text=True)
self.failUnless(len(text) == 2)
self.failUnless(text[0] == u'Text 01')
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| gpl-3.0 |
mdeejay/kernel_huawei_omap4 | tools/perf/scripts/python/syscall-counts.py | 11181 | 1522 | # system call counts
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts.py [comm]\n";
for_comm = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
print "%-40s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
valkyriesavage/invenio | modules/bibupload/lib/batchuploader_engine.py | 2 | 22837 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Batch Uploader core functions. Uploading metadata and documents.
"""
import os
import pwd, grp
import time
import tempfile
from invenio.dbquery import run_sql
from invenio.access_control_engine import acc_authorize_action
from invenio.webuser import collect_user_info
from invenio.config import CFG_BINDIR, CFG_TMPDIR, CFG_LOGDIR, \
CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG, \
CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG, \
CFG_OAI_ID_FIELD, CFG_BATCHUPLOADER_DAEMON_DIR, \
CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS, \
CFG_BATCHUPLOADER_WEB_ROBOT_AGENT, \
CFG_PREFIX, CFG_SITE_LANG
from invenio.webinterface_handler_wsgi_utils import Field
from invenio.textutils import encode_for_xml
from invenio.bibtask import task_low_level_submission
from invenio.messages import gettext_set_language
PERMITTED_MODES = ['-i', '-r', '-c', '-a', '-ir',
'--insert', '--replace', '--correct', '--append']
def cli_allocate_record(req):
req.content_type = "text/plain"
req.send_http_header()
# check IP and useragent:
if not _check_client_ip(req):
msg = "[ERROR] Sorry, client IP %s cannot use the service." % _get_client_ip(req)
_log(msg)
return _write(req, msg)
if not _check_client_useragent(req):
msg = "[ERROR] Sorry, this useragent cannot use the service."
_log(msg)
return _write(req, msg)
recid = run_sql("insert into bibrec (creation_date,modification_date) values(NOW(),NOW())")
return recid
def cli_upload(req, file_content=None, mode=None):
""" Robot interface for uploading MARC files
"""
req.content_type = "text/plain"
req.send_http_header()
# check IP and useragent:
if not _check_client_ip(req):
msg = "[ERROR] Sorry, client IP %s cannot use the service." % _get_client_ip(req)
_log(msg)
return _write(req, msg)
if not _check_client_useragent(req):
msg = "[ERROR] Sorry, this useragent cannot use the service."
_log(msg)
return _write(req, msg)
arg_file = file_content
arg_mode = mode
if not arg_file:
msg = "[ERROR] Please specify file body to input."
_log(msg)
return _write(req, msg)
if not arg_mode:
msg = "[ERROR] Please specify upload mode to use."
_log(msg)
return _write(req, msg)
if not arg_mode in PERMITTED_MODES:
msg = "[ERROR] Invalid upload mode."
_log(msg)
return _write(req, msg)
if isinstance(arg_file, Field):
arg_file = arg_file.value
# write temporary file:
tempfile.tempdir = CFG_TMPDIR
filename = tempfile.mktemp(prefix="batchupload_" + \
time.strftime("%Y%m%d%H%M%S", time.localtime()) + "_")
filedesc = open(filename, 'w')
filedesc.write(arg_file)
filedesc.close()
# check if this client can run this file:
client_ip = _get_client_ip(req)
permitted_dbcollids = CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS[client_ip]
if permitted_dbcollids != ['*']: # wildcard
allow = _check_client_can_submit_file(client_ip, filename, req, 0)
if not allow:
msg = "[ERROR] Cannot submit such a file from this IP. (Wrong collection.)"
_log(msg)
return _write(req, msg)
# run upload command:
cmd = CFG_BINDIR + '/bibupload -u batchupload ' + arg_mode + ' ' + filename
os.system(cmd)
msg = "[INFO] %s" % cmd
_log(msg)
return _write(req, msg)
def metadata_upload(req, metafile=None, mode=None, exec_date=None, exec_time=None, metafilename=None, ln=CFG_SITE_LANG):
"""
Metadata web upload service. Get upload parameters and exec bibupload for the given file.
Finally, write upload history.
@return: tuple (error code, message)
error code: code that indicates if an error occurred
message: message describing the error
"""
# start output:
req.content_type = "text/html"
req.send_http_header()
# write temporary file:
metafile = metafile.value
user_info = collect_user_info(req)
tempfile.tempdir = CFG_TMPDIR
filename = tempfile.mktemp(prefix="batchupload_" + \
user_info['nickname'] + "_" + time.strftime("%Y%m%d%H%M%S",
time.localtime()) + "_" + metafilename + "_")
filedesc = open(filename, 'w')
filedesc.write(metafile)
filedesc.close()
# check if this client can run this file:
allow = _check_client_can_submit_file(req=req, metafile=metafile, webupload=1, ln=ln)
if allow[0] != 0:
return (allow[0], allow[1])
# run upload command:
if exec_date:
date = "\'" + exec_date + ' ' + exec_time + "\'"
        jobid = task_low_level_submission('bibupload', user_info['nickname'], mode, "--name=" + metafilename, "-t", date, filename)
else:
jobid = task_low_level_submission('bibupload', user_info['nickname'], mode, "--name=" + metafilename, filename)
# write batch upload history
run_sql("""INSERT INTO hstBATCHUPLOAD (user, submitdate,
filename, execdate, id_schTASK, batch_mode)
VALUES (%s, NOW(), %s, %s, %s, "metadata")""",
(user_info['nickname'], metafilename,
exec_date != "" and (exec_date + ' ' + exec_time)
or time.strftime("%Y-%m-%d %H:%M:%S"), str(jobid), ))
return (0, "Task %s queued" % str(jobid))
def document_upload(req=None, folder="", matching="", mode="", exec_date="", exec_time="", ln=CFG_SITE_LANG):
""" Take files from the given directory and upload them with the appropiate mode.
@parameters:
+ folder: Folder where the files to upload are stored
+ matching: How to match file names with record fields (report number, barcode,...)
+ mode: Upload mode (append, revise, replace)
@return: tuple (file, error code)
file: file name causing the error to notify the user
error code:
1 - More than one possible recID, ambiguous behaviour
2 - No records match that file name
3 - File already exists
"""
import sys
if sys.hexversion < 0x2060000:
from md5 import md5
else:
from hashlib import md5
from invenio.bibdocfile import BibRecDocs, file_strip_ext
import shutil
from invenio.search_engine import perform_request_search, \
search_pattern, \
guess_collection_of_a_record
_ = gettext_set_language(ln)
errors = []
info = [0, []] # Number of files read, name of the files
try:
files = os.listdir(folder)
except OSError, error:
errors.append(("", error))
return errors, info
err_desc = {1: _("More than one possible recID, ambiguous behaviour"), 2: _("No records match that file name"),
3: _("File already exists"), 4: _("A file with the same name and format already exists"),
5: _("No rights to upload to collection '%s'")}
# Create directory DONE/ if doesn't exist
folder = (folder[-1] == "/") and folder or (folder + "/")
files_done_dir = folder + "DONE/"
try:
os.mkdir(files_done_dir)
except OSError:
# Directory exists or no write permission
pass
for docfile in files:
if os.path.isfile(os.path.join(folder, docfile)):
info[0] += 1
identifier = file_strip_ext(docfile)
extension = docfile[len(identifier):]
rec_id = None
if identifier:
rec_id = search_pattern(p=identifier, f=matching, m='e')
if not rec_id:
errors.append((docfile, err_desc[2]))
continue
elif len(rec_id) > 1:
errors.append((docfile, err_desc[1]))
continue
else:
rec_id = str(list(rec_id)[0])
rec_info = BibRecDocs(rec_id)
if rec_info.bibdocs:
for bibdoc in rec_info.bibdocs:
attached_files = bibdoc.list_all_files()
file_md5 = md5(open(os.path.join(folder, docfile), "rb").read()).hexdigest()
num_errors = len(errors)
for attached_file in attached_files:
if attached_file.checksum == file_md5:
errors.append((docfile, err_desc[3]))
break
elif attached_file.fullname == docfile:
errors.append((docfile, err_desc[4]))
break
if len(errors) > num_errors:
continue
# Check if user has rights to upload file
file_collection = guess_collection_of_a_record(int(rec_id))
auth_code, auth_message = acc_authorize_action(req, 'runbatchuploader', collection=file_collection)
if auth_code != 0:
error_msg = err_desc[5] % file_collection
errors.append((docfile, error_msg))
continue
tempfile.tempdir = CFG_TMPDIR
# Move document to be uploaded to temporary folder
tmp_file = tempfile.mktemp(prefix=identifier + "_" + time.strftime("%Y%m%d%H%M%S", time.localtime()) + "_", suffix=extension)
shutil.copy(os.path.join(folder, docfile), tmp_file)
# Create MARC temporary file with FFT tag and call bibupload
filename = tempfile.mktemp(prefix=identifier + '_')
filedesc = open(filename, 'w')
marc_content = """ <record>
<controlfield tag="001">%(rec_id)s</controlfield>
<datafield tag="FFT" ind1=" " ind2=" ">
<subfield code="n">%(name)s</subfield>
<subfield code="a">%(path)s</subfield>
</datafield>
</record> """ % {'rec_id': rec_id,
'name': encode_for_xml(identifier),
'path': encode_for_xml(tmp_file),
}
filedesc.write(marc_content)
filedesc.close()
info[1].append(docfile)
user_info = collect_user_info(req)
user = user_info['nickname']
if not user:
user = "batchupload"
            # Execute bibupload with the appropriate mode
if exec_date:
date = '--runtime=' + "\'" + exec_date + ' ' + exec_time + "\'"
jobid = task_low_level_submission('bibupload', user, "--" + mode, "--name=" + docfile, date, filename)
else:
jobid = task_low_level_submission('bibupload', user, "--" + mode, "--name=" + docfile, filename)
# write batch upload history
run_sql("""INSERT INTO hstBATCHUPLOAD (user, submitdate,
filename, execdate, id_schTASK, batch_mode)
VALUES (%s, NOW(), %s, %s, %s, "document")""",
(user_info['nickname'], docfile,
exec_date != "" and (exec_date + ' ' + exec_time)
or time.strftime("%Y-%m-%d %H:%M:%S"), str(jobid)))
# Move file to DONE folder
done_filename = docfile + "_" + time.strftime("%Y%m%d%H%M%S", time.localtime()) + "_" + str(jobid)
try:
os.rename(os.path.join(folder, docfile), os.path.join(files_done_dir, done_filename))
except OSError:
errors.append('MoveError')
return errors, info
def get_user_metadata_uploads(req):
"""Retrieve all metadata upload history information for a given user"""
user_info = collect_user_info(req)
upload_list = run_sql("""SELECT DATE_FORMAT(h.submitdate, '%%Y-%%m-%%d %%H:%%i:%%S'), \
h.filename, DATE_FORMAT(h.execdate, '%%Y-%%m-%%d %%H:%%i:%%S'), \
s.status \
FROM hstBATCHUPLOAD h INNER JOIN schTASK s \
ON h.id_schTASK = s.id \
WHERE h.user=%s and h.batch_mode="metadata"
ORDER BY h.submitdate DESC""", (user_info['nickname'],))
return upload_list
def get_user_document_uploads(req):
"""Retrieve all document upload history information for a given user"""
user_info = collect_user_info(req)
upload_list = run_sql("""SELECT DATE_FORMAT(h.submitdate, '%%Y-%%m-%%d %%H:%%i:%%S'), \
h.filename, DATE_FORMAT(h.execdate, '%%Y-%%m-%%d %%H:%%i:%%S'), \
s.status \
FROM hstBATCHUPLOAD h INNER JOIN schTASK s \
ON h.id_schTASK = s.id \
WHERE h.user=%s and h.batch_mode="document"
ORDER BY h.submitdate DESC""", (user_info['nickname'],))
return upload_list
def get_daemon_doc_files():
""" Return all files found in batchuploader document folders """
files = {}
for folder in ['/revise', '/append']:
try:
daemon_dir = CFG_BATCHUPLOADER_DAEMON_DIR[0] == '/' and CFG_BATCHUPLOADER_DAEMON_DIR \
or CFG_PREFIX + '/' + CFG_BATCHUPLOADER_DAEMON_DIR
directory = daemon_dir + '/documents' + folder
files[directory] = [(filename, []) for filename in os.listdir(directory) if os.path.isfile(os.path.join(directory, filename))]
for file_instance, info in files[directory]:
stat_info = os.lstat(os.path.join(directory, file_instance))
info.append("%s" % pwd.getpwuid(stat_info.st_uid)[0]) # Owner
info.append("%s" % grp.getgrgid(stat_info.st_gid)[0]) # Group
info.append("%d" % stat_info.st_size) # Size
time_stat = stat_info.st_mtime
time_fmt = "%Y-%m-%d %R"
info.append(time.strftime(time_fmt, time.gmtime(time_stat))) # Last modified
except OSError:
pass
return files
def get_daemon_meta_files():
""" Return all files found in batchuploader metadata folders """
files = {}
for folder in ['/correct', '/replace', '/insert', '/append']:
try:
daemon_dir = CFG_BATCHUPLOADER_DAEMON_DIR[0] == '/' and CFG_BATCHUPLOADER_DAEMON_DIR \
or CFG_PREFIX + '/' + CFG_BATCHUPLOADER_DAEMON_DIR
directory = daemon_dir + '/metadata' + folder
files[directory] = [(filename, []) for filename in os.listdir(directory) if os.path.isfile(os.path.join(directory, filename))]
for file_instance, info in files[directory]:
stat_info = os.lstat(os.path.join(directory, file_instance))
info.append("%s" % pwd.getpwuid(stat_info.st_uid)[0]) # Owner
info.append("%s" % grp.getgrgid(stat_info.st_gid)[0]) # Group
info.append("%d" % stat_info.st_size) # Size
time_stat = stat_info.st_mtime
time_fmt = "%Y-%m-%d %R"
info.append(time.strftime(time_fmt, time.gmtime(time_stat))) # Last modified
except OSError:
pass
return files
def _get_client_ip(req):
"""Return client IP address from req object."""
return str(req.remote_ip)
def _check_client_ip(req):
"""
Is this client permitted to use the service?
"""
client_ip = _get_client_ip(req)
if client_ip in CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS.keys():
return True
return False
def _check_client_useragent(req):
"""
Is this user agent permitted to use the service?
"""
user_info = collect_user_info(req)
client_useragent = user_info['agent']
if client_useragent in CFG_BATCHUPLOADER_WEB_ROBOT_AGENT:
return True
return False
def _check_client_can_submit_file(client_ip="", metafile="", req=None, webupload=0, ln=CFG_SITE_LANG):
"""
    Is this client permitted to upload the given FILENAME?
    Check 980 $a values and collection tags in the file to see if they are among the
    permitted ones as specified by CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS and ACC_AUTHORIZE_ACTION.
    Useful to make sure that the client does not overwrite other records by
    mistake.
"""
from invenio.bibrecord import create_records
_ = gettext_set_language(ln)
recs = create_records(metafile, 0, 0)
user_info = collect_user_info(req)
filename_tag980_values = _detect_980_values_from_marcxml_file(recs)
for filename_tag980_value in filename_tag980_values:
if not filename_tag980_value:
if not webupload:
return False
else:
return(1, "Invalid tag 980 value")
if not webupload:
            if filename_tag980_value not in CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS[client_ip]:
return False
else:
auth_code, auth_message = acc_authorize_action(req, 'runbatchuploader', collection=filename_tag980_value)
if auth_code != 0:
error_msg = _("The user '%(x_user)s' is not authorized to modify collection '%(x_coll)s'") % \
{'x_user': user_info['nickname'], 'x_coll': filename_tag980_value}
return (auth_code, error_msg)
filename_rec_id_collections = _detect_collections_from_marcxml_file(recs)
for filename_rec_id_collection in filename_rec_id_collections:
if not webupload:
            if filename_rec_id_collection not in CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS[client_ip]:
return False
else:
auth_code, auth_message = acc_authorize_action(req, 'runbatchuploader', collection=filename_rec_id_collection)
if auth_code != 0:
error_msg = _("The user '%(x_user)s' is not authorized to modify collection '%(x_coll)s'") % \
{'x_user': user_info['nickname'], 'x_coll': filename_rec_id_collection}
return (auth_code, error_msg)
if not webupload:
return True
else:
return (0, " ")
def _detect_980_values_from_marcxml_file(recs):
"""
Read MARCXML file and return list of 980 $a values found in that file.
Useful for checking rights.
"""
from invenio.bibrecord import record_get_field_values
collection_tag = run_sql("SELECT value FROM tag, field_tag, field \
WHERE tag.id=field_tag.id_tag AND \
field_tag.id_field=field.id AND \
field.code='collection'")
collection_tag = collection_tag[0][0]
dbcollids = {}
for rec, dummy1, dummy2 in recs:
if rec:
for tag980 in record_get_field_values(rec,
tag=collection_tag[:3],
ind1=collection_tag[3],
ind2=collection_tag[4],
code=collection_tag[5]):
dbcollids[tag980] = 1
return dbcollids.keys()
def _detect_collections_from_marcxml_file(recs):
"""
Extract all possible recIDs from MARCXML file and guess collections
for these recIDs.
"""
from invenio.bibrecord import record_get_field_values
from invenio.search_engine import guess_collection_of_a_record
from invenio.bibupload import find_record_from_sysno, \
find_records_from_extoaiid, \
find_record_from_oaiid
dbcollids = {}
sysno_tag = CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG
oaiid_tag = CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG
oai_tag = CFG_OAI_ID_FIELD
for rec, dummy1, dummy2 in recs:
if rec:
for tag001 in record_get_field_values(rec, '001'):
collection = guess_collection_of_a_record(int(tag001))
dbcollids[collection] = 1
for tag_sysno in record_get_field_values(rec, tag=sysno_tag[:3],
ind1=sysno_tag[3],
ind2=sysno_tag[4],
code=sysno_tag[5]):
record = find_record_from_sysno(tag_sysno)
collection = guess_collection_of_a_record(int(record))
dbcollids[collection] = 1
for tag_oaiid in record_get_field_values(rec, tag=oaiid_tag[:3],
ind1=oaiid_tag[3],
ind2=oaiid_tag[4],
code=oaiid_tag[5]):
record = find_records_from_extoaiid(tag_oaiid)
collection = guess_collection_of_a_record(int(record))
dbcollids[collection] = 1
for tag_oai in record_get_field_values(rec, tag=oai_tag[0:3],
ind1=oai_tag[3],
ind2=oai_tag[4],
code=oai_tag[5]):
record = find_record_from_oaiid(tag_oai)
collection = guess_collection_of_a_record(int(record))
dbcollids[collection] = 1
return dbcollids.keys()
def _log(msg, logfile="webupload.log"):
"""
Log MSG into LOGFILE with timestamp.
"""
filedesc = open(CFG_LOGDIR + "/" + logfile, "a")
filedesc.write(time.strftime("%Y-%m-%d %H:%M:%S") + " --> " + msg + "\n")
filedesc.close()
return
def _write(req, msg):
"""
Write MSG to the output stream for the end user.
"""
req.write(msg + "\n")
return
| gpl-2.0 |
lo-co/atm-py | atmPy/for_removal/LAS/LAS.py | 6 | 8188 | # -*- coding: utf-8 -*-
"""
Created on Mon Nov 10 11:43:10 2014
@author: htelg
"""
import datetime
import warnings
import numpy as np
import pandas as pd
import pylab as plt
from StringIO import StringIO as io
from scipy.interpolate import UnivariateSpline
from atmPy.aerosols.size_distr import sizedistribution
def read_csv(fname):
las = _readFromFakeXLS(fname)
sd,hk = _separate_sizedist_and_housekeep(las)
bins = _get_bins(sd)
dist = sizedistribution.SizeDist_TS(sd, bins, "numberConcentration")
return dist
def _separate_sizedist_and_housekeep(las):
"""Beside separating size distribution and housekeeping this
function also converts the data to a numberconcentration (#/cc)
Parameters
----------
las: pandas.DataFrame"""
sd = las.copy()
hk = las.copy()
k = sd.keys()
    # index right after 'Flow sccm', the last housekeeping column
    where = int(np.argwhere(k == 'Flow sccm')) + 1
    khk = k[:where]
sd = sd.drop(khk, axis=1)
hsd = k[where:]
hk = hk.drop(hsd, axis=1)
hk['Sample sccm'] = hk['Sample sccm'].astype(float)
hk['Accum. Secs'] = hk['Accum. Secs'].astype(float)
# normalize to time and flow
sd = sd.mul(60./hk['Sample sccm'] / hk['Accum. Secs'], axis = 0 )
    return sd, hk
def _get_bins(frame, log=False):
"""
get the bins from the column labels of the size distribution DataFrame.
"""
frame = frame.copy()
bins = np.zeros(frame.keys().shape[0]+1)
for e, i in enumerate(frame.keys()):
bin_s, bin_e = i.split(' ')
bin_s = float(bin_s)
bin_e = float(bin_e)
bins[e] = bin_s
bins[e+1] = bin_e
    return bins  # bin edges
def _readFromFakeXLS(fname):
"""reads and shapes a XLS file produced by the LAS instrument"""
fr = pd.read_csv(fname, sep='\t')
newcolname = [fr.columns[e] + ' ' + str(fr.values[0][e]) for e, i in enumerate(fr.columns)]
fr.columns = newcolname
fr = fr.drop(fr.index[0])
    timestamps = pd.Series(fr['Date -'].values + ' ' + fr['Time -'].values)
    fr.index = timestamps.map(lambda x: datetime.datetime.strptime(x, '%m/%d/%Y %I:%M:%S.%f %p'))
fr = fr.drop(['Date -', 'Time -'], axis=1)
return fr
# def _getBinCenters(frame, binedges=False, log=False):
# """
# LAS gives the bin edges, this calculates the bin centers.
# if log is True, the center will be with respect to the log10 ... log(d_{n+1})-log(d_{n})
# if binedges is True, frame is not really a frame but the binedges (array with dtype=float)
# Make sure you are running "removeHousekeeping" first
# """
# frame = frame.copy()
#
# if binedges:
# if log:
# binCenters = 10**((np.log10(frame[:-1]) + np.log10(frame[1:]))/2.)
# else:
#
# binCenters = (frame[:-1] + frame[1:])/2.
# else:
# binCenters = np.zeros(frame.keys().shape)
# for e, i in enumerate(frame.keys()):
# bin_s, bin_e = i.split(' ')
# bin_s = float(bin_s)
# bin_e = float(bin_e)
# normTo = bin_e - bin_s
# frame[i] = frame[i].divide(normTo)
# if log:
# binCenters[e] = 10**((np.log10(bin_e) + np.log10(bin_s))/2.)
# else:
# binCenters[e] = (bin_e + bin_s)/2.
# return binCenters
# def getTimeIntervalFromFrame(frame, start, end):
# """cutes out a particular time interval from frame.
# e.g.: getTimeIntervalFromFrame(frame,'2014-10-31 18:10:00','2014-10-31 18:10:00')"""
# frame = frame.copy()
# if start:
# frame = frame.truncate(before = start)
#
# if end:
# frame = frame.truncate(after = end)
#
# return frame
#
# def frame2singleDistribution(frame):
# frame = frame.copy()
# singleHist = np.zeros(frame.shape[1])
# for i in xrange(frame.shape[1]):
# singleHist[i] = np.nansum(frame.values[:,i])
# singleHist /= frame.shape[0]
# return singleHist
def _string2Dataframe(data):
sb = io(data)
dataFrame = pd.read_csv(sb, sep=' ', names=('d', 'amp')).sort('d')
return dataFrame
def read_Calibration_fromString(data):
'''
    The unit of diameter must be nm.
data = """140 88
150 102
173 175
200 295
233 480
270 740
315 880
365 1130
420 1350
490 1930
570 3050
660 4200
770 5100
890 6300
1040 8000
1200 8300
1400 10000
1600 11500
1880 16000
2180 21000
2500 28000
3000 37000"""
'''
dataFrame = _string2Dataframe(data)
calibrationInstance = calibration(dataFrame)
return calibrationInstance
def save_Calibration(calibrationInstance, fname):
"""should be saved hier cd ~/data/POPS_calibrations/"""
calibrationInstance.data.to_csv(fname, index = False)
return
# def plot_distMap_LAS(fr_d,binEdgensLAS_d):
# binCenters = getBinCenters(binEdgensLAS_d , binedges= True, log = True)
# TIME_LAS,D_LAS,DNDP_LAS = frameToXYZ(fr_d, binCenters)
# f,a = plt.subplots()
# pcIm = a.pcolormesh(TIME_LAS,D_LAS,
# DNDP_LAS,
# norm = LogNorm(),#vmin = 3,vmax = distZoom.data.values.max()),#vmin = 1e-5),
# # cmap=plt.cm.RdYlBu_r,
# # cmap = plt.cm.terrain_r,
# cmap = hm.get_colorMap_intensity(),#plt.cm.hot_r, #PuBuGn,
# # shading='gouraud',
# )
# a.semilogy()
# a.set_ylim((150,2500))
# a.set_ylabel('Diameter (nm)')
# a.set_xlabel('Time')
# a.set_title('LAS')
# cb = f.colorbar(pcIm)
# cb.set_label("Particle number (cm$^{-3}\,$s$^{-1}$)")
# f.autofmt_xdate()
# # a.yaxis.set_minor_formatter(FormatStrFormatter("%i"))
# # a.yaxis.set_major_formatter(FormatStrFormatter("%i"))
class calibration:
def __init__(self,dataTabel):
self.data = dataTabel
self.calibrationFunction = self.get_calibrationFunctionSpline()
def save_csv(self,fname):
save_Calibration(self,fname)
return
def get_calibrationFunctionSpline(self, fitOrder=1):
"""
        Performs a spline fit/smoothing (scipy.interpolate.UnivariateSpline) of d over amp
        (yes, this way, not the other way around).
        Returns: a spline function which can later be used to calculate d from amp
        (stored as self.calibrationFunction by __init__).
        Optional Parameters:
        \t fitOrder: int - smoothing factor passed to UnivariateSpline
"""
        # The following two-step method is necessary to get a smooth curve.
        # When only the second step is done on the cal_curve, some weird wiggles appear.
##### First Step
if (self.data.amp.values[1:]-self.data.amp.values[:-1]).min() < 0:
            warnings.warn('The data represent a non-injective function! This will not work. Plot the calibration to see what I mean.')
sf = UnivariateSpline(self.data.d.values, self.data.amp.values, s=fitOrder)
d = np.logspace(np.log10(self.data.d.values.min()), np.log10(self.data.d.values.max()), 500)
amp = sf(d)
# second step
cal_function = UnivariateSpline(amp, d, s=fitOrder)
return cal_function
def plot_calibration(self):
"""Plots the calibration function and data
Arguments
------------
cal: calibration instance
Returns
------------
figure
axes
calibration data graph
calibration function graph
"""
cal_function = self.calibrationFunction
amp = np.logspace(np.log10(self.data.amp.min()), np.log10(self.data.amp.max()), 500)
d = cal_function(amp)
f, a = plt.subplots()
cal_data, = a.plot(self.data.d, self.data.amp, 'o', label='data',)
cal_func, = a.plot(d, amp, label='function')
a.loglog()
a.set_xlim(0.9*self.data.d.min(), 1.1*self.data.d.max())
a.set_xlabel('Diameter (nm)')
a.set_ylim(0.9*self.data.amp.min(), 1.1*self.data.amp.max())
a.set_ylabel('Amplitude (digitizer bins)')
a.set_title('Calibration curve')
a.legend(loc = 2)
return f, a, cal_data, cal_func | mit |
WhireCrow/openwrt-mt7620 | staging_dir/target-mipsel_r2_uClibc-0.9.33.2/root-ralink/usr/lib/python2.7/urlparse.py | 32 | 14414 | """Parse (absolute and relative) URLs.
The urlparse module is based upon the following RFC specifications.
RFC 3986 (STD66): "Uniform Resource Identifiers" by T. Berners-Lee, R. Fielding
and L. Masinter, January 2005.
RFC 2732 : "Format for Literal IPv6 Addresses in URL's by R.Hinden, B.Carpenter
and L.Masinter, December 1999.
RFC 2396: "Uniform Resource Identifiers (URI)": Generic Syntax by T.
Berners-Lee, R. Fielding, and L. Masinter, August 1998.
RFC 2368: "The mailto URL scheme", by P.Hoffman , L Masinter, J. Zwinski, July 1998.
RFC 1808: "Relative Uniform Resource Locators", by R. Fielding, UC Irvine, June
1995.
RFC 1738: "Uniform Resource Locators (URL)" by T. Berners-Lee, L. Masinter, M.
McCahill, December 1994.
RFC 3986 is considered the current standard and any future changes to
urlparse module should conform with it. The urlparse module is
currently not entirely compliant with this RFC due to de facto
scenarios for parsing, and for backward compatibility purposes, some
parsing quirks from older RFCs are retained. The testcases in
test_urlparse.py provide a good indicator of parsing behavior.
"""
__all__ = ["urlparse", "urlunparse", "urljoin", "urldefrag",
"urlsplit", "urlunsplit", "parse_qs", "parse_qsl"]
# A classification of schemes ('' means apply by default)
uses_relative = ['ftp', 'http', 'gopher', 'nntp', 'imap',
'wais', 'file', 'https', 'shttp', 'mms',
'prospero', 'rtsp', 'rtspu', '', 'sftp',
'svn', 'svn+ssh']
uses_netloc = ['ftp', 'http', 'gopher', 'nntp', 'telnet',
'imap', 'wais', 'file', 'mms', 'https', 'shttp',
'snews', 'prospero', 'rtsp', 'rtspu', 'rsync', '',
'svn', 'svn+ssh', 'sftp','nfs','git', 'git+ssh']
non_hierarchical = ['gopher', 'hdl', 'mailto', 'news',
'telnet', 'wais', 'imap', 'snews', 'sip', 'sips']
uses_params = ['ftp', 'hdl', 'prospero', 'http', 'imap',
'https', 'shttp', 'rtsp', 'rtspu', 'sip', 'sips',
'mms', '', 'sftp']
uses_query = ['http', 'wais', 'imap', 'https', 'shttp', 'mms',
'gopher', 'rtsp', 'rtspu', 'sip', 'sips', '']
uses_fragment = ['ftp', 'hdl', 'http', 'gopher', 'news',
'nntp', 'wais', 'https', 'shttp', 'snews',
'file', 'prospero', '']
# Characters valid in scheme names
scheme_chars = ('abcdefghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
'0123456789'
'+-.')
MAX_CACHE_SIZE = 20
_parse_cache = {}
def clear_cache():
"""Clear the parse cache."""
_parse_cache.clear()
class ResultMixin(object):
"""Shared methods for the parsed result objects."""
@property
def username(self):
netloc = self.netloc
if "@" in netloc:
userinfo = netloc.rsplit("@", 1)[0]
if ":" in userinfo:
userinfo = userinfo.split(":", 1)[0]
return userinfo
return None
@property
def password(self):
netloc = self.netloc
if "@" in netloc:
userinfo = netloc.rsplit("@", 1)[0]
if ":" in userinfo:
return userinfo.split(":", 1)[1]
return None
@property
def hostname(self):
netloc = self.netloc.split('@')[-1]
if '[' in netloc and ']' in netloc:
return netloc.split(']')[0][1:].lower()
elif ':' in netloc:
return netloc.split(':')[0].lower()
elif netloc == '':
return None
else:
return netloc.lower()
@property
def port(self):
netloc = self.netloc.split('@')[-1].split(']')[-1]
if ':' in netloc:
port = netloc.split(':')[1]
return int(port, 10)
else:
return None
from collections import namedtuple
class SplitResult(namedtuple('SplitResult', 'scheme netloc path query fragment'), ResultMixin):
__slots__ = ()
def geturl(self):
return urlunsplit(self)
class ParseResult(namedtuple('ParseResult', 'scheme netloc path params query fragment'), ResultMixin):
__slots__ = ()
def geturl(self):
return urlunparse(self)
def urlparse(url, scheme='', allow_fragments=True):
"""Parse a URL into 6 components:
<scheme>://<netloc>/<path>;<params>?<query>#<fragment>
Return a 6-tuple: (scheme, netloc, path, params, query, fragment).
Note that we don't break the components up in smaller bits
(e.g. netloc is a single string) and we don't expand % escapes."""
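    # For example:
    #   urlparse('http://netloc/path;param?query=arg#frag')
    # returns
    #   ParseResult(scheme='http', netloc='netloc', path='/path',
    #               params='param', query='query=arg', fragment='frag')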
tuple = urlsplit(url, scheme, allow_fragments)
scheme, netloc, url, query, fragment = tuple
if scheme in uses_params and ';' in url:
url, params = _splitparams(url)
else:
params = ''
return ParseResult(scheme, netloc, url, params, query, fragment)
def _splitparams(url):
if '/' in url:
i = url.find(';', url.rfind('/'))
if i < 0:
return url, ''
else:
i = url.find(';')
return url[:i], url[i+1:]
def _splitnetloc(url, start=0):
delim = len(url) # position of end of domain part of url, default is end
for c in '/?#': # look for delimiters; the order is NOT important
wdelim = url.find(c, start) # find first of this delim
if wdelim >= 0: # if found
delim = min(delim, wdelim) # use earliest delim position
return url[start:delim], url[delim:] # return (domain, rest)
def urlsplit(url, scheme='', allow_fragments=True):
"""Parse a URL into 5 components:
<scheme>://<netloc>/<path>?<query>#<fragment>
Return a 5-tuple: (scheme, netloc, path, query, fragment).
Note that we don't break the components up in smaller bits
(e.g. netloc is a single string) and we don't expand % escapes."""
allow_fragments = bool(allow_fragments)
key = url, scheme, allow_fragments, type(url), type(scheme)
cached = _parse_cache.get(key, None)
if cached:
return cached
if len(_parse_cache) >= MAX_CACHE_SIZE: # avoid runaway growth
clear_cache()
netloc = query = fragment = ''
i = url.find(':')
if i > 0:
if url[:i] == 'http': # optimize the common case
scheme = url[:i].lower()
url = url[i+1:]
if url[:2] == '//':
netloc, url = _splitnetloc(url, 2)
if (('[' in netloc and ']' not in netloc) or
(']' in netloc and '[' not in netloc)):
raise ValueError("Invalid IPv6 URL")
if allow_fragments and '#' in url:
url, fragment = url.split('#', 1)
if '?' in url:
url, query = url.split('?', 1)
v = SplitResult(scheme, netloc, url, query, fragment)
_parse_cache[key] = v
return v
for c in url[:i]:
if c not in scheme_chars:
break
else:
try:
# make sure "url" is not actually a port number (in which case
# "scheme" is really part of the path
_testportnum = int(url[i+1:])
except ValueError:
scheme, url = url[:i].lower(), url[i+1:]
if url[:2] == '//':
netloc, url = _splitnetloc(url, 2)
if (('[' in netloc and ']' not in netloc) or
(']' in netloc and '[' not in netloc)):
raise ValueError("Invalid IPv6 URL")
if allow_fragments and scheme in uses_fragment and '#' in url:
url, fragment = url.split('#', 1)
if scheme in uses_query and '?' in url:
url, query = url.split('?', 1)
v = SplitResult(scheme, netloc, url, query, fragment)
_parse_cache[key] = v
return v
def urlunparse(data):
"""Put a parsed URL back together again. This may result in a
slightly different, but equivalent URL, if the URL that was parsed
originally had redundant delimiters, e.g. a ? with an empty query
(the draft states that these are equivalent)."""
scheme, netloc, url, params, query, fragment = data
if params:
url = "%s;%s" % (url, params)
return urlunsplit((scheme, netloc, url, query, fragment))
def urlunsplit(data):
"""Combine the elements of a tuple as returned by urlsplit() into a
complete URL as a string. The data argument can be any five-item iterable.
This may result in a slightly different, but equivalent URL, if the URL that
was parsed originally had unnecessary delimiters (for example, a ? with an
empty query; the RFC states that these are equivalent)."""
scheme, netloc, url, query, fragment = data
if netloc or (scheme and scheme in uses_netloc and url[:2] != '//'):
if url and url[:1] != '/': url = '/' + url
url = '//' + (netloc or '') + url
if scheme:
url = scheme + ':' + url
if query:
url = url + '?' + query
if fragment:
url = url + '#' + fragment
return url
def urljoin(base, url, allow_fragments=True):
"""Join a base URL and a possibly relative URL to form an absolute
interpretation of the latter."""
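    # For example, following RFC 3986 reference resolution:
    #   urljoin('http://a/b/c/d;p?q', 'g')    ->  'http://a/b/c/g'
    #   urljoin('http://a/b/c/d;p?q', '../g') ->  'http://a/b/g'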
if not base:
return url
if not url:
return base
bscheme, bnetloc, bpath, bparams, bquery, bfragment = \
urlparse(base, '', allow_fragments)
scheme, netloc, path, params, query, fragment = \
urlparse(url, bscheme, allow_fragments)
if scheme != bscheme or scheme not in uses_relative:
return url
if scheme in uses_netloc:
if netloc:
return urlunparse((scheme, netloc, path,
params, query, fragment))
netloc = bnetloc
if path[:1] == '/':
return urlunparse((scheme, netloc, path,
params, query, fragment))
if not path and not params:
path = bpath
params = bparams
if not query:
query = bquery
return urlunparse((scheme, netloc, path,
params, query, fragment))
segments = bpath.split('/')[:-1] + path.split('/')
# XXX The stuff below is bogus in various ways...
if segments[-1] == '.':
segments[-1] = ''
while '.' in segments:
segments.remove('.')
while 1:
i = 1
n = len(segments) - 1
while i < n:
if (segments[i] == '..'
and segments[i-1] not in ('', '..')):
del segments[i-1:i+1]
break
i = i+1
else:
break
if segments == ['', '..']:
segments[-1] = ''
elif len(segments) >= 2 and segments[-1] == '..':
segments[-2:] = ['']
return urlunparse((scheme, netloc, '/'.join(segments),
params, query, fragment))
def urldefrag(url):
"""Removes any existing fragment from URL.
Returns a tuple of the defragmented URL and the fragment. If
the URL contained no fragments, the second element is the
empty string.
"""
if '#' in url:
s, n, p, a, q, frag = urlparse(url)
defrag = urlunparse((s, n, p, a, q, ''))
return defrag, frag
else:
return url, ''
# unquote method for parse_qs and parse_qsl
# Cannot use directly from urllib as it would create a circular reference
# because urllib uses urlparse methods (urljoin). If you update this function,
# update it also in urllib. This code duplication does not exist in Python 3.
_hexdig = '0123456789ABCDEFabcdef'
_hextochr = dict((a+b, chr(int(a+b,16)))
for a in _hexdig for b in _hexdig)
def unquote(s):
"""unquote('abc%20def') -> 'abc def'."""
res = s.split('%')
# fastpath
if len(res) == 1:
return s
s = res[0]
for item in res[1:]:
try:
s += _hextochr[item[:2]] + item[2:]
except KeyError:
s += '%' + item
except UnicodeDecodeError:
s += unichr(int(item[:2], 16)) + item[2:]
return s
def parse_qs(qs, keep_blank_values=0, strict_parsing=0):
"""Parse a query given as a string argument.
Arguments:
qs: percent-encoded query string to be parsed
keep_blank_values: flag indicating whether blank values in
percent-encoded queries should be treated as blank strings.
A true value indicates that blanks should be retained as
blank strings. The default false value indicates that
blank values are to be ignored and treated as if they were
not included.
strict_parsing: flag indicating what to do with parsing errors.
If false (the default), errors are silently ignored.
If true, errors raise a ValueError exception.
"""
dict = {}
for name, value in parse_qsl(qs, keep_blank_values, strict_parsing):
if name in dict:
dict[name].append(value)
else:
dict[name] = [value]
return dict
def parse_qsl(qs, keep_blank_values=0, strict_parsing=0):
"""Parse a query given as a string argument.
Arguments:
qs: percent-encoded query string to be parsed
keep_blank_values: flag indicating whether blank values in
percent-encoded queries should be treated as blank strings. A
true value indicates that blanks should be retained as blank
strings. The default false value indicates that blank values
are to be ignored and treated as if they were not included.
strict_parsing: flag indicating what to do with parsing errors. If
false (the default), errors are silently ignored. If true,
errors raise a ValueError exception.
Returns a list, as G-d intended.
"""
pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
r = []
for name_value in pairs:
if not name_value and not strict_parsing:
continue
nv = name_value.split('=', 1)
if len(nv) != 2:
if strict_parsing:
raise ValueError, "bad query field: %r" % (name_value,)
# Handle case of a control-name with no equal sign
if keep_blank_values:
nv.append('')
else:
continue
if len(nv[1]) or keep_blank_values:
name = unquote(nv[0].replace('+', ' '))
value = unquote(nv[1].replace('+', ' '))
r.append((name, value))
return r
| gpl-2.0 |
dpla/zen | lib/akamod/__init__.py | 2 | 1845 | #zen.akamod
#Some helper classes for accessing local Zen/Akara services
import urllib
from amara.thirdparty import httplib2, json
from akara import logger
from akara import request
from akara.caching import cache
from akara import global_config
from akara.util import find_peer_service
class geolookup_service(object):
'''
Convenience for calling the local/peer geolookup service.
Can only be called from within an Akara module handler. E.g. the following sample module:
-- %< --
from akara.services import simple_service
from zen.akamod import geolookup_service
geolookup = geolookup_service()
@simple_service("GET", "http://testing/report.get")
def s(place):
return repr(geolookup('Superior,CO'))
-- %< --
Then test: curl -i "http://localhost:8880/s?place=Superior,CO"
'''
def __init__(self):
self.GEOLOOKUP_URI = find_peer_service(u'http://purl.org/com/zepheira/services/geolookup.json')
self.H = httplib2.Http('/tmp/.cache')
return
def __call__(self, place):
if not place:
return None
if isinstance(place, unicode):
place = place.encode('utf-8')
        if not self.GEOLOOKUP_URI:
            # re-resolve the peer service URI in case it was not available at init time
            self.GEOLOOKUP_URI = find_peer_service(u'http://purl.org/com/zepheira/services/geolookup.json')
logger.debug('geolookup' + repr((place, self.GEOLOOKUP_URI)))
resp, body = self.H.request(self.GEOLOOKUP_URI + '?' + urllib.urlencode({'place': place}))
logger.debug('geolookup result: {0}'.format(repr(body)))
try:
result = json.loads(body)
return result
#latlong = json.loads(body).itervalues().next()
#return latlong
except (ValueError, StopIteration), e:
logger.debug("Not found: " + repr(place))
return None
#GEOLOOKUP_CACHE = cache(
# 'http://purl.org/com/zepheira/services/geolookup.json', expires=24*60*60)
| apache-2.0 |
geminy/aidear | oss/qt/qt-everywhere-opensource-src-5.9.0/qtwebengine/src/3rdparty/chromium/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/views/printing_unittest.py | 2 | 11616 | # Copyright (C) 2010, 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit tests for printing.py."""
import StringIO
import optparse
import sys
import unittest
from webkitpy.common.host_mock import MockHost
from webkitpy.common.system import logtesting
from webkitpy.layout_tests import port
from webkitpy.layout_tests.controllers import manager
from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.models import test_failures
from webkitpy.layout_tests.models import test_results
from webkitpy.layout_tests.views import printing
def get_options(args):
print_options = printing.print_options()
option_parser = optparse.OptionParser(option_list=print_options)
return option_parser.parse_args(args)
class TestUtilityFunctions(unittest.TestCase):
def test_print_options(self):
options, _ = get_options([])
self.assertIsNotNone(options)
class FakeRunResults(object):
def __init__(self, total=1, expected=1, unexpected=0, fake_results=None):
fake_results = fake_results or []
self.total = total
self.expected = expected
self.expected_failures = 0
self.unexpected = unexpected
self.expected_skips = 0
self.results_by_name = {}
total_run_time = 0
for result in fake_results:
self.results_by_name[result.shard_name] = result
total_run_time += result.total_run_time
self.run_time = total_run_time + 1
class FakeShard(object):
def __init__(self, shard_name, total_run_time):
self.shard_name = shard_name
self.total_run_time = total_run_time
class Testprinter(unittest.TestCase):
def assertEmpty(self, stream):
self.assertFalse(stream.getvalue())
def assertNotEmpty(self, stream):
self.assertTrue(stream.getvalue())
def assertWritten(self, stream, contents):
self.assertEqual(stream.buflist, contents)
def reset(self, stream):
stream.buflist = []
stream.buf = ''
def get_printer(self, args=None):
args = args or []
printing_options = printing.print_options()
option_parser = optparse.OptionParser(option_list=printing_options)
options, args = option_parser.parse_args(args)
host = MockHost()
self._port = host.port_factory.get('test', options)
regular_output = StringIO.StringIO()
printer = printing.Printer(self._port, options, regular_output)
return printer, regular_output
def get_result(self, test_name, result_type=test_expectations.PASS, run_time=0):
failures = []
if result_type == test_expectations.TIMEOUT:
failures = [test_failures.FailureTimeout()]
elif result_type == test_expectations.CRASH:
failures = [test_failures.FailureCrash()]
return test_results.TestResult(test_name, failures=failures, test_run_time=run_time)
def test_configure_and_cleanup(self):
# This test verifies that calling cleanup repeatedly and deleting
# the object is safe.
printer, _ = self.get_printer()
printer.cleanup()
printer.cleanup()
printer = None
def test_print_config(self):
printer, err = self.get_printer()
# FIXME: Make it so these options don't have to be set directly.
# pylint: disable=protected-access
printer._options.pixel_tests = True
printer._options.new_baseline = True
printer._options.time_out_ms = 6000
printer._options.slow_time_out_ms = 12000
printer._options.order = 'random'
printer._options.seed = 1234
printer.print_config('/tmp')
self.assertIn("Using port 'test-mac-mac10.10'", err.getvalue())
self.assertIn('Test configuration: <mac10.10, x86, release>', err.getvalue())
self.assertIn('View the test results at file:///tmp', err.getvalue())
self.assertIn('View the archived results dashboard at file:///tmp', err.getvalue())
self.assertIn('Baseline search path: test-mac-mac10.10 -> test-mac-mac10.11 -> generic', err.getvalue())
self.assertIn('Using Release build', err.getvalue())
self.assertIn('Pixel tests enabled', err.getvalue())
self.assertIn('Command line:', err.getvalue())
self.assertIn('Regular timeout: ', err.getvalue())
self.assertIn('Using random order with seed: 1234', err.getvalue())
self.reset(err)
printer._options.quiet = True
printer.print_config('/tmp')
self.assertNotIn('Baseline search path: test-mac-mac10.10 -> test-mac-mac10.11 -> generic', err.getvalue())
def test_print_directory_timings(self):
printer, err = self.get_printer()
printer._options.debug_rwt_logging = True
run_results = FakeRunResults()
run_results.results_by_name = {
"slowShard": FakeShard("slowShard", 16),
"borderlineShard": FakeShard("borderlineShard", 15),
"fastShard": FakeShard("fastShard", 1),
}
printer._print_directory_timings(run_results)
self.assertWritten(err, ['Time to process slowest subdirectories:\n',
' slowShard took 16.0 seconds to run 1 tests.\n', '\n'])
printer, err = self.get_printer()
printer._options.debug_rwt_logging = True
run_results.results_by_name = {
"borderlineShard": FakeShard("borderlineShard", 15),
"fastShard": FakeShard("fastShard", 1),
}
printer._print_directory_timings(run_results)
self.assertWritten(err, [])
def test_print_one_line_summary(self):
def run_test(total, exp, unexp, shards, result):
printer, err = self.get_printer(['--timing'] if shards else None)
fake_results = FakeRunResults(total, exp, unexp, shards)
total_time = fake_results.run_time + 1
printer._print_one_line_summary(total_time, fake_results)
self.assertWritten(err, result)
# Without times:
run_test(1, 1, 0, [], ["The test ran as expected.\n", "\n"])
run_test(2, 1, 1, [], ["\n", "1 test ran as expected, 1 didn't:\n", "\n"])
run_test(3, 2, 1, [], ["\n", "2 tests ran as expected, 1 didn't:\n", "\n"])
run_test(3, 2, 0, [], ["\n", "2 tests ran as expected (1 didn't run).\n", "\n"])
# With times:
fake_shards = [FakeShard("foo", 1), FakeShard("bar", 2)]
run_test(1, 1, 0, fake_shards, ["The test ran as expected in 5.00s (2.00s in rwt, 1x).\n", "\n"])
run_test(2, 1, 1, fake_shards, ["\n", "1 test ran as expected, 1 didn't in 5.00s (2.00s in rwt, 1x):\n", "\n"])
run_test(3, 2, 1, fake_shards, ["\n", "2 tests ran as expected, 1 didn't in 5.00s (2.00s in rwt, 1x):\n", "\n"])
run_test(3, 2, 0, fake_shards, ["\n", "2 tests ran as expected (1 didn't run) in 5.00s (2.00s in rwt, 1x).\n", "\n"])
def test_test_status_line(self):
printer, _ = self.get_printer()
printer._meter.number_of_columns = lambda: 80
actual = printer._test_status_line(
'fast/dom/HTMLFormElement/associated-elements-after-index-assertion-fail1.html', ' passed')
self.assertEqual(80, len(actual))
self.assertEqual(actual, '[0/0] fast/dom/HTMLFormElement/associa...after-index-assertion-fail1.html passed')
printer._meter.number_of_columns = lambda: 89
actual = printer._test_status_line(
'fast/dom/HTMLFormElement/associated-elements-after-index-assertion-fail1.html', ' passed')
self.assertEqual(89, len(actual))
self.assertEqual(actual, '[0/0] fast/dom/HTMLFormElement/associated-...ents-after-index-assertion-fail1.html passed')
printer._meter.number_of_columns = lambda: sys.maxsize
actual = printer._test_status_line(
'fast/dom/HTMLFormElement/associated-elements-after-index-assertion-fail1.html', ' passed')
self.assertEqual(90, len(actual))
self.assertEqual(actual, '[0/0] fast/dom/HTMLFormElement/associated-elements-after-index-assertion-fail1.html passed')
printer._meter.number_of_columns = lambda: 18
actual = printer._test_status_line(
'fast/dom/HTMLFormElement/associated-elements-after-index-assertion-fail1.html', ' passed')
self.assertEqual(18, len(actual))
self.assertEqual(actual, '[0/0] f...l passed')
printer._meter.number_of_columns = lambda: 10
actual = printer._test_status_line(
'fast/dom/HTMLFormElement/associated-elements-after-index-assertion-fail1.html', ' passed')
self.assertEqual(actual, '[0/0] associated-elements-after-index-assertion-fail1.html passed')
def test_details(self):
printer, err = self.get_printer(['--details'])
result = self.get_result('passes/image.html')
printer.print_started_test('passes/image.html')
printer.print_finished_test(result, expected=False, exp_str='', got_str='')
self.assertNotEmpty(err)
def test_print_found(self):
printer, err = self.get_printer()
printer.print_found(100, 10, 1, 1)
self.assertWritten(err, ["Found 100 tests; running 10, skipping 90.\n"])
self.reset(err)
printer.print_found(100, 10, 2, 3)
self.assertWritten(err, ["Found 100 tests; running 10 (6 times each: --repeat-each=2 --iterations=3), skipping 90.\n"])
def test_debug_rwt_logging_is_throttled(self):
printer, err = self.get_printer(['--debug-rwt-logging'])
result = self.get_result('passes/image.html')
printer.print_started_test('passes/image.html')
printer.print_finished_test(result, expected=True, exp_str='', got_str='')
printer.print_started_test('passes/text.html')
result = self.get_result('passes/text.html')
printer.print_finished_test(result, expected=True, exp_str='', got_str='')
# Only the first test's start should be printed.
lines = err.buflist
self.assertEqual(len(lines), 1)
self.assertTrue(lines[0].endswith('passes/image.html\n'))
| gpl-3.0 |
lakshayg/tensorflow | tensorflow/python/debug/cli/cli_config_test.py | 68 | 5541 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for cli_config."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import shutil
import tempfile
from tensorflow.python.debug.cli import cli_config
from tensorflow.python.framework import test_util
from tensorflow.python.platform import gfile
from tensorflow.python.platform import googletest
class CLIConfigTest(test_util.TensorFlowTestCase):
def setUp(self):
self._tmp_dir = tempfile.mkdtemp()
self._tmp_config_path = os.path.join(self._tmp_dir, ".tfdbg_config")
self.assertFalse(gfile.Exists(self._tmp_config_path))
super(CLIConfigTest, self).setUp()
def tearDown(self):
shutil.rmtree(self._tmp_dir)
super(CLIConfigTest, self).tearDown()
def testConstructCLIConfigWithoutFile(self):
config = cli_config.CLIConfig(config_file_path=self._tmp_config_path)
self.assertEqual(20, config.get("graph_recursion_depth"))
self.assertEqual(True, config.get("mouse_mode"))
with self.assertRaises(KeyError):
config.get("property_that_should_not_exist")
self.assertTrue(gfile.Exists(self._tmp_config_path))
def testCLIConfigForwardCompatibilityTest(self):
config = cli_config.CLIConfig(config_file_path=self._tmp_config_path)
with open(self._tmp_config_path, "rt") as f:
config_json = json.load(f)
# Remove a field to simulate forward compatibility test.
del config_json["graph_recursion_depth"]
with open(self._tmp_config_path, "wt") as f:
json.dump(config_json, f)
config = cli_config.CLIConfig(config_file_path=self._tmp_config_path)
self.assertEqual(20, config.get("graph_recursion_depth"))
def testModifyConfigValue(self):
config = cli_config.CLIConfig(config_file_path=self._tmp_config_path)
config.set("graph_recursion_depth", 9)
config.set("mouse_mode", False)
self.assertEqual(9, config.get("graph_recursion_depth"))
self.assertEqual(False, config.get("mouse_mode"))
def testModifyConfigValueWithTypeCasting(self):
config = cli_config.CLIConfig(config_file_path=self._tmp_config_path)
config.set("graph_recursion_depth", "18")
config.set("mouse_mode", "false")
self.assertEqual(18, config.get("graph_recursion_depth"))
self.assertEqual(False, config.get("mouse_mode"))
def testModifyConfigValueWithTypeCastingFailure(self):
config = cli_config.CLIConfig(config_file_path=self._tmp_config_path)
with self.assertRaises(ValueError):
config.set("mouse_mode", "maybe")
def testLoadFromModifiedConfigFile(self):
config = cli_config.CLIConfig(config_file_path=self._tmp_config_path)
config.set("graph_recursion_depth", 9)
config.set("mouse_mode", False)
config2 = cli_config.CLIConfig(config_file_path=self._tmp_config_path)
self.assertEqual(9, config2.get("graph_recursion_depth"))
self.assertEqual(False, config2.get("mouse_mode"))
def testSummarizeFromConfig(self):
config = cli_config.CLIConfig(config_file_path=self._tmp_config_path)
output = config.summarize()
self.assertEqual(
["Command-line configuration:",
"",
" graph_recursion_depth: %d" % config.get("graph_recursion_depth"),
" mouse_mode: %s" % config.get("mouse_mode")], output.lines)
def testSummarizeFromConfigWithHighlight(self):
config = cli_config.CLIConfig(config_file_path=self._tmp_config_path)
output = config.summarize(highlight="mouse_mode")
self.assertEqual(
["Command-line configuration:",
"",
" graph_recursion_depth: %d" % config.get("graph_recursion_depth"),
" mouse_mode: %s" % config.get("mouse_mode")], output.lines)
self.assertEqual((2, 12, ["underline", "bold"]),
output.font_attr_segs[3][0])
self.assertEqual((14, 18, "bold"), output.font_attr_segs[3][1])
def testSetCallback(self):
config = cli_config.CLIConfig(config_file_path=self._tmp_config_path)
test_value = {"graph_recursion_depth": -1}
def callback(config):
test_value["graph_recursion_depth"] = config.get("graph_recursion_depth")
config.set_callback("graph_recursion_depth", callback)
config.set("graph_recursion_depth", config.get("graph_recursion_depth") - 1)
self.assertEqual(test_value["graph_recursion_depth"],
config.get("graph_recursion_depth"))
def testSetCallbackInvalidPropertyName(self):
config = cli_config.CLIConfig(config_file_path=self._tmp_config_path)
with self.assertRaises(KeyError):
config.set_callback("nonexistent_property_name", print)
def testSetCallbackNotCallable(self):
config = cli_config.CLIConfig(config_file_path=self._tmp_config_path)
with self.assertRaises(TypeError):
config.set_callback("graph_recursion_depth", 1)
if __name__ == "__main__":
googletest.main()
| apache-2.0 |
caosmo/pip | pip/_vendor/requests/packages/chardet/langgreekmodel.py | 2763 | 12628 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Character Mapping Table:
Latin7_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85, # 40
79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253, # 50
253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55, # 60
78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253, # 70
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 80
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 90
253,233, 90,253,253,253,253,253,253,253,253,253,253, 74,253,253, # a0
253,253,253,253,247,248, 61, 36, 46, 71, 73,253, 54,253,108,123, # b0
110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39, # c0
35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15, # d0
124, 1, 29, 20, 21, 3, 32, 13, 25, 5, 11, 16, 10, 6, 30, 4, # e0
9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0
)
win1253_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85, # 40
79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253, # 50
253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55, # 60
78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253, # 70
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 80
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 90
253,233, 61,253,253,253,253,253,253,253,253,253,253, 74,253,253, # a0
253,253,253,253,247,253,253, 36, 46, 71, 73,253, 54,253,108,123, # b0
110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39, # c0
35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15, # d0
124, 1, 29, 20, 21, 3, 32, 13, 25, 5, 11, 16, 10, 6, 30, 4, # e0
9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 98.2851%
# first 1024 sequences:1.7001%
# rest sequences: 0.0359%
# negative sequences: 0.0148%
GreekLangModel = (
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,2,2,3,3,3,3,3,3,3,3,1,3,3,3,0,2,2,3,3,0,3,0,3,2,0,3,3,3,0,
3,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,0,3,3,0,3,2,3,3,0,3,2,3,3,3,0,0,3,0,3,0,3,3,2,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,2,3,2,2,3,3,3,3,3,3,3,3,0,3,3,3,3,0,2,3,3,0,3,3,3,3,2,3,3,3,0,
2,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,2,1,3,3,3,3,2,3,3,2,3,3,2,0,
0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,2,3,3,0,
2,0,1,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,3,0,0,0,0,3,3,0,3,1,3,3,3,0,3,3,0,3,3,3,3,0,0,0,0,
2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,0,3,0,3,3,3,3,3,0,3,2,2,2,3,0,2,3,3,3,3,3,2,3,3,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,3,2,2,2,3,3,3,3,0,3,1,3,3,3,3,2,3,3,3,3,3,3,3,2,2,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,3,0,0,0,3,3,2,3,3,3,3,3,0,0,3,2,3,0,2,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,3,0,0,3,3,0,2,3,0,3,0,3,3,3,0,0,3,0,3,0,2,2,3,3,0,0,
0,0,1,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,3,2,3,3,3,3,0,3,3,3,3,3,0,3,3,2,3,2,3,3,2,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,2,3,2,3,3,3,3,3,3,0,2,3,2,3,2,2,2,3,2,3,3,2,3,0,2,2,2,3,0,
2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,0,3,3,3,2,3,3,0,0,3,0,3,0,0,0,3,2,0,3,0,3,0,0,2,0,2,0,
0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,0,0,0,3,3,0,3,3,3,0,0,1,2,3,0,
3,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,0,3,2,2,3,3,0,3,3,3,3,3,2,1,3,0,3,2,3,3,2,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,3,0,2,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,3,0,3,2,3,0,0,3,3,3,0,
3,0,0,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,2,0,3,2,3,0,0,3,2,3,0,
2,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,1,2,2,3,3,3,3,3,3,0,2,3,0,3,0,0,0,3,3,0,3,0,2,0,0,2,3,1,0,
2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,3,0,3,0,3,3,2,3,0,3,3,3,3,3,3,0,3,3,3,0,2,3,0,0,3,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,0,0,3,0,0,0,3,3,0,3,0,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,0,3,3,3,3,3,3,0,0,3,0,2,0,0,0,3,3,0,3,0,3,0,0,2,0,2,0,
0,0,0,0,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,3,0,3,0,2,0,3,2,0,3,2,3,2,3,0,0,3,2,3,2,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,2,3,3,3,3,3,0,0,0,3,0,2,1,0,0,3,2,2,2,0,3,0,0,2,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,2,0,3,0,3,0,3,3,0,2,1,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,3,0,3,3,3,3,3,3,0,2,3,0,3,0,0,0,2,1,0,2,2,3,0,0,2,2,2,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,2,3,3,3,2,3,0,0,1,3,0,2,0,0,0,0,3,0,1,0,2,0,0,1,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,1,0,3,0,0,0,3,2,0,3,2,3,3,3,0,0,3,0,3,2,2,2,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,0,0,3,0,0,0,0,2,0,2,3,3,2,2,2,2,3,0,2,0,2,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,2,0,0,0,0,0,0,2,3,0,2,0,2,3,2,0,0,3,0,3,0,3,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,3,2,3,3,2,2,3,0,2,0,3,0,0,0,2,0,0,0,0,1,2,0,2,0,2,0,
0,2,0,2,0,2,2,0,0,1,0,2,2,2,0,2,2,2,0,2,2,2,0,0,2,0,0,1,0,0,0,0,
0,2,0,3,3,2,0,0,0,0,0,0,1,3,0,2,0,2,2,2,0,0,2,0,3,0,0,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,2,3,2,0,2,2,0,2,0,2,2,0,2,0,2,2,2,0,0,0,0,0,0,2,3,0,0,0,2,
0,1,2,0,0,0,0,2,2,0,0,0,2,1,0,2,2,0,0,0,0,0,0,1,0,2,0,0,0,0,0,0,
0,0,2,1,0,2,3,2,2,3,2,3,2,0,0,3,3,3,0,0,3,2,0,0,0,1,1,0,2,0,2,2,
0,2,0,2,0,2,2,0,0,2,0,2,2,2,0,2,2,2,2,0,0,2,0,0,0,2,0,1,0,0,0,0,
0,3,0,3,3,2,2,0,3,0,0,0,2,2,0,2,2,2,1,2,0,0,1,2,2,0,0,3,0,0,0,2,
0,1,2,0,0,0,1,2,0,0,0,0,0,0,0,2,2,0,1,0,0,2,0,0,0,2,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,2,2,0,0,0,2,0,2,3,3,0,2,0,0,0,0,0,0,2,2,2,0,2,2,0,2,0,2,
0,2,2,0,0,2,2,2,2,1,0,0,2,2,0,2,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,
0,2,0,3,2,3,0,0,0,3,0,0,2,2,0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,0,2,
0,0,2,2,0,0,2,2,2,0,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,3,2,0,2,2,2,2,2,0,0,0,2,0,0,0,0,2,0,1,0,0,2,0,1,0,0,0,
0,2,2,2,0,2,2,0,1,2,0,2,2,2,0,2,2,2,2,1,2,2,0,0,2,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,2,0,2,0,2,2,0,0,0,0,1,2,1,0,0,2,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,3,2,3,0,0,2,0,0,0,2,2,0,2,0,0,0,1,0,0,2,0,2,0,2,2,0,0,0,0,
0,0,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,
0,2,2,3,2,2,0,0,0,0,0,0,1,3,0,2,0,2,2,0,0,0,1,0,2,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,0,2,0,3,2,0,2,0,0,0,0,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,2,0,0,0,0,1,1,0,0,2,1,2,0,2,2,0,1,0,0,1,0,0,0,2,0,0,0,0,0,0,
0,3,0,2,2,2,0,0,2,0,0,0,2,0,0,0,2,3,0,2,0,0,0,0,0,0,2,2,0,0,0,2,
0,1,2,0,0,0,1,2,2,1,0,0,0,2,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,1,2,0,2,2,0,2,0,0,2,0,0,0,0,1,2,1,0,2,1,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,0,3,1,2,2,0,2,0,0,0,0,2,0,0,0,2,0,0,3,0,0,0,0,2,2,2,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,1,0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,2,
0,2,2,0,0,2,2,2,2,2,0,1,2,0,0,0,2,2,0,1,0,2,0,0,2,2,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,0,0,0,0,2,0,2,0,0,0,0,2,
0,1,2,0,0,0,0,2,2,1,0,1,0,1,0,2,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,2,0,0,2,2,0,0,0,0,1,0,0,0,0,0,0,2,
0,2,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0,
0,2,2,2,2,0,0,0,3,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,1,
0,0,2,0,0,0,0,1,2,0,0,0,0,0,0,2,2,1,1,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,2,2,2,0,0,0,2,0,0,0,0,0,0,0,0,2,
0,0,1,0,0,0,0,2,1,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,3,0,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,2,
0,0,2,0,0,0,0,2,2,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,0,2,2,1,0,0,0,0,0,0,2,0,0,2,0,2,2,2,0,0,0,0,0,0,2,0,0,0,0,2,
0,0,2,0,0,2,0,2,2,0,0,0,0,2,0,2,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0,
0,0,3,0,0,0,2,2,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0,0,0,
0,2,2,2,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,
0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,2,0,0,0,2,0,0,0,0,0,1,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,2,0,0,0,
0,2,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,2,0,2,0,0,0,
0,0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,1,2,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
)
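# (Explanatory note on the table above, paraphrasing the convention used for
# these generated chardet language models: each value classifies the
# frequency of a letter pair, roughly 0 = negative, 1 = unlikely,
# 2 = normal, 3 = positive sequence.)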
Latin7GreekModel = {
'charToOrderMap': Latin7_CharToOrderMap,
'precedenceMatrix': GreekLangModel,
'mTypicalPositiveRatio': 0.982851,
'keepEnglishLetter': False,
'charsetName': "ISO-8859-7"
}
Win1253GreekModel = {
'charToOrderMap': win1253_CharToOrderMap,
'precedenceMatrix': GreekLangModel,
'mTypicalPositiveRatio': 0.982851,
'keepEnglishLetter': False,
'charsetName': "windows-1253"
}
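# Illustrative helper, NOT part of upstream chardet: a rough sketch of how a
# single-byte prober consumes one of the model dicts above. `data` is assumed
# to be a byte string; sample_size=64 and the "3 = positive sequence"
# category are assumptions based on the chardet conventions, not taken from
# this file.
def _example_sequence_score(data, model, sample_size=64):
    last_order = 255  # no previous frequent letter seen yet
    hits = 0          # letter pairs scored as "positive" (value 3)
    total = 0         # all letter pairs scored
    for byte in bytearray(data):
        # Map the raw byte to its frequency order for this charset.
        order = model['charToOrderMap'][byte]
        if order < sample_size and last_order < sample_size:
            total += 1
            # Look up the pair (previous letter, current letter) in the
            # flattened precedence matrix.
            if model['precedenceMatrix'][last_order * sample_size + order] == 3:
                hits += 1
        last_order = order
    return float(hits) / total if total else 0.0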
# flake8: noqa
| mit |
marrow/web.dispatch.route | test/test_router.py | 1 | 1820 | # encoding: utf-8
import pytest
from web.dispatch.route.router import __DYNAMIC__, Router
from sample import Root
@pytest.fixture
def router():
return Router.from_object(Root)
def test_dynamic_repr():
assert repr(__DYNAMIC__) == '<dynamic element>'
def test_router_singleton():
assert Router.from_object(Root) is Router.from_object(Root)
def test_invalid_route():
router = Router()
with pytest.raises(ValueError):
router.parse("{bad:/}")
class TestRouterSample(object):
def test_single_static(self, router):
assert len(router.routes) == 1 # There's only a single top-level element.
assert 'user' in router.routes # It's "user".
assert len(router.routes['user']) == 2 # Which has a terminus and dynamic continuation.
assert router.routes['user'][None] == Root.root # The terminus is the "root" method.
assert router.routes['user'][None](Root()) == "I'm all people." # It really is.
def test_dynamic_username(self, router):
assert __DYNAMIC__ in router.routes['user']
dynamic = router.routes['user'][__DYNAMIC__]
assert len(dynamic) == 1
assert list(dynamic.keys())[0].match("GothAlice") # The regular expression matches.
assert len(list(dynamic.values())[0]) == 2
assert list(dynamic.values())[0][None] == Root.user
assert list(dynamic.values())[0][None](Root(), "GothAlice") == "Hi, I'm GothAlice"
def test_dynamic_username_action(self, router):
assert __DYNAMIC__ in router.routes['user']
dynamic = router.routes['user'][__DYNAMIC__]
assert len(dynamic) == 1
assert list(dynamic.keys())[0].match("GothAlice") # The regular expression matches.
assert len(list(dynamic.values())[0]) == 2
assert list(dynamic.values())[0][None] == Root.user
assert list(dynamic.values())[0][None](Root(), "GothAlice") == "Hi, I'm GothAlice"
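
# Illustrative only, not one of the upstream tests: the same resolution the
# class above checks, driven by hand. Uses only names imported at the top of
# this file (Router, Root, __DYNAMIC__).
def test_manual_lookup_sketch():
    router = Router.from_object(Root)
    branch = router.routes['user']
    assert branch[None] == Root.root  # static terminus, as asserted above
    pattern = list(branch[__DYNAMIC__].keys())[0]
    assert pattern.match("GothAlice")  # the dynamic child matches a username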
| mit |
saeschdivara/ArangoPy | arangodb/tests/api/collection.py | 1 | 2502 | import unittest
from arangodb.api import Database, Collection
class CollectionTestCase(unittest.TestCase):
def setUp(self):
self.database_name = 'testcase_collection_123'
self.db = Database.create(name=self.database_name)
def tearDown(self):
Database.remove(name=self.database_name)
def test_create_and_delete_collection_without_extra_db(self):
collection_name = 'test_foo_123'
col = Collection.create(name=collection_name)
self.assertIsNotNone(col)
Collection.remove(name=collection_name)
def test_get_collection(self):
collection_name = 'test_foo_123'
col = Collection.create(name=collection_name)
self.assertIsNotNone(col)
retrieved_col = Collection.get_loaded_collection(name=collection_name)
self.assertEqual(col.id, retrieved_col.id)
self.assertEqual(col.name, retrieved_col.name)
self.assertEqual(col.type, retrieved_col.type)
Collection.remove(name=collection_name)
def test_getting_new_info_for_collection(self):
collection_name = 'test_foo_123'
col = Collection.create(name=collection_name)
retrieved_col = Collection.get_loaded_collection(name=collection_name)
retrieved_col.set_data(waitForSync=True)
retrieved_col.save()
col.get()
self.assertEqual(col.waitForSync, True)
Collection.remove(name=collection_name)
def test_different_document_revisions(self):
collection_name = 'test_revision_documents'
col = Collection.create(name=collection_name)
doc1 = col.create_document()
doc1.save()
all_documents = col.documents()
self.assertEqual(len(all_documents), 1)
doc = all_documents[0]
self.assertEqual(doc.revision, doc1.revision)
doc.foo = 'bar'
doc.save()
self.assertNotEqual(doc.revision, doc1.revision)
Collection.remove(name=collection_name)
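    # Illustrative sketch (not an upstream test): the document lifecycle the
    # surrounding tests exercise, condensed to the bare API calls used above.
    #
    #   col = Collection.create(name='example')
    #   doc = col.create_document()
    #   doc.foo = 'bar'
    #   doc.save()
    #   doc.delete()
    #   Collection.remove(name='example')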
def test_remove_document_from_collection(self):
collection_name = 'test_remove_document_from_collection'
col = Collection.create(name=collection_name)
doc1 = col.create_document()
doc1.save()
all_documents = col.documents()
self.assertEqual(len(all_documents), 1)
doc = all_documents[0]
doc.delete()
all_documents = col.documents()
self.assertEqual(len(all_documents), 0)
Collection.remove(name=collection_name)
| mit |
mintar/ros-infrastructure-rosdistro | src/rosdistro/source_repository_specification.py | 1 | 2465 | # Software License Agreement (BSD License)
#
# Copyright (c) 2014, Open Source Robotics Foundation, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Open Source Robotics Foundation, Inc. nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from .repository_specification import RepositorySpecification
class SourceRepositorySpecification(RepositorySpecification):
def __init__(self, name, data):
super(SourceRepositorySpecification, self).__init__(name, data)
self.test_commits = None
if 'test_commits' in data:
self.test_commits = bool(data['test_commits'])
self.test_pull_requests = None
if 'test_pull_requests' in data:
self.test_pull_requests = bool(data['test_pull_requests'])
def get_data(self):
data = self._get_data(skip_git_type=False)
if self.test_commits is not None:
data['test_commits'] = self.test_commits
if self.test_pull_requests is not None:
data['test_pull_requests'] = self.test_pull_requests
return data
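
# Illustrative sketch, commented out because the exact keys required by the
# RepositorySpecification base class are assumed here ('type'/'url'), not
# shown in this module; it mirrors the flag handling implemented above.
#
#   spec = SourceRepositorySpecification(
#       'ros_comm',
#       {'type': 'git', 'url': 'https://example.com/ros_comm.git',
#        'test_commits': True})
#   assert spec.get_data()['test_commits'] is True
#   assert 'test_pull_requests' not in spec.get_data()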
| bsd-3-clause |
mgagne/nova | nova/baserpc.py | 10 | 2562 | #
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Base RPC client and server common to all services.
"""
from oslo.config import cfg
from oslo import messaging
from oslo.serialization import jsonutils
from nova import rpc
CONF = cfg.CONF
rpcapi_cap_opt = cfg.StrOpt('baseapi',
help='Set a version cap for messages sent to the base api in any '
'service')
CONF.register_opt(rpcapi_cap_opt, 'upgrade_levels')
_NAMESPACE = 'baseapi'
class BaseAPI(object):
"""Client side of the base rpc API.
API version history:
1.0 - Initial version.
1.1 - Add get_backdoor_port
"""
VERSION_ALIASES = {
# baseapi was added in havana
}
def __init__(self, topic):
super(BaseAPI, self).__init__()
target = messaging.Target(topic=topic,
namespace=_NAMESPACE,
version='1.0')
version_cap = self.VERSION_ALIASES.get(CONF.upgrade_levels.baseapi,
CONF.upgrade_levels.baseapi)
self.client = rpc.get_client(target, version_cap=version_cap)
def ping(self, context, arg, timeout=None):
arg_p = jsonutils.to_primitive(arg)
cctxt = self.client.prepare(timeout=timeout)
return cctxt.call(context, 'ping', arg=arg_p)
def get_backdoor_port(self, context, host):
cctxt = self.client.prepare(server=host, version='1.1')
return cctxt.call(context, 'get_backdoor_port')
class BaseRPCAPI(object):
"""Server side of the base RPC API."""
target = messaging.Target(namespace=_NAMESPACE, version='1.1')
def __init__(self, service_name, backdoor_port):
self.service_name = service_name
self.backdoor_port = backdoor_port
def ping(self, context, arg):
resp = {'service': self.service_name, 'arg': arg}
return jsonutils.to_primitive(resp)
def get_backdoor_port(self, context):
return self.backdoor_port
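
# Illustrative sketch, commented out: driving the client side defined above.
# The 'compute' topic, the context object, and the host name are
# placeholders, not values taken from this module.
#
#   base = BaseAPI(topic='compute')
#   base.ping(context, 'hello', timeout=10)
#   port = base.get_backdoor_port(context, 'compute-host-1')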
| apache-2.0 |
CatsAndDogsbvba/odoo | addons/l10n_tr/__openerp__.py | 259 | 2056 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Turkey - Accounting',
'version': '1.beta',
'category': 'Localization/Account Charts',
'description': """
OpenERP module for the uniform chart of accounts template for Turkey.
======================================================================
After this module is installed, the Accounting configuration wizard runs.
* The wizard will ask you for the chart of accounts template, the company
the chart will be set up for, your bank account details, and the related
currency.
""",
'author': 'Ahmet Altınışık',
'maintainer': 'https://launchpad.net/~openerp-turkey',
'website':'https://launchpad.net/openerp-turkey',
'depends': [
'account',
'base_vat',
'account_chart',
],
'data': [
'account_code_template.xml',
'account_tdhp_turkey.xml',
'account_tax_code_template.xml',
'account_chart_template.xml',
'account_tax_template.xml',
'l10n_tr_wizard.xml',
],
'demo': [],
'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
abdulla-alali/CRE-NS3 | src/fd-net-device/bindings/modulegen__gcc_LP64.py | 6 | 300516 | from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
class ErrorHandler(pybindgen.settings.ErrorHandler):
def handle_error(self, wrapper, exception, traceback_):
warnings.warn("exception %r in wrapper %s" % (exception, wrapper))
return True
pybindgen.settings.error_handler = ErrorHandler()
import sys
def module_init():
root_module = Module('ns.fd_net_device', cpp_namespace='::ns3')
return root_module
def register_types(module):
root_module = module.get_root()
## address.h (module 'network'): ns3::Address [class]
module.add_class('Address', import_from_module='ns.network')
## address.h (module 'network'): ns3::Address::MaxSize_e [enumeration]
module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network')
## trace-helper.h (module 'network'): ns3::AsciiTraceHelper [class]
module.add_class('AsciiTraceHelper', import_from_module='ns.network')
## trace-helper.h (module 'network'): ns3::AsciiTraceHelperForDevice [class]
module.add_class('AsciiTraceHelperForDevice', allow_subclassing=True, import_from_module='ns.network')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class]
module.add_class('AttributeConstructionList', import_from_module='ns.core')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct]
module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
## buffer.h (module 'network'): ns3::Buffer [class]
module.add_class('Buffer', import_from_module='ns.network')
## buffer.h (module 'network'): ns3::Buffer::Iterator [class]
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::Buffer'])
## packet.h (module 'network'): ns3::ByteTagIterator [class]
module.add_class('ByteTagIterator', import_from_module='ns.network')
## packet.h (module 'network'): ns3::ByteTagIterator::Item [class]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagIterator'])
## byte-tag-list.h (module 'network'): ns3::ByteTagList [class]
module.add_class('ByteTagList', import_from_module='ns.network')
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator [class]
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList'])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item [struct]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList::Iterator'])
## callback.h (module 'core'): ns3::CallbackBase [class]
module.add_class('CallbackBase', import_from_module='ns.core')
## system-mutex.h (module 'core'): ns3::CriticalSection [class]
module.add_class('CriticalSection', import_from_module='ns.core')
## data-rate.h (module 'network'): ns3::DataRate [class]
module.add_class('DataRate', import_from_module='ns.network')
## event-id.h (module 'core'): ns3::EventId [class]
module.add_class('EventId', import_from_module='ns.core')
## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
module.add_class('Ipv4Address', import_from_module='ns.network')
## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address'])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask [class]
module.add_class('Ipv4Mask', import_from_module='ns.network')
## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
module.add_class('Ipv6Address', import_from_module='ns.network')
## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address'])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix [class]
module.add_class('Ipv6Prefix', import_from_module='ns.network')
## mac48-address.h (module 'network'): ns3::Mac48Address [class]
module.add_class('Mac48Address', import_from_module='ns.network')
## mac48-address.h (module 'network'): ns3::Mac48Address [class]
root_module['ns3::Mac48Address'].implicitly_converts_to(root_module['ns3::Address'])
## net-device-container.h (module 'network'): ns3::NetDeviceContainer [class]
module.add_class('NetDeviceContainer', import_from_module='ns.network')
## node-container.h (module 'network'): ns3::NodeContainer [class]
module.add_class('NodeContainer', import_from_module='ns.network')
## object-base.h (module 'core'): ns3::ObjectBase [class]
module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
## object.h (module 'core'): ns3::ObjectDeleter [struct]
module.add_class('ObjectDeleter', import_from_module='ns.core')
## object-factory.h (module 'core'): ns3::ObjectFactory [class]
module.add_class('ObjectFactory', import_from_module='ns.core')
## packet-metadata.h (module 'network'): ns3::PacketMetadata [class]
module.add_class('PacketMetadata', import_from_module='ns.network')
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [struct]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [enumeration]
module.add_enum('', ['PAYLOAD', 'HEADER', 'TRAILER'], outer_class=root_module['ns3::PacketMetadata::Item'], import_from_module='ns.network')
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator [class]
module.add_class('ItemIterator', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
## packet.h (module 'network'): ns3::PacketTagIterator [class]
module.add_class('PacketTagIterator', import_from_module='ns.network')
## packet.h (module 'network'): ns3::PacketTagIterator::Item [class]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagIterator'])
## packet-tag-list.h (module 'network'): ns3::PacketTagList [class]
module.add_class('PacketTagList', import_from_module='ns.network')
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData [struct]
module.add_class('TagData', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagList'])
## pcap-file.h (module 'network'): ns3::PcapFile [class]
module.add_class('PcapFile', import_from_module='ns.network')
## trace-helper.h (module 'network'): ns3::PcapHelper [class]
module.add_class('PcapHelper', import_from_module='ns.network')
## trace-helper.h (module 'network'): ns3::PcapHelper [enumeration]
module.add_enum('', ['DLT_NULL', 'DLT_EN10MB', 'DLT_PPP', 'DLT_RAW', 'DLT_IEEE802_11', 'DLT_PRISM_HEADER', 'DLT_IEEE802_11_RADIO'], outer_class=root_module['ns3::PcapHelper'], import_from_module='ns.network')
## trace-helper.h (module 'network'): ns3::PcapHelperForDevice [class]
module.add_class('PcapHelperForDevice', allow_subclassing=True, import_from_module='ns.network')
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simulator.h (module 'core'): ns3::Simulator [class]
module.add_class('Simulator', destructor_visibility='private', import_from_module='ns.core')
## system-condition.h (module 'core'): ns3::SystemCondition [class]
module.add_class('SystemCondition', import_from_module='ns.core')
## system-mutex.h (module 'core'): ns3::SystemMutex [class]
module.add_class('SystemMutex', import_from_module='ns.core')
## tag.h (module 'network'): ns3::Tag [class]
module.add_class('Tag', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
## tag-buffer.h (module 'network'): ns3::TagBuffer [class]
module.add_class('TagBuffer', import_from_module='ns.network')
## type-id.h (module 'core'): ns3::TypeId [class]
module.add_class('TypeId', import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration]
module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct]
module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct]
module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## empty.h (module 'core'): ns3::empty [class]
module.add_class('empty', import_from_module='ns.core')
## int64x64-double.h (module 'core'): ns3::int64x64_t [class]
module.add_class('int64x64_t', import_from_module='ns.core')
## chunk.h (module 'network'): ns3::Chunk [class]
module.add_class('Chunk', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
## fd-net-device-helper.h (module 'fd-net-device'): ns3::FdNetDeviceHelper [class]
module.add_class('FdNetDeviceHelper', parent=[root_module['ns3::PcapHelperForDevice'], root_module['ns3::AsciiTraceHelperForDevice']])
## header.h (module 'network'): ns3::Header [class]
module.add_class('Header', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
## object.h (module 'core'): ns3::Object [class]
module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
## object.h (module 'core'): ns3::Object::AggregateIterator [class]
module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object'])
## pcap-file-wrapper.h (module 'network'): ns3::PcapFileWrapper [class]
module.add_class('PcapFileWrapper', import_from_module='ns.network', parent=root_module['ns3::Object'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::EventImpl', 'ns3::empty', 'ns3::DefaultDeleter<ns3::EventImpl>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::FdReader, ns3::empty, ns3::DefaultDeleter<ns3::FdReader> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::FdReader', 'ns3::empty', 'ns3::DefaultDeleter<ns3::FdReader>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::NixVector', 'ns3::empty', 'ns3::DefaultDeleter<ns3::NixVector>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::OutputStreamWrapper', 'ns3::empty', 'ns3::DefaultDeleter<ns3::OutputStreamWrapper>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Packet', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Packet>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::SystemThread', 'ns3::empty', 'ns3::DefaultDeleter<ns3::SystemThread>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## system-thread.h (module 'core'): ns3::SystemThread [class]
module.add_class('SystemThread', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> >'])
## nstime.h (module 'core'): ns3::Time [class]
module.add_class('Time', import_from_module='ns.core')
## nstime.h (module 'core'): ns3::Time::Unit [enumeration]
module.add_enum('Unit', ['S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core')
## nstime.h (module 'core'): ns3::Time [class]
root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t'])
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class]
module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
## trailer.h (module 'network'): ns3::Trailer [class]
module.add_class('Trailer', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
## attribute.h (module 'core'): ns3::AttributeAccessor [class]
module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
## attribute.h (module 'core'): ns3::AttributeChecker [class]
module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
## attribute.h (module 'core'): ns3::AttributeValue [class]
module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
## callback.h (module 'core'): ns3::CallbackChecker [class]
module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## callback.h (module 'core'): ns3::CallbackImplBase [class]
module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
## callback.h (module 'core'): ns3::CallbackValue [class]
module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## data-rate.h (module 'network'): ns3::DataRateChecker [class]
module.add_class('DataRateChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## data-rate.h (module 'network'): ns3::DataRateValue [class]
module.add_class('DataRateValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## attribute.h (module 'core'): ns3::EmptyAttributeValue [class]
module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## emu-fd-net-device-helper.h (module 'fd-net-device'): ns3::EmuFdNetDeviceHelper [class]
module.add_class('EmuFdNetDeviceHelper', parent=root_module['ns3::FdNetDeviceHelper'])
## event-impl.h (module 'core'): ns3::EventImpl [class]
module.add_class('EventImpl', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
## unix-fd-reader.h (module 'core'): ns3::FdReader [class]
module.add_class('FdReader', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::FdReader, ns3::empty, ns3::DefaultDeleter<ns3::FdReader> >'])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker [class]
module.add_class('Ipv4AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue [class]
module.add_class('Ipv4AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker [class]
module.add_class('Ipv4MaskChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue [class]
module.add_class('Ipv4MaskValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker [class]
module.add_class('Ipv6AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue [class]
module.add_class('Ipv6AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker [class]
module.add_class('Ipv6PrefixChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue [class]
module.add_class('Ipv6PrefixValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## mac48-address.h (module 'network'): ns3::Mac48AddressChecker [class]
module.add_class('Mac48AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## mac48-address.h (module 'network'): ns3::Mac48AddressValue [class]
module.add_class('Mac48AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## net-device.h (module 'network'): ns3::NetDevice [class]
module.add_class('NetDevice', import_from_module='ns.network', parent=root_module['ns3::Object'])
## net-device.h (module 'network'): ns3::NetDevice::PacketType [enumeration]
module.add_enum('PacketType', ['PACKET_HOST', 'NS3_PACKET_HOST', 'PACKET_BROADCAST', 'NS3_PACKET_BROADCAST', 'PACKET_MULTICAST', 'NS3_PACKET_MULTICAST', 'PACKET_OTHERHOST', 'NS3_PACKET_OTHERHOST'], outer_class=root_module['ns3::NetDevice'], import_from_module='ns.network')
## nix-vector.h (module 'network'): ns3::NixVector [class]
module.add_class('NixVector', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
## node.h (module 'network'): ns3::Node [class]
module.add_class('Node', import_from_module='ns.network', parent=root_module['ns3::Object'])
## object-factory.h (module 'core'): ns3::ObjectFactoryChecker [class]
module.add_class('ObjectFactoryChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## object-factory.h (module 'core'): ns3::ObjectFactoryValue [class]
module.add_class('ObjectFactoryValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper [class]
module.add_class('OutputStreamWrapper', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >'])
## packet.h (module 'network'): ns3::Packet [class]
module.add_class('Packet', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
## planetlab-fd-net-device-helper.h (module 'fd-net-device'): ns3::PlanetLabFdNetDeviceHelper [class]
module.add_class('PlanetLabFdNetDeviceHelper', parent=root_module['ns3::EmuFdNetDeviceHelper'])
## tap-fd-net-device-helper.h (module 'fd-net-device'): ns3::TapFdNetDeviceHelper [class]
module.add_class('TapFdNetDeviceHelper', parent=root_module['ns3::EmuFdNetDeviceHelper'])
## nstime.h (module 'core'): ns3::TimeChecker [class]
module.add_class('TimeChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## nstime.h (module 'core'): ns3::TimeValue [class]
module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## type-id.h (module 'core'): ns3::TypeIdChecker [class]
module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## type-id.h (module 'core'): ns3::TypeIdValue [class]
module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## address.h (module 'network'): ns3::AddressChecker [class]
module.add_class('AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## address.h (module 'network'): ns3::AddressValue [class]
module.add_class('AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## fd-net-device.h (module 'fd-net-device'): ns3::FdNetDevice [class]
module.add_class('FdNetDevice', parent=root_module['ns3::NetDevice'])
## fd-net-device.h (module 'fd-net-device'): ns3::FdNetDevice::EncapsulationMode [enumeration]
module.add_enum('EncapsulationMode', ['DIX', 'LLC', 'DIXPI'], outer_class=root_module['ns3::FdNetDevice'])
## fd-net-device.h (module 'fd-net-device'): ns3::FdNetDeviceFdReader [class]
module.add_class('FdNetDeviceFdReader', parent=root_module['ns3::FdReader'])
## Register a nested module for the namespace FatalImpl
nested_module = module.add_cpp_namespace('FatalImpl')
register_types_ns3_FatalImpl(nested_module)
def register_types_ns3_FatalImpl(module):
root_module = module.get_root()
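# Sketch of the driver these generated binding files usually end with
# (commented out here; the exact tail is not shown in this file, so this is
# an assumption based on the usual pybindgen scaffolding - FileCodeSink and
# sys are already imported at the top of the file):
#
# def main():
#     out = FileCodeSink(sys.stdout)
#     root_module = module_init()
#     register_types(root_module)
#     register_methods(root_module)
#     root_module.generate(out)
#
# if __name__ == '__main__':
#     main()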
def register_methods(root_module):
register_Ns3Address_methods(root_module, root_module['ns3::Address'])
register_Ns3AsciiTraceHelper_methods(root_module, root_module['ns3::AsciiTraceHelper'])
register_Ns3AsciiTraceHelperForDevice_methods(root_module, root_module['ns3::AsciiTraceHelperForDevice'])
register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
register_Ns3Buffer_methods(root_module, root_module['ns3::Buffer'])
register_Ns3BufferIterator_methods(root_module, root_module['ns3::Buffer::Iterator'])
register_Ns3ByteTagIterator_methods(root_module, root_module['ns3::ByteTagIterator'])
register_Ns3ByteTagIteratorItem_methods(root_module, root_module['ns3::ByteTagIterator::Item'])
register_Ns3ByteTagList_methods(root_module, root_module['ns3::ByteTagList'])
register_Ns3ByteTagListIterator_methods(root_module, root_module['ns3::ByteTagList::Iterator'])
register_Ns3ByteTagListIteratorItem_methods(root_module, root_module['ns3::ByteTagList::Iterator::Item'])
register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
register_Ns3CriticalSection_methods(root_module, root_module['ns3::CriticalSection'])
register_Ns3DataRate_methods(root_module, root_module['ns3::DataRate'])
register_Ns3EventId_methods(root_module, root_module['ns3::EventId'])
register_Ns3Ipv4Address_methods(root_module, root_module['ns3::Ipv4Address'])
register_Ns3Ipv4Mask_methods(root_module, root_module['ns3::Ipv4Mask'])
register_Ns3Ipv6Address_methods(root_module, root_module['ns3::Ipv6Address'])
register_Ns3Ipv6Prefix_methods(root_module, root_module['ns3::Ipv6Prefix'])
register_Ns3Mac48Address_methods(root_module, root_module['ns3::Mac48Address'])
register_Ns3NetDeviceContainer_methods(root_module, root_module['ns3::NetDeviceContainer'])
register_Ns3NodeContainer_methods(root_module, root_module['ns3::NodeContainer'])
register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
register_Ns3ObjectFactory_methods(root_module, root_module['ns3::ObjectFactory'])
register_Ns3PacketMetadata_methods(root_module, root_module['ns3::PacketMetadata'])
register_Ns3PacketMetadataItem_methods(root_module, root_module['ns3::PacketMetadata::Item'])
register_Ns3PacketMetadataItemIterator_methods(root_module, root_module['ns3::PacketMetadata::ItemIterator'])
register_Ns3PacketTagIterator_methods(root_module, root_module['ns3::PacketTagIterator'])
register_Ns3PacketTagIteratorItem_methods(root_module, root_module['ns3::PacketTagIterator::Item'])
register_Ns3PacketTagList_methods(root_module, root_module['ns3::PacketTagList'])
register_Ns3PacketTagListTagData_methods(root_module, root_module['ns3::PacketTagList::TagData'])
register_Ns3PcapFile_methods(root_module, root_module['ns3::PcapFile'])
register_Ns3PcapHelper_methods(root_module, root_module['ns3::PcapHelper'])
register_Ns3PcapHelperForDevice_methods(root_module, root_module['ns3::PcapHelperForDevice'])
register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
register_Ns3Simulator_methods(root_module, root_module['ns3::Simulator'])
register_Ns3SystemCondition_methods(root_module, root_module['ns3::SystemCondition'])
register_Ns3SystemMutex_methods(root_module, root_module['ns3::SystemMutex'])
register_Ns3Tag_methods(root_module, root_module['ns3::Tag'])
register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer'])
register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t'])
register_Ns3Chunk_methods(root_module, root_module['ns3::Chunk'])
register_Ns3FdNetDeviceHelper_methods(root_module, root_module['ns3::FdNetDeviceHelper'])
register_Ns3Header_methods(root_module, root_module['ns3::Header'])
register_Ns3Object_methods(root_module, root_module['ns3::Object'])
register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
register_Ns3PcapFileWrapper_methods(root_module, root_module['ns3::PcapFileWrapper'])
register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
register_Ns3SimpleRefCount__Ns3FdReader_Ns3Empty_Ns3DefaultDeleter__lt__ns3FdReader__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::FdReader, ns3::empty, ns3::DefaultDeleter<ns3::FdReader> >'])
register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
register_Ns3SimpleRefCount__Ns3OutputStreamWrapper_Ns3Empty_Ns3DefaultDeleter__lt__ns3OutputStreamWrapper__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >'])
register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
register_Ns3SimpleRefCount__Ns3SystemThread_Ns3Empty_Ns3DefaultDeleter__lt__ns3SystemThread__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> >'])
register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
register_Ns3SystemThread_methods(root_module, root_module['ns3::SystemThread'])
register_Ns3Time_methods(root_module, root_module['ns3::Time'])
register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
register_Ns3Trailer_methods(root_module, root_module['ns3::Trailer'])
register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
register_Ns3DataRateChecker_methods(root_module, root_module['ns3::DataRateChecker'])
register_Ns3DataRateValue_methods(root_module, root_module['ns3::DataRateValue'])
register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
register_Ns3EmuFdNetDeviceHelper_methods(root_module, root_module['ns3::EmuFdNetDeviceHelper'])
register_Ns3EventImpl_methods(root_module, root_module['ns3::EventImpl'])
register_Ns3FdReader_methods(root_module, root_module['ns3::FdReader'])
register_Ns3Ipv4AddressChecker_methods(root_module, root_module['ns3::Ipv4AddressChecker'])
register_Ns3Ipv4AddressValue_methods(root_module, root_module['ns3::Ipv4AddressValue'])
register_Ns3Ipv4MaskChecker_methods(root_module, root_module['ns3::Ipv4MaskChecker'])
register_Ns3Ipv4MaskValue_methods(root_module, root_module['ns3::Ipv4MaskValue'])
register_Ns3Ipv6AddressChecker_methods(root_module, root_module['ns3::Ipv6AddressChecker'])
register_Ns3Ipv6AddressValue_methods(root_module, root_module['ns3::Ipv6AddressValue'])
register_Ns3Ipv6PrefixChecker_methods(root_module, root_module['ns3::Ipv6PrefixChecker'])
register_Ns3Ipv6PrefixValue_methods(root_module, root_module['ns3::Ipv6PrefixValue'])
register_Ns3Mac48AddressChecker_methods(root_module, root_module['ns3::Mac48AddressChecker'])
register_Ns3Mac48AddressValue_methods(root_module, root_module['ns3::Mac48AddressValue'])
register_Ns3NetDevice_methods(root_module, root_module['ns3::NetDevice'])
register_Ns3NixVector_methods(root_module, root_module['ns3::NixVector'])
register_Ns3Node_methods(root_module, root_module['ns3::Node'])
register_Ns3ObjectFactoryChecker_methods(root_module, root_module['ns3::ObjectFactoryChecker'])
register_Ns3ObjectFactoryValue_methods(root_module, root_module['ns3::ObjectFactoryValue'])
register_Ns3OutputStreamWrapper_methods(root_module, root_module['ns3::OutputStreamWrapper'])
register_Ns3Packet_methods(root_module, root_module['ns3::Packet'])
register_Ns3PlanetLabFdNetDeviceHelper_methods(root_module, root_module['ns3::PlanetLabFdNetDeviceHelper'])
register_Ns3TapFdNetDeviceHelper_methods(root_module, root_module['ns3::TapFdNetDeviceHelper'])
register_Ns3TimeChecker_methods(root_module, root_module['ns3::TimeChecker'])
register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue'])
register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
register_Ns3AddressChecker_methods(root_module, root_module['ns3::AddressChecker'])
register_Ns3AddressValue_methods(root_module, root_module['ns3::AddressValue'])
register_Ns3FdNetDevice_methods(root_module, root_module['ns3::FdNetDevice'])
register_Ns3FdNetDeviceFdReader_methods(root_module, root_module['ns3::FdNetDeviceFdReader'])
return
def register_Ns3Address_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## address.h (module 'network'): ns3::Address::Address() [constructor]
cls.add_constructor([])
## address.h (module 'network'): ns3::Address::Address(uint8_t type, uint8_t const * buffer, uint8_t len) [constructor]
cls.add_constructor([param('uint8_t', 'type'), param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
## address.h (module 'network'): ns3::Address::Address(ns3::Address const & address) [copy constructor]
cls.add_constructor([param('ns3::Address const &', 'address')])
## address.h (module 'network'): bool ns3::Address::CheckCompatible(uint8_t type, uint8_t len) const [member function]
cls.add_method('CheckCompatible',
'bool',
[param('uint8_t', 'type'), param('uint8_t', 'len')],
is_const=True)
## address.h (module 'network'): uint32_t ns3::Address::CopyAllFrom(uint8_t const * buffer, uint8_t len) [member function]
cls.add_method('CopyAllFrom',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
## address.h (module 'network'): uint32_t ns3::Address::CopyAllTo(uint8_t * buffer, uint8_t len) const [member function]
cls.add_method('CopyAllTo',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint8_t', 'len')],
is_const=True)
## address.h (module 'network'): uint32_t ns3::Address::CopyFrom(uint8_t const * buffer, uint8_t len) [member function]
cls.add_method('CopyFrom',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
## address.h (module 'network'): uint32_t ns3::Address::CopyTo(uint8_t * buffer) const [member function]
cls.add_method('CopyTo',
'uint32_t',
[param('uint8_t *', 'buffer')],
is_const=True)
## address.h (module 'network'): void ns3::Address::Deserialize(ns3::TagBuffer buffer) [member function]
cls.add_method('Deserialize',
'void',
[param('ns3::TagBuffer', 'buffer')])
## address.h (module 'network'): uint8_t ns3::Address::GetLength() const [member function]
cls.add_method('GetLength',
'uint8_t',
[],
is_const=True)
## address.h (module 'network'): uint32_t ns3::Address::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## address.h (module 'network'): bool ns3::Address::IsInvalid() const [member function]
cls.add_method('IsInvalid',
'bool',
[],
is_const=True)
## address.h (module 'network'): bool ns3::Address::IsMatchingType(uint8_t type) const [member function]
cls.add_method('IsMatchingType',
'bool',
[param('uint8_t', 'type')],
is_const=True)
## address.h (module 'network'): static uint8_t ns3::Address::Register() [member function]
cls.add_method('Register',
'uint8_t',
[],
is_static=True)
## address.h (module 'network'): void ns3::Address::Serialize(ns3::TagBuffer buffer) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::TagBuffer', 'buffer')],
is_const=True)
return
def register_Ns3AsciiTraceHelper_methods(root_module, cls):
## trace-helper.h (module 'network'): ns3::AsciiTraceHelper::AsciiTraceHelper(ns3::AsciiTraceHelper const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AsciiTraceHelper const &', 'arg0')])
## trace-helper.h (module 'network'): ns3::AsciiTraceHelper::AsciiTraceHelper() [constructor]
cls.add_constructor([])
## trace-helper.h (module 'network'): ns3::Ptr<ns3::OutputStreamWrapper> ns3::AsciiTraceHelper::CreateFileStream(std::string filename, std::_Ios_Openmode filemode=std::ios_base::out) [member function]
cls.add_method('CreateFileStream',
'ns3::Ptr< ns3::OutputStreamWrapper >',
[param('std::string', 'filename'), param('std::_Ios_Openmode', 'filemode', default_value='std::ios_base::out')])
## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultDequeueSinkWithContext(ns3::Ptr<ns3::OutputStreamWrapper> file, std::string context, ns3::Ptr<const ns3::Packet> p) [member function]
cls.add_method('DefaultDequeueSinkWithContext',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('std::string', 'context'), param('ns3::Ptr< ns3::Packet const >', 'p')],
is_static=True)
## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultDequeueSinkWithoutContext(ns3::Ptr<ns3::OutputStreamWrapper> file, ns3::Ptr<const ns3::Packet> p) [member function]
cls.add_method('DefaultDequeueSinkWithoutContext',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('ns3::Ptr< ns3::Packet const >', 'p')],
is_static=True)
## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultDropSinkWithContext(ns3::Ptr<ns3::OutputStreamWrapper> file, std::string context, ns3::Ptr<const ns3::Packet> p) [member function]
cls.add_method('DefaultDropSinkWithContext',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('std::string', 'context'), param('ns3::Ptr< ns3::Packet const >', 'p')],
is_static=True)
## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultDropSinkWithoutContext(ns3::Ptr<ns3::OutputStreamWrapper> file, ns3::Ptr<const ns3::Packet> p) [member function]
cls.add_method('DefaultDropSinkWithoutContext',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('ns3::Ptr< ns3::Packet const >', 'p')],
is_static=True)
## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultEnqueueSinkWithContext(ns3::Ptr<ns3::OutputStreamWrapper> file, std::string context, ns3::Ptr<const ns3::Packet> p) [member function]
cls.add_method('DefaultEnqueueSinkWithContext',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('std::string', 'context'), param('ns3::Ptr< ns3::Packet const >', 'p')],
is_static=True)
## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultEnqueueSinkWithoutContext(ns3::Ptr<ns3::OutputStreamWrapper> file, ns3::Ptr<const ns3::Packet> p) [member function]
cls.add_method('DefaultEnqueueSinkWithoutContext',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('ns3::Ptr< ns3::Packet const >', 'p')],
is_static=True)
## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultReceiveSinkWithContext(ns3::Ptr<ns3::OutputStreamWrapper> file, std::string context, ns3::Ptr<const ns3::Packet> p) [member function]
cls.add_method('DefaultReceiveSinkWithContext',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('std::string', 'context'), param('ns3::Ptr< ns3::Packet const >', 'p')],
is_static=True)
## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultReceiveSinkWithoutContext(ns3::Ptr<ns3::OutputStreamWrapper> file, ns3::Ptr<const ns3::Packet> p) [member function]
cls.add_method('DefaultReceiveSinkWithoutContext',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('ns3::Ptr< ns3::Packet const >', 'p')],
is_static=True)
## trace-helper.h (module 'network'): std::string ns3::AsciiTraceHelper::GetFilenameFromDevice(std::string prefix, ns3::Ptr<ns3::NetDevice> device, bool useObjectNames=true) [member function]
cls.add_method('GetFilenameFromDevice',
'std::string',
[param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'device'), param('bool', 'useObjectNames', default_value='true')])
## trace-helper.h (module 'network'): std::string ns3::AsciiTraceHelper::GetFilenameFromInterfacePair(std::string prefix, ns3::Ptr<ns3::Object> object, uint32_t interface, bool useObjectNames=true) [member function]
cls.add_method('GetFilenameFromInterfacePair',
'std::string',
[param('std::string', 'prefix'), param('ns3::Ptr< ns3::Object >', 'object'), param('uint32_t', 'interface'), param('bool', 'useObjectNames', default_value='true')])
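    # Hedged usage sketch of the class bound above, assuming the compiled
    # bindings import as `ns.network` (the module name is not fixed here):
    #
    #   import ns.network
    #   ascii = ns.network.AsciiTraceHelper()
    #   stream = ascii.CreateFileStream("trace.tr")   # Ptr<OutputStreamWrapper>
    #   # 'dev' below is a hypothetical Ptr<NetDevice> obtained elsewhere:
    #   name = ascii.GetFilenameFromDevice("prefix", dev)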
return
def register_Ns3AsciiTraceHelperForDevice_methods(root_module, cls):
## trace-helper.h (module 'network'): ns3::AsciiTraceHelperForDevice::AsciiTraceHelperForDevice(ns3::AsciiTraceHelperForDevice const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AsciiTraceHelperForDevice const &', 'arg0')])
## trace-helper.h (module 'network'): ns3::AsciiTraceHelperForDevice::AsciiTraceHelperForDevice() [constructor]
cls.add_constructor([])
## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(std::string prefix, ns3::Ptr<ns3::NetDevice> nd, bool explicitFilename=false) [member function]
cls.add_method('EnableAscii',
'void',
[param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'explicitFilename', default_value='false')])
## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(ns3::Ptr<ns3::OutputStreamWrapper> stream, ns3::Ptr<ns3::NetDevice> nd) [member function]
cls.add_method('EnableAscii',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('ns3::Ptr< ns3::NetDevice >', 'nd')])
## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(std::string prefix, std::string ndName, bool explicitFilename=false) [member function]
cls.add_method('EnableAscii',
'void',
[param('std::string', 'prefix'), param('std::string', 'ndName'), param('bool', 'explicitFilename', default_value='false')])
## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(ns3::Ptr<ns3::OutputStreamWrapper> stream, std::string ndName) [member function]
cls.add_method('EnableAscii',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('std::string', 'ndName')])
## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(std::string prefix, ns3::NetDeviceContainer d) [member function]
cls.add_method('EnableAscii',
'void',
[param('std::string', 'prefix'), param('ns3::NetDeviceContainer', 'd')])
## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(ns3::Ptr<ns3::OutputStreamWrapper> stream, ns3::NetDeviceContainer d) [member function]
cls.add_method('EnableAscii',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('ns3::NetDeviceContainer', 'd')])
## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(std::string prefix, ns3::NodeContainer n) [member function]
cls.add_method('EnableAscii',
'void',
[param('std::string', 'prefix'), param('ns3::NodeContainer', 'n')])
## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(ns3::Ptr<ns3::OutputStreamWrapper> stream, ns3::NodeContainer n) [member function]
cls.add_method('EnableAscii',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('ns3::NodeContainer', 'n')])
## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(std::string prefix, uint32_t nodeid, uint32_t deviceid, bool explicitFilename) [member function]
cls.add_method('EnableAscii',
'void',
[param('std::string', 'prefix'), param('uint32_t', 'nodeid'), param('uint32_t', 'deviceid'), param('bool', 'explicitFilename')])
## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(ns3::Ptr<ns3::OutputStreamWrapper> stream, uint32_t nodeid, uint32_t deviceid) [member function]
cls.add_method('EnableAscii',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('uint32_t', 'nodeid'), param('uint32_t', 'deviceid')])
## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAsciiAll(std::string prefix) [member function]
cls.add_method('EnableAsciiAll',
'void',
[param('std::string', 'prefix')])
## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAsciiAll(ns3::Ptr<ns3::OutputStreamWrapper> stream) [member function]
cls.add_method('EnableAsciiAll',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')])
## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAsciiInternal(ns3::Ptr<ns3::OutputStreamWrapper> stream, std::string prefix, ns3::Ptr<ns3::NetDevice> nd, bool explicitFilename) [member function]
cls.add_method('EnableAsciiInternal',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'explicitFilename')],
is_pure_virtual=True, is_virtual=True)
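    # The repeated add_method('EnableAscii', ...) calls above register a C++
    # overload set under one Python name; pybindgen generates a wrapper that
    # dispatches among the overloads based on the arguments supplied at call
    # time. EnableAsciiInternal is flagged is_pure_virtual=True, so this class
    # is abstract and only concrete device helpers implementing it can be
    # instantiated.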
return
def register_Ns3AttributeConstructionList_methods(root_module, cls):
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList(ns3::AttributeConstructionList const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList() [constructor]
cls.add_constructor([])
## attribute-construction-list.h (module 'core'): void ns3::AttributeConstructionList::Add(std::string name, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::Ptr<ns3::AttributeValue> value) [member function]
cls.add_method('Add',
'void',
[param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')])
## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::Begin() const [member function]
cls.add_method('Begin',
'std::_List_const_iterator< ns3::AttributeConstructionList::Item >',
[],
is_const=True)
## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::End() const [member function]
cls.add_method('End',
'std::_List_const_iterator< ns3::AttributeConstructionList::Item >',
[],
is_const=True)
## attribute-construction-list.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeConstructionList::Find(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('Find',
'ns3::Ptr< ns3::AttributeValue >',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True)
return
def register_Ns3AttributeConstructionListItem_methods(root_module, cls):
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item() [constructor]
cls.add_constructor([])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item(ns3::AttributeConstructionList::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::checker [variable]
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::value [variable]
cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False)
return
def register_Ns3Buffer_methods(root_module, cls):
## buffer.h (module 'network'): ns3::Buffer::Buffer() [constructor]
cls.add_constructor([])
## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize) [constructor]
cls.add_constructor([param('uint32_t', 'dataSize')])
## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize, bool initialize) [constructor]
cls.add_constructor([param('uint32_t', 'dataSize'), param('bool', 'initialize')])
## buffer.h (module 'network'): ns3::Buffer::Buffer(ns3::Buffer const & o) [copy constructor]
cls.add_constructor([param('ns3::Buffer const &', 'o')])
## buffer.h (module 'network'): bool ns3::Buffer::AddAtEnd(uint32_t end) [member function]
cls.add_method('AddAtEnd',
'bool',
[param('uint32_t', 'end')])
## buffer.h (module 'network'): void ns3::Buffer::AddAtEnd(ns3::Buffer const & o) [member function]
cls.add_method('AddAtEnd',
'void',
[param('ns3::Buffer const &', 'o')])
## buffer.h (module 'network'): bool ns3::Buffer::AddAtStart(uint32_t start) [member function]
cls.add_method('AddAtStart',
'bool',
[param('uint32_t', 'start')])
## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::Begin() const [member function]
cls.add_method('Begin',
'ns3::Buffer::Iterator',
[],
is_const=True)
## buffer.h (module 'network'): void ns3::Buffer::CopyData(std::ostream * os, uint32_t size) const [member function]
cls.add_method('CopyData',
'void',
[param('std::ostream *', 'os'), param('uint32_t', 'size')],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::CopyData(uint8_t * buffer, uint32_t size) const [member function]
cls.add_method('CopyData',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')],
is_const=True)
## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFragment(uint32_t start, uint32_t length) const [member function]
cls.add_method('CreateFragment',
'ns3::Buffer',
[param('uint32_t', 'start'), param('uint32_t', 'length')],
is_const=True)
## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFullCopy() const [member function]
cls.add_method('CreateFullCopy',
'ns3::Buffer',
[],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::End() const [member function]
cls.add_method('End',
'ns3::Buffer::Iterator',
[],
is_const=True)
## buffer.h (module 'network'): int32_t ns3::Buffer::GetCurrentEndOffset() const [member function]
cls.add_method('GetCurrentEndOffset',
'int32_t',
[],
is_const=True)
## buffer.h (module 'network'): int32_t ns3::Buffer::GetCurrentStartOffset() const [member function]
cls.add_method('GetCurrentStartOffset',
'int32_t',
[],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSize() const [member function]
cls.add_method('GetSize',
'uint32_t',
[],
is_const=True)
## buffer.h (module 'network'): uint8_t const * ns3::Buffer::PeekData() const [member function]
cls.add_method('PeekData',
'uint8_t const *',
[],
is_const=True)
## buffer.h (module 'network'): void ns3::Buffer::RemoveAtEnd(uint32_t end) [member function]
cls.add_method('RemoveAtEnd',
'void',
[param('uint32_t', 'end')])
## buffer.h (module 'network'): void ns3::Buffer::RemoveAtStart(uint32_t start) [member function]
cls.add_method('RemoveAtStart',
'void',
[param('uint32_t', 'start')])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
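    # Hedged usage sketch (assuming the module imports as `ns.network`):
    #
    #   import ns.network
    #   buf = ns.network.Buffer(16)      # Buffer(uint32_t dataSize)
    #   buf.AddAtStart(4)                # grow by 4 bytes at the front
    #   print(buf.GetSize())             # -> 20
    #   it = buf.Begin()                 # Buffer::Iterator for reads/writes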
return
def register_Ns3BufferIterator_methods(root_module, cls):
## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator(ns3::Buffer::Iterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Buffer::Iterator const &', 'arg0')])
## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator() [constructor]
cls.add_constructor([])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size) [member function]
cls.add_method('CalculateIpChecksum',
'uint16_t',
[param('uint16_t', 'size')])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size, uint32_t initialChecksum) [member function]
cls.add_method('CalculateIpChecksum',
'uint16_t',
[param('uint16_t', 'size'), param('uint32_t', 'initialChecksum')])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetDistanceFrom(ns3::Buffer::Iterator const & o) const [member function]
cls.add_method('GetDistanceFrom',
'uint32_t',
[param('ns3::Buffer::Iterator const &', 'o')],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetSize() const [member function]
cls.add_method('GetSize',
'uint32_t',
[],
is_const=True)
## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsEnd() const [member function]
cls.add_method('IsEnd',
'bool',
[],
is_const=True)
## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsStart() const [member function]
cls.add_method('IsStart',
'bool',
[],
is_const=True)
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next() [member function]
cls.add_method('Next',
'void',
[])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next(uint32_t delta) [member function]
cls.add_method('Next',
'void',
[param('uint32_t', 'delta')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev() [member function]
cls.add_method('Prev',
'void',
[])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev(uint32_t delta) [member function]
cls.add_method('Prev',
'void',
[param('uint32_t', 'delta')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(uint8_t * buffer, uint32_t size) [member function]
cls.add_method('Read',
'void',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadLsbtohU16() [member function]
cls.add_method('ReadLsbtohU16',
'uint16_t',
[])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadLsbtohU32() [member function]
cls.add_method('ReadLsbtohU32',
'uint32_t',
[])
## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadLsbtohU64() [member function]
cls.add_method('ReadLsbtohU64',
'uint64_t',
[])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadNtohU16() [member function]
cls.add_method('ReadNtohU16',
'uint16_t',
[])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadNtohU32() [member function]
cls.add_method('ReadNtohU32',
'uint32_t',
[])
## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadNtohU64() [member function]
cls.add_method('ReadNtohU64',
'uint64_t',
[])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadU16() [member function]
cls.add_method('ReadU16',
'uint16_t',
[])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadU32() [member function]
cls.add_method('ReadU32',
'uint32_t',
[])
## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadU64() [member function]
cls.add_method('ReadU64',
'uint64_t',
[])
## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::ReadU8() [member function]
cls.add_method('ReadU8',
'uint8_t',
[])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Write',
'void',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(ns3::Buffer::Iterator start, ns3::Buffer::Iterator end) [member function]
cls.add_method('Write',
'void',
[param('ns3::Buffer::Iterator', 'start'), param('ns3::Buffer::Iterator', 'end')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU16(uint16_t data) [member function]
cls.add_method('WriteHtolsbU16',
'void',
[param('uint16_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU32(uint32_t data) [member function]
cls.add_method('WriteHtolsbU32',
'void',
[param('uint32_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU64(uint64_t data) [member function]
cls.add_method('WriteHtolsbU64',
'void',
[param('uint64_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU16(uint16_t data) [member function]
cls.add_method('WriteHtonU16',
'void',
[param('uint16_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU32(uint32_t data) [member function]
cls.add_method('WriteHtonU32',
'void',
[param('uint32_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU64(uint64_t data) [member function]
cls.add_method('WriteHtonU64',
'void',
[param('uint64_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU16(uint16_t data) [member function]
cls.add_method('WriteU16',
'void',
[param('uint16_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU32(uint32_t data) [member function]
cls.add_method('WriteU32',
'void',
[param('uint32_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU64(uint64_t data) [member function]
cls.add_method('WriteU64',
'void',
[param('uint64_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data) [member function]
cls.add_method('WriteU8',
'void',
[param('uint8_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data, uint32_t len) [member function]
cls.add_method('WriteU8',
'void',
[param('uint8_t', 'data'), param('uint32_t', 'len')])
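    # The Read*/Write* families above come in three byte orders: host order
    # (ReadU16/WriteU16, ...), big-endian network order
    # (ReadNtohU16/WriteHtonU16, ...), and little-endian
    # (ReadLsbtohU16/WriteHtolsbU16, ...). Continuing the Buffer sketch:
    #
    #   it = buf.Begin()
    #   it.WriteHtonU32(0xdeadbeef)      # serialize in network byte order
    #   it = buf.Begin()
    #   assert it.ReadNtohU32() == 0xdeadbeef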
return
def register_Ns3ByteTagIterator_methods(root_module, cls):
## packet.h (module 'network'): ns3::ByteTagIterator::ByteTagIterator(ns3::ByteTagIterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ByteTagIterator const &', 'arg0')])
## packet.h (module 'network'): bool ns3::ByteTagIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## packet.h (module 'network'): ns3::ByteTagIterator::Item ns3::ByteTagIterator::Next() [member function]
cls.add_method('Next',
'ns3::ByteTagIterator::Item',
[])
return
def register_Ns3ByteTagIteratorItem_methods(root_module, cls):
## packet.h (module 'network'): ns3::ByteTagIterator::Item::Item(ns3::ByteTagIterator::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ByteTagIterator::Item const &', 'arg0')])
## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetEnd() const [member function]
cls.add_method('GetEnd',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetStart() const [member function]
cls.add_method('GetStart',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): void ns3::ByteTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
cls.add_method('GetTag',
'void',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): ns3::TypeId ns3::ByteTagIterator::Item::GetTypeId() const [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_const=True)
return
def register_Ns3ByteTagList_methods(root_module, cls):
## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList() [constructor]
cls.add_constructor([])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList(ns3::ByteTagList const & o) [copy constructor]
cls.add_constructor([param('ns3::ByteTagList const &', 'o')])
## byte-tag-list.h (module 'network'): ns3::TagBuffer ns3::ByteTagList::Add(ns3::TypeId tid, uint32_t bufferSize, int32_t start, int32_t end) [member function]
cls.add_method('Add',
'ns3::TagBuffer',
[param('ns3::TypeId', 'tid'), param('uint32_t', 'bufferSize'), param('int32_t', 'start'), param('int32_t', 'end')])
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::Add(ns3::ByteTagList const & o) [member function]
cls.add_method('Add',
'void',
[param('ns3::ByteTagList const &', 'o')])
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtEnd(int32_t adjustment, int32_t appendOffset) [member function]
cls.add_method('AddAtEnd',
'void',
[param('int32_t', 'adjustment'), param('int32_t', 'appendOffset')])
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtStart(int32_t adjustment, int32_t prependOffset) [member function]
cls.add_method('AddAtStart',
'void',
[param('int32_t', 'adjustment'), param('int32_t', 'prependOffset')])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator ns3::ByteTagList::Begin(int32_t offsetStart, int32_t offsetEnd) const [member function]
cls.add_method('Begin',
'ns3::ByteTagList::Iterator',
[param('int32_t', 'offsetStart'), param('int32_t', 'offsetEnd')],
is_const=True)
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::RemoveAll() [member function]
cls.add_method('RemoveAll',
'void',
[])
return
def register_Ns3ByteTagListIterator_methods(root_module, cls):
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Iterator(ns3::ByteTagList::Iterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ByteTagList::Iterator const &', 'arg0')])
## byte-tag-list.h (module 'network'): uint32_t ns3::ByteTagList::Iterator::GetOffsetStart() const [member function]
cls.add_method('GetOffsetStart',
'uint32_t',
[],
is_const=True)
## byte-tag-list.h (module 'network'): bool ns3::ByteTagList::Iterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item ns3::ByteTagList::Iterator::Next() [member function]
cls.add_method('Next',
'ns3::ByteTagList::Iterator::Item',
[])
return
def register_Ns3ByteTagListIteratorItem_methods(root_module, cls):
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::ByteTagList::Iterator::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ByteTagList::Iterator::Item const &', 'arg0')])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::TagBuffer buf) [constructor]
cls.add_constructor([param('ns3::TagBuffer', 'buf')])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::buf [variable]
cls.add_instance_attribute('buf', 'ns3::TagBuffer', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::end [variable]
cls.add_instance_attribute('end', 'int32_t', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::size [variable]
cls.add_instance_attribute('size', 'uint32_t', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::start [variable]
cls.add_instance_attribute('start', 'int32_t', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::tid [variable]
cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
return
def register_Ns3CallbackBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function]
cls.add_method('GetImpl',
'ns3::Ptr< ns3::CallbackImplBase >',
[],
is_const=True)
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')],
visibility='protected')
## callback.h (module 'core'): static std::string ns3::CallbackBase::Demangle(std::string const & mangled) [member function]
cls.add_method('Demangle',
'std::string',
[param('std::string const &', 'mangled')],
is_static=True, visibility='protected')
return
def register_Ns3CriticalSection_methods(root_module, cls):
## system-mutex.h (module 'core'): ns3::CriticalSection::CriticalSection(ns3::CriticalSection const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CriticalSection const &', 'arg0')])
## system-mutex.h (module 'core'): ns3::CriticalSection::CriticalSection(ns3::SystemMutex & mutex) [constructor]
cls.add_constructor([param('ns3::SystemMutex &', 'mutex')])
return
def register_Ns3DataRate_methods(root_module, cls):
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('!=')
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('<=')
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('>')
cls.add_binary_comparison_operator('>=')
## data-rate.h (module 'network'): ns3::DataRate::DataRate(ns3::DataRate const & arg0) [copy constructor]
cls.add_constructor([param('ns3::DataRate const &', 'arg0')])
## data-rate.h (module 'network'): ns3::DataRate::DataRate() [constructor]
cls.add_constructor([])
## data-rate.h (module 'network'): ns3::DataRate::DataRate(uint64_t bps) [constructor]
cls.add_constructor([param('uint64_t', 'bps')])
## data-rate.h (module 'network'): ns3::DataRate::DataRate(std::string rate) [constructor]
cls.add_constructor([param('std::string', 'rate')])
## data-rate.h (module 'network'): double ns3::DataRate::CalculateTxTime(uint32_t bytes) const [member function]
cls.add_method('CalculateTxTime',
'double',
[param('uint32_t', 'bytes')],
is_const=True)
## data-rate.h (module 'network'): uint64_t ns3::DataRate::GetBitRate() const [member function]
cls.add_method('GetBitRate',
'uint64_t',
[],
is_const=True)
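    # CalculateTxTime returns the serialization delay in seconds for a byte
    # count, i.e. bytes * 8 / GetBitRate(). Sketch (module name assumed):
    #
    #   rate = ns.network.DataRate("5Mbps")   # string-form constructor
    #   rate.CalculateTxTime(625)             # 625 * 8 / 5e6 = 0.001 s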
return
def register_Ns3EventId_methods(root_module, cls):
cls.add_binary_comparison_operator('!=')
cls.add_binary_comparison_operator('==')
## event-id.h (module 'core'): ns3::EventId::EventId(ns3::EventId const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EventId const &', 'arg0')])
## event-id.h (module 'core'): ns3::EventId::EventId() [constructor]
cls.add_constructor([])
## event-id.h (module 'core'): ns3::EventId::EventId(ns3::Ptr<ns3::EventImpl> const & impl, uint64_t ts, uint32_t context, uint32_t uid) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::EventImpl > const &', 'impl'), param('uint64_t', 'ts'), param('uint32_t', 'context'), param('uint32_t', 'uid')])
## event-id.h (module 'core'): void ns3::EventId::Cancel() [member function]
cls.add_method('Cancel',
'void',
[])
## event-id.h (module 'core'): uint32_t ns3::EventId::GetContext() const [member function]
cls.add_method('GetContext',
'uint32_t',
[],
is_const=True)
## event-id.h (module 'core'): uint64_t ns3::EventId::GetTs() const [member function]
cls.add_method('GetTs',
'uint64_t',
[],
is_const=True)
## event-id.h (module 'core'): uint32_t ns3::EventId::GetUid() const [member function]
cls.add_method('GetUid',
'uint32_t',
[],
is_const=True)
## event-id.h (module 'core'): bool ns3::EventId::IsExpired() const [member function]
cls.add_method('IsExpired',
'bool',
[],
is_const=True)
## event-id.h (module 'core'): bool ns3::EventId::IsRunning() const [member function]
cls.add_method('IsRunning',
'bool',
[],
is_const=True)
## event-id.h (module 'core'): ns3::EventImpl * ns3::EventId::PeekEventImpl() const [member function]
cls.add_method('PeekEventImpl',
'ns3::EventImpl *',
[],
is_const=True)
return
def register_Ns3Ipv4Address_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(ns3::Ipv4Address const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4Address const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(uint32_t address) [constructor]
cls.add_constructor([param('uint32_t', 'address')])
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(char const * address) [constructor]
cls.add_constructor([param('char const *', 'address')])
## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::CombineMask(ns3::Ipv4Mask const & mask) const [member function]
cls.add_method('CombineMask',
'ns3::Ipv4Address',
[param('ns3::Ipv4Mask const &', 'mask')],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::ConvertFrom(ns3::Address const & address) [member function]
cls.add_method('ConvertFrom',
'ns3::Ipv4Address',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::Deserialize(uint8_t const * buf) [member function]
cls.add_method('Deserialize',
'ns3::Ipv4Address',
[param('uint8_t const *', 'buf')],
is_static=True)
## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Address::Get() const [member function]
cls.add_method('Get',
'uint32_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetAny() [member function]
cls.add_method('GetAny',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetBroadcast() [member function]
cls.add_method('GetBroadcast',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::GetSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function]
cls.add_method('GetSubnetDirectedBroadcast',
'ns3::Ipv4Address',
[param('ns3::Ipv4Mask const &', 'mask')],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsBroadcast() const [member function]
cls.add_method('IsBroadcast',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsEqual(ns3::Ipv4Address const & other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv4Address const &', 'other')],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsLocalMulticast() const [member function]
cls.add_method('IsLocalMulticast',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): static bool ns3::Ipv4Address::IsMatchingType(ns3::Address const & address) [member function]
cls.add_method('IsMatchingType',
'bool',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function]
cls.add_method('IsSubnetDirectedBroadcast',
'bool',
[param('ns3::Ipv4Mask const &', 'mask')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Serialize(uint8_t * buf) const [member function]
cls.add_method('Serialize',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(uint32_t address) [member function]
cls.add_method('Set',
'void',
[param('uint32_t', 'address')])
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(char const * address) [member function]
cls.add_method('Set',
'void',
[param('char const *', 'address')])
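    # Hedged usage sketch (module name assumed; Ipv4Mask is registered just
    # below):
    #
    #   addr = ns.network.Ipv4Address("10.1.1.7")
    #   mask = ns.network.Ipv4Mask("255.255.255.0")
    #   addr.CombineMask(mask)                  # -> 10.1.1.0
    #   addr.GetSubnetDirectedBroadcast(mask)   # -> 10.1.1.255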
return
def register_Ns3Ipv4Mask_methods(root_module, cls):
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(ns3::Ipv4Mask const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4Mask const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(uint32_t mask) [constructor]
cls.add_constructor([param('uint32_t', 'mask')])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(char const * mask) [constructor]
cls.add_constructor([param('char const *', 'mask')])
## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::Get() const [member function]
cls.add_method('Get',
'uint32_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::GetInverse() const [member function]
cls.add_method('GetInverse',
'uint32_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv4Mask',
[],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetOnes() [member function]
cls.add_method('GetOnes',
'ns3::Ipv4Mask',
[],
is_static=True)
## ipv4-address.h (module 'network'): uint16_t ns3::Ipv4Mask::GetPrefixLength() const [member function]
cls.add_method('GetPrefixLength',
'uint16_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv4Mask',
[],
is_static=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsEqual(ns3::Ipv4Mask other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv4Mask', 'other')],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsMatch(ns3::Ipv4Address a, ns3::Ipv4Address b) const [member function]
cls.add_method('IsMatch',
'bool',
[param('ns3::Ipv4Address', 'a'), param('ns3::Ipv4Address', 'b')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Set(uint32_t mask) [member function]
cls.add_method('Set',
'void',
[param('uint32_t', 'mask')])
return
def register_Ns3Ipv6Address_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(char const * address) [constructor]
cls.add_constructor([param('char const *', 'address')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(uint8_t * address) [constructor]
cls.add_constructor([param('uint8_t *', 'address')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const & addr) [copy constructor]
cls.add_constructor([param('ns3::Ipv6Address const &', 'addr')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const * addr) [constructor]
cls.add_constructor([param('ns3::Ipv6Address const *', 'addr')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6Address::CombinePrefix(ns3::Ipv6Prefix const & prefix) [member function]
cls.add_method('CombinePrefix',
'ns3::Ipv6Address',
[param('ns3::Ipv6Prefix const &', 'prefix')])
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::ConvertFrom(ns3::Address const & address) [member function]
cls.add_method('ConvertFrom',
'ns3::Ipv6Address',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::Deserialize(uint8_t const * buf) [member function]
cls.add_method('Deserialize',
'ns3::Ipv6Address',
[param('uint8_t const *', 'buf')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllHostsMulticast() [member function]
cls.add_method('GetAllHostsMulticast',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllNodesMulticast() [member function]
cls.add_method('GetAllNodesMulticast',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllRoutersMulticast() [member function]
cls.add_method('GetAllRoutersMulticast',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAny() [member function]
cls.add_method('GetAny',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::GetBytes(uint8_t * buf) const [member function]
cls.add_method('GetBytes',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv6-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv6Address::GetIpv4MappedAddress() const [member function]
cls.add_method('GetIpv4MappedAddress',
'ns3::Ipv4Address',
[],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetOnes() [member function]
cls.add_method('GetOnes',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllHostsMulticast() const [member function]
cls.add_method('IsAllHostsMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllNodesMulticast() const [member function]
cls.add_method('IsAllNodesMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllRoutersMulticast() const [member function]
cls.add_method('IsAllRoutersMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAny() const [member function]
cls.add_method('IsAny',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsEqual(ns3::Ipv6Address const & other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv6Address const &', 'other')],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsIpv4MappedAddress() [member function]
cls.add_method('IsIpv4MappedAddress',
'bool',
[])
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocal() const [member function]
cls.add_method('IsLinkLocal',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocalMulticast() const [member function]
cls.add_method('IsLinkLocalMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLocalhost() const [member function]
cls.add_method('IsLocalhost',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): static bool ns3::Ipv6Address::IsMatchingType(ns3::Address const & address) [member function]
cls.add_method('IsMatchingType',
'bool',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsSolicitedMulticast() const [member function]
cls.add_method('IsSolicitedMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac48Address addr, ns3::Ipv6Address prefix) [member function]
cls.add_method('MakeAutoconfiguredAddress',
'ns3::Ipv6Address',
[param('ns3::Mac48Address', 'addr'), param('ns3::Ipv6Address', 'prefix')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac48Address mac) [member function]
cls.add_method('MakeAutoconfiguredLinkLocalAddress',
'ns3::Ipv6Address',
[param('ns3::Mac48Address', 'mac')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeIpv4MappedAddress(ns3::Ipv4Address addr) [member function]
cls.add_method('MakeIpv4MappedAddress',
'ns3::Ipv6Address',
[param('ns3::Ipv4Address', 'addr')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeSolicitedAddress(ns3::Ipv6Address addr) [member function]
cls.add_method('MakeSolicitedAddress',
'ns3::Ipv6Address',
[param('ns3::Ipv6Address', 'addr')],
is_static=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Serialize(uint8_t * buf) const [member function]
cls.add_method('Serialize',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(char const * address) [member function]
cls.add_method('Set',
'void',
[param('char const *', 'address')])
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(uint8_t * address) [member function]
cls.add_method('Set',
'void',
[param('uint8_t *', 'address')])
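    # The MakeAutoconfigured* statics above derive IPv6 addresses from a MAC
    # address in EUI-64 fashion. Sketch (module name assumed):
    #
    #   mac = ns.network.Mac48Address("00:00:00:00:00:01")
    #   ns.network.Ipv6Address.MakeAutoconfiguredLinkLocalAddress(mac)
    #   # -> fe80::200:ff:fe00:1  (U/L bit flipped, ff:fe inserted)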
return
def register_Ns3Ipv6Prefix_methods(root_module, cls):
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t * prefix) [constructor]
cls.add_constructor([param('uint8_t *', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(char const * prefix) [constructor]
cls.add_constructor([param('char const *', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t prefix) [constructor]
cls.add_constructor([param('uint8_t', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const & prefix) [copy constructor]
cls.add_constructor([param('ns3::Ipv6Prefix const &', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const * prefix) [constructor]
cls.add_constructor([param('ns3::Ipv6Prefix const *', 'prefix')])
## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::GetBytes(uint8_t * buf) const [member function]
cls.add_method('GetBytes',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv6Prefix',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetOnes() [member function]
cls.add_method('GetOnes',
'ns3::Ipv6Prefix',
[],
is_static=True)
## ipv6-address.h (module 'network'): uint8_t ns3::Ipv6Prefix::GetPrefixLength() const [member function]
cls.add_method('GetPrefixLength',
'uint8_t',
[],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv6Prefix',
[],
is_static=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsEqual(ns3::Ipv6Prefix const & other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv6Prefix const &', 'other')],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsMatch(ns3::Ipv6Address a, ns3::Ipv6Address b) const [member function]
cls.add_method('IsMatch',
'bool',
[param('ns3::Ipv6Address', 'a'), param('ns3::Ipv6Address', 'b')],
is_const=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
return
def register_Ns3Mac48Address_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address(ns3::Mac48Address const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Mac48Address const &', 'arg0')])
## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address() [constructor]
cls.add_constructor([])
## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address(char const * str) [constructor]
cls.add_constructor([param('char const *', 'str')])
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::Allocate() [member function]
cls.add_method('Allocate',
'ns3::Mac48Address',
[],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::ConvertFrom(ns3::Address const & address) [member function]
cls.add_method('ConvertFrom',
'ns3::Mac48Address',
[param('ns3::Address const &', 'address')],
is_static=True)
## mac48-address.h (module 'network'): void ns3::Mac48Address::CopyFrom(uint8_t const * buffer) [member function]
cls.add_method('CopyFrom',
'void',
[param('uint8_t const *', 'buffer')])
## mac48-address.h (module 'network'): void ns3::Mac48Address::CopyTo(uint8_t * buffer) const [member function]
cls.add_method('CopyTo',
'void',
[param('uint8_t *', 'buffer')],
is_const=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetBroadcast() [member function]
cls.add_method('GetBroadcast',
'ns3::Mac48Address',
[],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast(ns3::Ipv4Address address) [member function]
cls.add_method('GetMulticast',
'ns3::Mac48Address',
[param('ns3::Ipv4Address', 'address')],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast(ns3::Ipv6Address address) [member function]
cls.add_method('GetMulticast',
'ns3::Mac48Address',
[param('ns3::Ipv6Address', 'address')],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast6Prefix() [member function]
cls.add_method('GetMulticast6Prefix',
'ns3::Mac48Address',
[],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticastPrefix() [member function]
cls.add_method('GetMulticastPrefix',
'ns3::Mac48Address',
[],
is_static=True)
## mac48-address.h (module 'network'): bool ns3::Mac48Address::IsBroadcast() const [member function]
cls.add_method('IsBroadcast',
'bool',
[],
is_const=True)
## mac48-address.h (module 'network'): bool ns3::Mac48Address::IsGroup() const [member function]
cls.add_method('IsGroup',
'bool',
[],
is_const=True)
## mac48-address.h (module 'network'): static bool ns3::Mac48Address::IsMatchingType(ns3::Address const & address) [member function]
cls.add_method('IsMatchingType',
'bool',
[param('ns3::Address const &', 'address')],
is_static=True)
return
def register_Ns3NetDeviceContainer_methods(root_module, cls):
## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(ns3::NetDeviceContainer const & arg0) [copy constructor]
cls.add_constructor([param('ns3::NetDeviceContainer const &', 'arg0')])
## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer() [constructor]
cls.add_constructor([])
## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(ns3::Ptr<ns3::NetDevice> dev) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::NetDevice >', 'dev')])
## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(std::string devName) [constructor]
cls.add_constructor([param('std::string', 'devName')])
## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(ns3::NetDeviceContainer const & a, ns3::NetDeviceContainer const & b) [constructor]
cls.add_constructor([param('ns3::NetDeviceContainer const &', 'a'), param('ns3::NetDeviceContainer const &', 'b')])
## net-device-container.h (module 'network'): void ns3::NetDeviceContainer::Add(ns3::NetDeviceContainer other) [member function]
cls.add_method('Add',
'void',
[param('ns3::NetDeviceContainer', 'other')])
## net-device-container.h (module 'network'): void ns3::NetDeviceContainer::Add(ns3::Ptr<ns3::NetDevice> device) [member function]
cls.add_method('Add',
'void',
[param('ns3::Ptr< ns3::NetDevice >', 'device')])
## net-device-container.h (module 'network'): void ns3::NetDeviceContainer::Add(std::string deviceName) [member function]
cls.add_method('Add',
'void',
[param('std::string', 'deviceName')])
## net-device-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::NetDevice>*,std::vector<ns3::Ptr<ns3::NetDevice>, std::allocator<ns3::Ptr<ns3::NetDevice> > > > ns3::NetDeviceContainer::Begin() const [member function]
cls.add_method('Begin',
'__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::NetDevice > const, std::vector< ns3::Ptr< ns3::NetDevice > > >',
[],
is_const=True)
## net-device-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::NetDevice>*,std::vector<ns3::Ptr<ns3::NetDevice>, std::allocator<ns3::Ptr<ns3::NetDevice> > > > ns3::NetDeviceContainer::End() const [member function]
cls.add_method('End',
'__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::NetDevice > const, std::vector< ns3::Ptr< ns3::NetDevice > > >',
[],
is_const=True)
## net-device-container.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::NetDeviceContainer::Get(uint32_t i) const [member function]
cls.add_method('Get',
'ns3::Ptr< ns3::NetDevice >',
[param('uint32_t', 'i')],
is_const=True)
## net-device-container.h (module 'network'): uint32_t ns3::NetDeviceContainer::GetN() const [member function]
cls.add_method('GetN',
'uint32_t',
[],
is_const=True)
return
def register_Ns3NodeContainer_methods(root_module, cls):
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & arg0) [copy constructor]
cls.add_constructor([param('ns3::NodeContainer const &', 'arg0')])
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer() [constructor]
cls.add_constructor([])
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::Ptr<ns3::Node> node) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::Node >', 'node')])
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(std::string nodeName) [constructor]
cls.add_constructor([param('std::string', 'nodeName')])
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b) [constructor]
cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b')])
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c) [constructor]
cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c')])
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c, ns3::NodeContainer const & d) [constructor]
cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c'), param('ns3::NodeContainer const &', 'd')])
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c, ns3::NodeContainer const & d, ns3::NodeContainer const & e) [constructor]
cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c'), param('ns3::NodeContainer const &', 'd'), param('ns3::NodeContainer const &', 'e')])
## node-container.h (module 'network'): void ns3::NodeContainer::Add(ns3::NodeContainer other) [member function]
cls.add_method('Add',
'void',
[param('ns3::NodeContainer', 'other')])
## node-container.h (module 'network'): void ns3::NodeContainer::Add(ns3::Ptr<ns3::Node> node) [member function]
cls.add_method('Add',
'void',
[param('ns3::Ptr< ns3::Node >', 'node')])
## node-container.h (module 'network'): void ns3::NodeContainer::Add(std::string nodeName) [member function]
cls.add_method('Add',
'void',
[param('std::string', 'nodeName')])
## node-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Node>*,std::vector<ns3::Ptr<ns3::Node>, std::allocator<ns3::Ptr<ns3::Node> > > > ns3::NodeContainer::Begin() const [member function]
cls.add_method('Begin',
'__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node > const, std::vector< ns3::Ptr< ns3::Node > > >',
[],
is_const=True)
## node-container.h (module 'network'): void ns3::NodeContainer::Create(uint32_t n) [member function]
cls.add_method('Create',
'void',
[param('uint32_t', 'n')])
## node-container.h (module 'network'): void ns3::NodeContainer::Create(uint32_t n, uint32_t systemId) [member function]
cls.add_method('Create',
'void',
[param('uint32_t', 'n'), param('uint32_t', 'systemId')])
## node-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Node>*,std::vector<ns3::Ptr<ns3::Node>, std::allocator<ns3::Ptr<ns3::Node> > > > ns3::NodeContainer::End() const [member function]
cls.add_method('End',
'__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node > const, std::vector< ns3::Ptr< ns3::Node > > >',
[],
is_const=True)
## node-container.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NodeContainer::Get(uint32_t i) const [member function]
cls.add_method('Get',
'ns3::Ptr< ns3::Node >',
[param('uint32_t', 'i')],
is_const=True)
## node-container.h (module 'network'): static ns3::NodeContainer ns3::NodeContainer::GetGlobal() [member function]
cls.add_method('GetGlobal',
'ns3::NodeContainer',
[],
is_static=True)
## node-container.h (module 'network'): uint32_t ns3::NodeContainer::GetN() const [member function]
cls.add_method('GetN',
'uint32_t',
[],
is_const=True)
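    # Hedged usage sketch (module name assumed):
    #
    #   nodes = ns.network.NodeContainer()
    #   nodes.Create(3)                  # instantiate three Nodes
    #   first = nodes.Get(0)             # Ptr<Node>
    #   assert nodes.GetN() == 3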
return
def register_Ns3ObjectBase_methods(root_module, cls):
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor]
cls.add_constructor([])
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function]
cls.add_method('GetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
is_const=True)
## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & attribute) const [member function]
cls.add_method('GetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'attribute')],
is_const=True)
## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function]
cls.add_method('ConstructSelf',
'void',
[param('ns3::AttributeConstructionList const &', 'attributes')],
visibility='protected')
## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function]
cls.add_method('NotifyConstructionCompleted',
'void',
[],
visibility='protected', is_virtual=True)
return
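# Usage sketch (comment only): the ObjectBase accessors registered above are
# the generic attribute entry points inherited by every ns-3 object. A
# hypothetical round-trip from Python ('ns.core' module path assumed;
# "SomeAttribute" is a placeholder name):
#
#   import ns.core
#   value = ns.core.StringValue()
#   obj.GetAttribute("SomeAttribute", value)   # fills value (AttributeValue &)
#   obj.SetAttribute("SomeAttribute", ns.core.StringValue("new"))
#
# The *FailSafe variants return bool instead of aborting on an unknown name.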
def register_Ns3ObjectDeleter_methods(root_module, cls):
## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter() [constructor]
cls.add_constructor([])
## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter(ns3::ObjectDeleter const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')])
## object.h (module 'core'): static void ns3::ObjectDeleter::Delete(ns3::Object * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::Object *', 'object')],
is_static=True)
return
def register_Ns3ObjectFactory_methods(root_module, cls):
cls.add_output_stream_operator()
## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(ns3::ObjectFactory const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectFactory const &', 'arg0')])
## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory() [constructor]
cls.add_constructor([])
## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(std::string typeId) [constructor]
cls.add_constructor([param('std::string', 'typeId')])
## object-factory.h (module 'core'): ns3::Ptr<ns3::Object> ns3::ObjectFactory::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::Object >',
[],
is_const=True)
## object-factory.h (module 'core'): ns3::TypeId ns3::ObjectFactory::GetTypeId() const [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_const=True)
## object-factory.h (module 'core'): void ns3::ObjectFactory::Set(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('Set',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(ns3::TypeId tid) [member function]
cls.add_method('SetTypeId',
'void',
[param('ns3::TypeId', 'tid')])
## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(char const * tid) [member function]
cls.add_method('SetTypeId',
'void',
[param('char const *', 'tid')])
## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(std::string tid) [member function]
cls.add_method('SetTypeId',
'void',
[param('std::string', 'tid')])
return
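# Usage sketch (comment only): ObjectFactory pairs a TypeId with attribute
# overrides and then instantiates configured objects; this is the mechanism
# helpers use internally. Hypothetical Python use ('ns.core' assumed):
#
#   import ns.core
#   factory = ns.core.ObjectFactory()
#   factory.SetTypeId("ns3::DropTailQueue")        # SetTypeId(std::string)
#   factory.Set("MaxPackets", ns.core.UintegerValue(50))
#   queue = factory.Create()                       # ns3::Ptr<ns3::Object>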
def register_Ns3PacketMetadata_methods(root_module, cls):
## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(uint64_t uid, uint32_t size) [constructor]
cls.add_constructor([param('uint64_t', 'uid'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(ns3::PacketMetadata const & o) [copy constructor]
cls.add_constructor([param('ns3::PacketMetadata const &', 'o')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddAtEnd(ns3::PacketMetadata const & o) [member function]
cls.add_method('AddAtEnd',
'void',
[param('ns3::PacketMetadata const &', 'o')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddHeader(ns3::Header const & header, uint32_t size) [member function]
cls.add_method('AddHeader',
'void',
[param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddPaddingAtEnd(uint32_t end) [member function]
cls.add_method('AddPaddingAtEnd',
'void',
[param('uint32_t', 'end')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
cls.add_method('AddTrailer',
'void',
[param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::PacketMetadata::BeginItem(ns3::Buffer buffer) const [member function]
cls.add_method('BeginItem',
'ns3::PacketMetadata::ItemIterator',
[param('ns3::Buffer', 'buffer')],
is_const=True)
## packet-metadata.h (module 'network'): ns3::PacketMetadata ns3::PacketMetadata::CreateFragment(uint32_t start, uint32_t end) const [member function]
cls.add_method('CreateFragment',
'ns3::PacketMetadata',
[param('uint32_t', 'start'), param('uint32_t', 'end')],
is_const=True)
## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::Enable() [member function]
cls.add_method('Enable',
'void',
[],
is_static=True)
## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::EnableChecking() [member function]
cls.add_method('EnableChecking',
'void',
[],
is_static=True)
## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## packet-metadata.h (module 'network'): uint64_t ns3::PacketMetadata::GetUid() const [member function]
cls.add_method('GetUid',
'uint64_t',
[],
is_const=True)
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtEnd(uint32_t end) [member function]
cls.add_method('RemoveAtEnd',
'void',
[param('uint32_t', 'end')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtStart(uint32_t start) [member function]
cls.add_method('RemoveAtStart',
'void',
[param('uint32_t', 'start')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveHeader(ns3::Header const & header, uint32_t size) [member function]
cls.add_method('RemoveHeader',
'void',
[param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
cls.add_method('RemoveTrailer',
'void',
[param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
return
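# Note (assumption from ns-3 semantics, not stated by these registrations):
# metadata recording is off by default for performance, so the static
# Enable()/EnableChecking() methods registered above must run before packets
# are created for BeginItem() to yield header/trailer items, e.g.
#
#   import ns.network
#   ns.network.PacketMetadata.Enable()   # call early, before building packets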
def register_Ns3PacketMetadataItem_methods(root_module, cls):
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item() [constructor]
cls.add_constructor([])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item(ns3::PacketMetadata::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PacketMetadata::Item const &', 'arg0')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::current [variable]
cls.add_instance_attribute('current', 'ns3::Buffer::Iterator', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentSize [variable]
cls.add_instance_attribute('currentSize', 'uint32_t', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromEnd [variable]
cls.add_instance_attribute('currentTrimedFromEnd', 'uint32_t', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromStart [variable]
cls.add_instance_attribute('currentTrimedFromStart', 'uint32_t', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::isFragment [variable]
cls.add_instance_attribute('isFragment', 'bool', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::tid [variable]
cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
return
def register_Ns3PacketMetadataItemIterator_methods(root_module, cls):
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata::ItemIterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PacketMetadata::ItemIterator const &', 'arg0')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata const * metadata, ns3::Buffer buffer) [constructor]
cls.add_constructor([param('ns3::PacketMetadata const *', 'metadata'), param('ns3::Buffer', 'buffer')])
## packet-metadata.h (module 'network'): bool ns3::PacketMetadata::ItemIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item ns3::PacketMetadata::ItemIterator::Next() [member function]
cls.add_method('Next',
'ns3::PacketMetadata::Item',
[])
return
def register_Ns3PacketTagIterator_methods(root_module, cls):
## packet.h (module 'network'): ns3::PacketTagIterator::PacketTagIterator(ns3::PacketTagIterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PacketTagIterator const &', 'arg0')])
## packet.h (module 'network'): bool ns3::PacketTagIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## packet.h (module 'network'): ns3::PacketTagIterator::Item ns3::PacketTagIterator::Next() [member function]
cls.add_method('Next',
'ns3::PacketTagIterator::Item',
[])
return
def register_Ns3PacketTagIteratorItem_methods(root_module, cls):
## packet.h (module 'network'): ns3::PacketTagIterator::Item::Item(ns3::PacketTagIterator::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PacketTagIterator::Item const &', 'arg0')])
## packet.h (module 'network'): void ns3::PacketTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
cls.add_method('GetTag',
'void',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): ns3::TypeId ns3::PacketTagIterator::Item::GetTypeId() const [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_const=True)
return
def register_Ns3PacketTagList_methods(root_module, cls):
## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList() [constructor]
cls.add_constructor([])
## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList(ns3::PacketTagList const & o) [copy constructor]
cls.add_constructor([param('ns3::PacketTagList const &', 'o')])
## packet-tag-list.h (module 'network'): void ns3::PacketTagList::Add(ns3::Tag const & tag) const [member function]
cls.add_method('Add',
'void',
[param('ns3::Tag const &', 'tag')],
is_const=True)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData const * ns3::PacketTagList::Head() const [member function]
cls.add_method('Head',
'ns3::PacketTagList::TagData const *',
[],
is_const=True)
## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Peek(ns3::Tag & tag) const [member function]
cls.add_method('Peek',
'bool',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Remove(ns3::Tag & tag) [member function]
cls.add_method('Remove',
'bool',
[param('ns3::Tag &', 'tag')])
## packet-tag-list.h (module 'network'): void ns3::PacketTagList::RemoveAll() [member function]
cls.add_method('RemoveAll',
'void',
[])
return
def register_Ns3PacketTagListTagData_methods(root_module, cls):
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData() [constructor]
cls.add_constructor([])
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData(ns3::PacketTagList::TagData const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PacketTagList::TagData const &', 'arg0')])
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::count [variable]
cls.add_instance_attribute('count', 'uint32_t', is_const=False)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::data [variable]
cls.add_instance_attribute('data', 'uint8_t [ 20 ]', is_const=False)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::next [variable]
cls.add_instance_attribute('next', 'ns3::PacketTagList::TagData *', is_const=False)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::tid [variable]
cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
return
def register_Ns3PcapFile_methods(root_module, cls):
## pcap-file.h (module 'network'): ns3::PcapFile::PcapFile() [constructor]
cls.add_constructor([])
## pcap-file.h (module 'network'): void ns3::PcapFile::Clear() [member function]
cls.add_method('Clear',
'void',
[])
## pcap-file.h (module 'network'): void ns3::PcapFile::Close() [member function]
cls.add_method('Close',
'void',
[])
## pcap-file.h (module 'network'): static bool ns3::PcapFile::Diff(std::string const & f1, std::string const & f2, uint32_t & sec, uint32_t & usec, uint32_t snapLen=ns3::PcapFile::SNAPLEN_DEFAULT) [member function]
cls.add_method('Diff',
'bool',
[param('std::string const &', 'f1'), param('std::string const &', 'f2'), param('uint32_t &', 'sec'), param('uint32_t &', 'usec'), param('uint32_t', 'snapLen', default_value='ns3::PcapFile::SNAPLEN_DEFAULT')],
is_static=True)
## pcap-file.h (module 'network'): bool ns3::PcapFile::Eof() const [member function]
cls.add_method('Eof',
'bool',
[],
is_const=True)
## pcap-file.h (module 'network'): bool ns3::PcapFile::Fail() const [member function]
cls.add_method('Fail',
'bool',
[],
is_const=True)
## pcap-file.h (module 'network'): uint32_t ns3::PcapFile::GetDataLinkType() [member function]
cls.add_method('GetDataLinkType',
'uint32_t',
[])
## pcap-file.h (module 'network'): uint32_t ns3::PcapFile::GetMagic() [member function]
cls.add_method('GetMagic',
'uint32_t',
[])
## pcap-file.h (module 'network'): uint32_t ns3::PcapFile::GetSigFigs() [member function]
cls.add_method('GetSigFigs',
'uint32_t',
[])
## pcap-file.h (module 'network'): uint32_t ns3::PcapFile::GetSnapLen() [member function]
cls.add_method('GetSnapLen',
'uint32_t',
[])
## pcap-file.h (module 'network'): bool ns3::PcapFile::GetSwapMode() [member function]
cls.add_method('GetSwapMode',
'bool',
[])
## pcap-file.h (module 'network'): int32_t ns3::PcapFile::GetTimeZoneOffset() [member function]
cls.add_method('GetTimeZoneOffset',
'int32_t',
[])
## pcap-file.h (module 'network'): uint16_t ns3::PcapFile::GetVersionMajor() [member function]
cls.add_method('GetVersionMajor',
'uint16_t',
[])
## pcap-file.h (module 'network'): uint16_t ns3::PcapFile::GetVersionMinor() [member function]
cls.add_method('GetVersionMinor',
'uint16_t',
[])
## pcap-file.h (module 'network'): void ns3::PcapFile::Init(uint32_t dataLinkType, uint32_t snapLen=ns3::PcapFile::SNAPLEN_DEFAULT, int32_t timeZoneCorrection=ns3::PcapFile::ZONE_DEFAULT, bool swapMode=false) [member function]
cls.add_method('Init',
'void',
[param('uint32_t', 'dataLinkType'), param('uint32_t', 'snapLen', default_value='ns3::PcapFile::SNAPLEN_DEFAULT'), param('int32_t', 'timeZoneCorrection', default_value='ns3::PcapFile::ZONE_DEFAULT'), param('bool', 'swapMode', default_value='false')])
## pcap-file.h (module 'network'): void ns3::PcapFile::Open(std::string const & filename, std::_Ios_Openmode mode) [member function]
cls.add_method('Open',
'void',
[param('std::string const &', 'filename'), param('std::_Ios_Openmode', 'mode')])
## pcap-file.h (module 'network'): void ns3::PcapFile::Read(uint8_t * const data, uint32_t maxBytes, uint32_t & tsSec, uint32_t & tsUsec, uint32_t & inclLen, uint32_t & origLen, uint32_t & readLen) [member function]
cls.add_method('Read',
'void',
[param('uint8_t * const', 'data'), param('uint32_t', 'maxBytes'), param('uint32_t &', 'tsSec'), param('uint32_t &', 'tsUsec'), param('uint32_t &', 'inclLen'), param('uint32_t &', 'origLen'), param('uint32_t &', 'readLen')])
## pcap-file.h (module 'network'): void ns3::PcapFile::Write(uint32_t tsSec, uint32_t tsUsec, uint8_t const * const data, uint32_t totalLen) [member function]
cls.add_method('Write',
'void',
[param('uint32_t', 'tsSec'), param('uint32_t', 'tsUsec'), param('uint8_t const * const', 'data'), param('uint32_t', 'totalLen')])
## pcap-file.h (module 'network'): void ns3::PcapFile::Write(uint32_t tsSec, uint32_t tsUsec, ns3::Ptr<const ns3::Packet> p) [member function]
cls.add_method('Write',
'void',
[param('uint32_t', 'tsSec'), param('uint32_t', 'tsUsec'), param('ns3::Ptr< ns3::Packet const >', 'p')])
## pcap-file.h (module 'network'): void ns3::PcapFile::Write(uint32_t tsSec, uint32_t tsUsec, ns3::Header & header, ns3::Ptr<const ns3::Packet> p) [member function]
cls.add_method('Write',
'void',
[param('uint32_t', 'tsSec'), param('uint32_t', 'tsUsec'), param('ns3::Header &', 'header'), param('ns3::Ptr< ns3::Packet const >', 'p')])
## pcap-file.h (module 'network'): ns3::PcapFile::SNAPLEN_DEFAULT [variable]
cls.add_static_attribute('SNAPLEN_DEFAULT', 'uint32_t const', is_const=True)
## pcap-file.h (module 'network'): ns3::PcapFile::ZONE_DEFAULT [variable]
cls.add_static_attribute('ZONE_DEFAULT', 'int32_t const', is_const=True)
return
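# Usage sketch (comment only, using just the methods registered above; the
# Python spelling of the std::_Ios_Openmode constant is binding-specific and
# assumed here): writing a pcap file by hand might look like
#
#   f = ns.network.PcapFile()
#   f.Open("trace.pcap", openmode_out)   # openmode_out: wrapped std::ios::out
#   f.Init(1)                            # dataLinkType 1 = Ethernet (DLT_EN10MB)
#   f.Write(0, 0, packet)                # tsSec, tsUsec, Ptr<const Packet>
#   f.Close()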
def register_Ns3PcapHelper_methods(root_module, cls):
## trace-helper.h (module 'network'): ns3::PcapHelper::PcapHelper(ns3::PcapHelper const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PcapHelper const &', 'arg0')])
## trace-helper.h (module 'network'): ns3::PcapHelper::PcapHelper() [constructor]
cls.add_constructor([])
## trace-helper.h (module 'network'): ns3::Ptr<ns3::PcapFileWrapper> ns3::PcapHelper::CreateFile(std::string filename, std::_Ios_Openmode filemode, uint32_t dataLinkType, uint32_t snapLen=65535, int32_t tzCorrection=0) [member function]
cls.add_method('CreateFile',
'ns3::Ptr< ns3::PcapFileWrapper >',
[param('std::string', 'filename'), param('std::_Ios_Openmode', 'filemode'), param('uint32_t', 'dataLinkType'), param('uint32_t', 'snapLen', default_value='65535'), param('int32_t', 'tzCorrection', default_value='0')])
## trace-helper.h (module 'network'): std::string ns3::PcapHelper::GetFilenameFromDevice(std::string prefix, ns3::Ptr<ns3::NetDevice> device, bool useObjectNames=true) [member function]
cls.add_method('GetFilenameFromDevice',
'std::string',
[param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'device'), param('bool', 'useObjectNames', default_value='true')])
## trace-helper.h (module 'network'): std::string ns3::PcapHelper::GetFilenameFromInterfacePair(std::string prefix, ns3::Ptr<ns3::Object> object, uint32_t interface, bool useObjectNames=true) [member function]
cls.add_method('GetFilenameFromInterfacePair',
'std::string',
[param('std::string', 'prefix'), param('ns3::Ptr< ns3::Object >', 'object'), param('uint32_t', 'interface'), param('bool', 'useObjectNames', default_value='true')])
return
def register_Ns3PcapHelperForDevice_methods(root_module, cls):
## trace-helper.h (module 'network'): ns3::PcapHelperForDevice::PcapHelperForDevice(ns3::PcapHelperForDevice const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PcapHelperForDevice const &', 'arg0')])
## trace-helper.h (module 'network'): ns3::PcapHelperForDevice::PcapHelperForDevice() [constructor]
cls.add_constructor([])
## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcap(std::string prefix, ns3::Ptr<ns3::NetDevice> nd, bool promiscuous=false, bool explicitFilename=false) [member function]
cls.add_method('EnablePcap',
'void',
[param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'promiscuous', default_value='false'), param('bool', 'explicitFilename', default_value='false')])
## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcap(std::string prefix, std::string ndName, bool promiscuous=false, bool explicitFilename=false) [member function]
cls.add_method('EnablePcap',
'void',
[param('std::string', 'prefix'), param('std::string', 'ndName'), param('bool', 'promiscuous', default_value='false'), param('bool', 'explicitFilename', default_value='false')])
## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcap(std::string prefix, ns3::NetDeviceContainer d, bool promiscuous=false) [member function]
cls.add_method('EnablePcap',
'void',
[param('std::string', 'prefix'), param('ns3::NetDeviceContainer', 'd'), param('bool', 'promiscuous', default_value='false')])
## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcap(std::string prefix, ns3::NodeContainer n, bool promiscuous=false) [member function]
cls.add_method('EnablePcap',
'void',
[param('std::string', 'prefix'), param('ns3::NodeContainer', 'n'), param('bool', 'promiscuous', default_value='false')])
## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcap(std::string prefix, uint32_t nodeid, uint32_t deviceid, bool promiscuous=false) [member function]
cls.add_method('EnablePcap',
'void',
[param('std::string', 'prefix'), param('uint32_t', 'nodeid'), param('uint32_t', 'deviceid'), param('bool', 'promiscuous', default_value='false')])
## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcapAll(std::string prefix, bool promiscuous=false) [member function]
cls.add_method('EnablePcapAll',
'void',
[param('std::string', 'prefix'), param('bool', 'promiscuous', default_value='false')])
## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcapInternal(std::string prefix, ns3::Ptr<ns3::NetDevice> nd, bool promiscuous, bool explicitFilename) [member function]
cls.add_method('EnablePcapInternal',
'void',
[param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'promiscuous'), param('bool', 'explicitFilename')],
is_pure_virtual=True, is_virtual=True)
return
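# Usage sketch (comment only): PcapHelperForDevice is the mixin that gives
# concrete device helpers (e.g. FdNetDeviceHelper, registered later in this
# file) their EnablePcap* overloads; EnablePcapInternal is pure virtual and
# supplied by each helper. Typical calls after installing devices:
#
#   helper.EnablePcap("prefix", devices)   # NetDeviceContainer overload
#   helper.EnablePcapAll("prefix", True)   # promiscuous mode on every device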
def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount(ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3Simulator_methods(root_module, cls):
## simulator.h (module 'core'): ns3::Simulator::Simulator(ns3::Simulator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Simulator const &', 'arg0')])
## simulator.h (module 'core'): static void ns3::Simulator::Cancel(ns3::EventId const & id) [member function]
cls.add_method('Cancel',
'void',
[param('ns3::EventId const &', 'id')],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::Destroy() [member function]
cls.add_method('Destroy',
'void',
[],
is_static=True)
## simulator.h (module 'core'): static uint32_t ns3::Simulator::GetContext() [member function]
cls.add_method('GetContext',
'uint32_t',
[],
is_static=True)
## simulator.h (module 'core'): static ns3::Time ns3::Simulator::GetDelayLeft(ns3::EventId const & id) [member function]
cls.add_method('GetDelayLeft',
'ns3::Time',
[param('ns3::EventId const &', 'id')],
is_static=True)
## simulator.h (module 'core'): static ns3::Ptr<ns3::SimulatorImpl> ns3::Simulator::GetImplementation() [member function]
cls.add_method('GetImplementation',
'ns3::Ptr< ns3::SimulatorImpl >',
[],
is_static=True)
## simulator.h (module 'core'): static ns3::Time ns3::Simulator::GetMaximumSimulationTime() [member function]
cls.add_method('GetMaximumSimulationTime',
'ns3::Time',
[],
is_static=True)
## simulator.h (module 'core'): static uint32_t ns3::Simulator::GetSystemId() [member function]
cls.add_method('GetSystemId',
'uint32_t',
[],
is_static=True)
## simulator.h (module 'core'): static bool ns3::Simulator::IsExpired(ns3::EventId const & id) [member function]
cls.add_method('IsExpired',
'bool',
[param('ns3::EventId const &', 'id')],
is_static=True)
## simulator.h (module 'core'): static bool ns3::Simulator::IsFinished() [member function]
cls.add_method('IsFinished',
'bool',
[],
is_static=True)
## simulator.h (module 'core'): static ns3::Time ns3::Simulator::Now() [member function]
cls.add_method('Now',
'ns3::Time',
[],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::Remove(ns3::EventId const & id) [member function]
cls.add_method('Remove',
'void',
[param('ns3::EventId const &', 'id')],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::SetImplementation(ns3::Ptr<ns3::SimulatorImpl> impl) [member function]
cls.add_method('SetImplementation',
'void',
[param('ns3::Ptr< ns3::SimulatorImpl >', 'impl')],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::SetScheduler(ns3::ObjectFactory schedulerFactory) [member function]
cls.add_method('SetScheduler',
'void',
[param('ns3::ObjectFactory', 'schedulerFactory')],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::Stop() [member function]
cls.add_method('Stop',
'void',
[],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::Stop(ns3::Time const & time) [member function]
cls.add_method('Stop',
'void',
[param('ns3::Time const &', 'time')],
is_static=True)
return
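# Usage sketch (comment only, limited to the methods registered above; Run()
# and the Schedule() templates are bound elsewhere). 'ns.core' path assumed:
#
#   import ns.core
#   ns.core.Simulator.Stop(ns.core.Seconds(10.0))  # set the stop time
#   now = ns.core.Simulator.Now()                  # current simulation Time
#   ns.core.Simulator.Destroy()                    # release simulator state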
def register_Ns3SystemCondition_methods(root_module, cls):
## system-condition.h (module 'core'): ns3::SystemCondition::SystemCondition(ns3::SystemCondition const & arg0) [copy constructor]
cls.add_constructor([param('ns3::SystemCondition const &', 'arg0')])
## system-condition.h (module 'core'): ns3::SystemCondition::SystemCondition() [constructor]
cls.add_constructor([])
## system-condition.h (module 'core'): void ns3::SystemCondition::Broadcast() [member function]
cls.add_method('Broadcast',
'void',
[])
## system-condition.h (module 'core'): bool ns3::SystemCondition::GetCondition() [member function]
cls.add_method('GetCondition',
'bool',
[])
## system-condition.h (module 'core'): void ns3::SystemCondition::SetCondition(bool condition) [member function]
cls.add_method('SetCondition',
'void',
[param('bool', 'condition')])
## system-condition.h (module 'core'): void ns3::SystemCondition::Signal() [member function]
cls.add_method('Signal',
'void',
[])
## system-condition.h (module 'core'): bool ns3::SystemCondition::TimedWait(uint64_t ns) [member function]
cls.add_method('TimedWait',
'bool',
[param('uint64_t', 'ns')])
## system-condition.h (module 'core'): void ns3::SystemCondition::Wait() [member function]
cls.add_method('Wait',
'void',
[])
return
def register_Ns3SystemMutex_methods(root_module, cls):
## system-mutex.h (module 'core'): ns3::SystemMutex::SystemMutex(ns3::SystemMutex const & arg0) [copy constructor]
cls.add_constructor([param('ns3::SystemMutex const &', 'arg0')])
## system-mutex.h (module 'core'): ns3::SystemMutex::SystemMutex() [constructor]
cls.add_constructor([])
## system-mutex.h (module 'core'): void ns3::SystemMutex::Lock() [member function]
cls.add_method('Lock',
'void',
[])
## system-mutex.h (module 'core'): void ns3::SystemMutex::Unlock() [member function]
cls.add_method('Unlock',
'void',
[])
return
def register_Ns3Tag_methods(root_module, cls):
## tag.h (module 'network'): ns3::Tag::Tag() [constructor]
cls.add_constructor([])
## tag.h (module 'network'): ns3::Tag::Tag(ns3::Tag const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Tag const &', 'arg0')])
## tag.h (module 'network'): void ns3::Tag::Deserialize(ns3::TagBuffer i) [member function]
cls.add_method('Deserialize',
'void',
[param('ns3::TagBuffer', 'i')],
is_pure_virtual=True, is_virtual=True)
## tag.h (module 'network'): uint32_t ns3::Tag::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## tag.h (module 'network'): static ns3::TypeId ns3::Tag::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## tag.h (module 'network'): void ns3::Tag::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## tag.h (module 'network'): void ns3::Tag::Serialize(ns3::TagBuffer i) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::TagBuffer', 'i')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3TagBuffer_methods(root_module, cls):
## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(ns3::TagBuffer const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TagBuffer const &', 'arg0')])
## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(uint8_t * start, uint8_t * end) [constructor]
cls.add_constructor([param('uint8_t *', 'start'), param('uint8_t *', 'end')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::CopyFrom(ns3::TagBuffer o) [member function]
cls.add_method('CopyFrom',
'void',
[param('ns3::TagBuffer', 'o')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::Read(uint8_t * buffer, uint32_t size) [member function]
cls.add_method('Read',
'void',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
## tag-buffer.h (module 'network'): double ns3::TagBuffer::ReadDouble() [member function]
cls.add_method('ReadDouble',
'double',
[])
## tag-buffer.h (module 'network'): uint16_t ns3::TagBuffer::ReadU16() [member function]
cls.add_method('ReadU16',
'uint16_t',
[])
## tag-buffer.h (module 'network'): uint32_t ns3::TagBuffer::ReadU32() [member function]
cls.add_method('ReadU32',
'uint32_t',
[])
## tag-buffer.h (module 'network'): uint64_t ns3::TagBuffer::ReadU64() [member function]
cls.add_method('ReadU64',
'uint64_t',
[])
## tag-buffer.h (module 'network'): uint8_t ns3::TagBuffer::ReadU8() [member function]
cls.add_method('ReadU8',
'uint8_t',
[])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::TrimAtEnd(uint32_t trim) [member function]
cls.add_method('TrimAtEnd',
'void',
[param('uint32_t', 'trim')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::Write(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Write',
'void',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteDouble(double v) [member function]
cls.add_method('WriteDouble',
'void',
[param('double', 'v')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU16(uint16_t data) [member function]
cls.add_method('WriteU16',
'void',
[param('uint16_t', 'data')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU32(uint32_t data) [member function]
cls.add_method('WriteU32',
'void',
[param('uint32_t', 'data')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU64(uint64_t v) [member function]
cls.add_method('WriteU64',
'void',
[param('uint64_t', 'v')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU8(uint8_t v) [member function]
cls.add_method('WriteU8',
'void',
[param('uint8_t', 'v')])
return
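# Usage sketch (assumption: the bindings allow Python subclasses to override
# Tag's pure-virtual methods, which pybindgen supports when subclassing is
# enabled; treat this as illustrative, not guaranteed):
#
#   class MyTag(ns.network.Tag):            # hypothetical custom tag
#       def GetSerializedSize(self):
#           return 1
#       def Serialize(self, buf):           # buf is a TagBuffer
#           buf.WriteU8(42)
#       def Deserialize(self, buf):
#           self.value = buf.ReadU8()
#       def Print(self, os):
#           pass
#
# Serialize and Deserialize must write and read exactly GetSerializedSize()
# bytes using the symmetric TagBuffer WriteU*/ReadU* pairs registered above.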
def register_Ns3TypeId_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor]
cls.add_constructor([param('char const *', 'name')])
## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [copy constructor]
cls.add_constructor([param('ns3::TypeId const &', 'o')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor) [member function]
cls.add_method('AddTraceSource',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(uint32_t i) const [member function]
cls.add_method('GetAttribute',
'ns3::TypeId::AttributeInformation',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(uint32_t i) const [member function]
cls.add_method('GetAttributeFullName',
'std::string',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetAttributeN() const [member function]
cls.add_method('GetAttributeN',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase*,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::TypeId::GetConstructor() const [member function]
cls.add_method('GetConstructor',
'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function]
cls.add_method('GetGroupName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function]
cls.add_method('GetName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function]
cls.add_method('GetParent',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint32_t i) [member function]
cls.add_method('GetRegistered',
'ns3::TypeId',
[param('uint32_t', 'i')],
is_static=True)
## type-id.h (module 'core'): static uint32_t ns3::TypeId::GetRegisteredN() [member function]
cls.add_method('GetRegisteredN',
'uint32_t',
[],
is_static=True)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(uint32_t i) const [member function]
cls.add_method('GetTraceSource',
'ns3::TypeId::TraceSourceInformation',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetTraceSourceN() const [member function]
cls.add_method('GetTraceSourceN',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function]
cls.add_method('GetUid',
'uint16_t',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function]
cls.add_method('HasConstructor',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function]
cls.add_method('HasParent',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function]
cls.add_method('HideFromDocumentation',
'ns3::TypeId',
[])
## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function]
cls.add_method('IsChildOf',
'bool',
[param('ns3::TypeId', 'other')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function]
cls.add_method('LookupAttributeByName',
'bool',
[param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function]
cls.add_method('LookupByName',
'ns3::TypeId',
[param('std::string', 'name')],
is_static=True)
## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function]
cls.add_method('LookupTraceSourceByName',
'ns3::Ptr< ns3::TraceSourceAccessor const >',
[param('std::string', 'name')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function]
cls.add_method('MustHideFromDocumentation',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(uint32_t i, ns3::Ptr<ns3::AttributeValue const> initialValue) [member function]
cls.add_method('SetAttributeInitialValue',
'bool',
[param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function]
cls.add_method('SetGroupName',
'ns3::TypeId',
[param('std::string', 'groupName')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function]
cls.add_method('SetParent',
'ns3::TypeId',
[param('ns3::TypeId', 'tid')])
## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t tid) [member function]
cls.add_method('SetUid',
'void',
[param('uint16_t', 'tid')])
return
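# Usage sketch (comment only): TypeId is ns-3's run-time type registry; the
# lookup and introspection methods registered above can enumerate a type's
# attributes from Python ('ns.core' assumed; the AttributeInformation fields
# used here are registered in the next function):
#
#   import ns.core
#   tid = ns.core.TypeId.LookupByName("ns3::Node")
#   for i in range(tid.GetAttributeN()):
#       info = tid.GetAttribute(i)
#       print(tid.GetAttributeFullName(i), "-", info.help)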
def register_Ns3TypeIdAttributeInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable]
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::flags [variable]
cls.add_instance_attribute('flags', 'uint32_t', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable]
cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue [variable]
cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
return
def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
return
def register_Ns3Empty_methods(root_module, cls):
## empty.h (module 'core'): ns3::empty::empty() [constructor]
cls.add_constructor([])
## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [copy constructor]
cls.add_constructor([param('ns3::empty const &', 'arg0')])
return
def register_Ns3Int64x64_t_methods(root_module, cls):
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right'))
cls.add_unary_numeric_operator('-')
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('>')
cls.add_binary_comparison_operator('!=')
cls.add_inplace_numeric_operator('*=', param('ns3::int64x64_t const &', 'right'))
cls.add_inplace_numeric_operator('+=', param('ns3::int64x64_t const &', 'right'))
cls.add_inplace_numeric_operator('-=', param('ns3::int64x64_t const &', 'right'))
cls.add_inplace_numeric_operator('/=', param('ns3::int64x64_t const &', 'right'))
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('<=')
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('>=')
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t() [constructor]
cls.add_constructor([])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(double v) [constructor]
cls.add_constructor([param('double', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int v) [constructor]
cls.add_constructor([param('int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long int v) [constructor]
cls.add_constructor([param('long int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long int v) [constructor]
cls.add_constructor([param('long long int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(unsigned int v) [constructor]
cls.add_constructor([param('unsigned int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long unsigned int v) [constructor]
cls.add_constructor([param('long unsigned int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long unsigned int v) [constructor]
cls.add_constructor([param('long long unsigned int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int64_t hi, uint64_t lo) [constructor]
cls.add_constructor([param('int64_t', 'hi'), param('uint64_t', 'lo')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(ns3::int64x64_t const & o) [copy constructor]
cls.add_constructor([param('ns3::int64x64_t const &', 'o')])
## int64x64-double.h (module 'core'): double ns3::int64x64_t::GetDouble() const [member function]
cls.add_method('GetDouble',
'double',
[],
is_const=True)
## int64x64-double.h (module 'core'): int64_t ns3::int64x64_t::GetHigh() const [member function]
cls.add_method('GetHigh',
'int64_t',
[],
is_const=True)
## int64x64-double.h (module 'core'): uint64_t ns3::int64x64_t::GetLow() const [member function]
cls.add_method('GetLow',
'uint64_t',
[],
is_const=True)
## int64x64-double.h (module 'core'): static ns3::int64x64_t ns3::int64x64_t::Invert(uint64_t v) [member function]
cls.add_method('Invert',
'ns3::int64x64_t',
[param('uint64_t', 'v')],
is_static=True)
## int64x64-double.h (module 'core'): void ns3::int64x64_t::MulByInvert(ns3::int64x64_t const & o) [member function]
cls.add_method('MulByInvert',
'void',
[param('ns3::int64x64_t const &', 'o')])
return
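# Usage sketch (comment only): int64x64_t is the fixed-point type behind
# ns3::Time, and the operator registrations above make it behave like a
# number. This build wraps the double-backed int64x64-double.h variant, so
# precision is that of a double. Exposed name 'ns.core.int64x64_t' assumed:
#
#   a = ns.core.int64x64_t(1.5)
#   b = ns.core.int64x64_t(2, 0)     # (hi, lo) constructor -> 2.0
#   c = (a + b) * 2                  # registered binary operators
#   print(c.GetDouble())             # 7.0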
def register_Ns3Chunk_methods(root_module, cls):
## chunk.h (module 'network'): ns3::Chunk::Chunk() [constructor]
cls.add_constructor([])
## chunk.h (module 'network'): ns3::Chunk::Chunk(ns3::Chunk const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Chunk const &', 'arg0')])
## chunk.h (module 'network'): uint32_t ns3::Chunk::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_pure_virtual=True, is_virtual=True)
## chunk.h (module 'network'): static ns3::TypeId ns3::Chunk::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## chunk.h (module 'network'): void ns3::Chunk::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3FdNetDeviceHelper_methods(root_module, cls):
## fd-net-device-helper.h (module 'fd-net-device'): ns3::FdNetDeviceHelper::FdNetDeviceHelper(ns3::FdNetDeviceHelper const & arg0) [copy constructor]
cls.add_constructor([param('ns3::FdNetDeviceHelper const &', 'arg0')])
## fd-net-device-helper.h (module 'fd-net-device'): ns3::FdNetDeviceHelper::FdNetDeviceHelper() [constructor]
cls.add_constructor([])
## fd-net-device-helper.h (module 'fd-net-device'): ns3::NetDeviceContainer ns3::FdNetDeviceHelper::Install(ns3::Ptr<ns3::Node> node) const [member function]
cls.add_method('Install',
'ns3::NetDeviceContainer',
[param('ns3::Ptr< ns3::Node >', 'node')],
is_const=True, is_virtual=True)
## fd-net-device-helper.h (module 'fd-net-device'): ns3::NetDeviceContainer ns3::FdNetDeviceHelper::Install(std::string name) const [member function]
cls.add_method('Install',
'ns3::NetDeviceContainer',
[param('std::string', 'name')],
is_const=True, is_virtual=True)
## fd-net-device-helper.h (module 'fd-net-device'): ns3::NetDeviceContainer ns3::FdNetDeviceHelper::Install(ns3::NodeContainer const & c) const [member function]
cls.add_method('Install',
'ns3::NetDeviceContainer',
[param('ns3::NodeContainer const &', 'c')],
is_const=True, is_virtual=True)
## fd-net-device-helper.h (module 'fd-net-device'): void ns3::FdNetDeviceHelper::SetAttribute(std::string n1, ns3::AttributeValue const & v1) [member function]
cls.add_method('SetAttribute',
'void',
[param('std::string', 'n1'), param('ns3::AttributeValue const &', 'v1')])
## fd-net-device-helper.h (module 'fd-net-device'): ns3::Ptr<ns3::NetDevice> ns3::FdNetDeviceHelper::InstallPriv(ns3::Ptr<ns3::Node> node) const [member function]
cls.add_method('InstallPriv',
'ns3::Ptr< ns3::NetDevice >',
[param('ns3::Ptr< ns3::Node >', 'node')],
is_const=True, visibility='protected', is_virtual=True)
## fd-net-device-helper.h (module 'fd-net-device'): void ns3::FdNetDeviceHelper::EnableAsciiInternal(ns3::Ptr<ns3::OutputStreamWrapper> stream, std::string prefix, ns3::Ptr<ns3::NetDevice> nd, bool explicitFilename) [member function]
cls.add_method('EnableAsciiInternal',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'explicitFilename')],
visibility='private', is_virtual=True)
## fd-net-device-helper.h (module 'fd-net-device'): void ns3::FdNetDeviceHelper::EnablePcapInternal(std::string prefix, ns3::Ptr<ns3::NetDevice> nd, bool promiscuous, bool explicitFilename) [member function]
cls.add_method('EnablePcapInternal',
'void',
[param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'promiscuous'), param('bool', 'explicitFilename')],
visibility='private', is_virtual=True)
return
def register_Ns3Header_methods(root_module, cls):
cls.add_output_stream_operator()
## header.h (module 'network'): ns3::Header::Header() [constructor]
cls.add_constructor([])
## header.h (module 'network'): ns3::Header::Header(ns3::Header const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Header const &', 'arg0')])
## header.h (module 'network'): uint32_t ns3::Header::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_pure_virtual=True, is_virtual=True)
## header.h (module 'network'): uint32_t ns3::Header::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## header.h (module 'network'): static ns3::TypeId ns3::Header::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## header.h (module 'network'): void ns3::Header::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## header.h (module 'network'): void ns3::Header::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3Object_methods(root_module, cls):
## object.h (module 'core'): ns3::Object::Object() [constructor]
cls.add_constructor([])
## object.h (module 'core'): void ns3::Object::AggregateObject(ns3::Ptr<ns3::Object> other) [member function]
cls.add_method('AggregateObject',
'void',
[param('ns3::Ptr< ns3::Object >', 'other')])
## object.h (module 'core'): void ns3::Object::Dispose() [member function]
cls.add_method('Dispose',
'void',
[])
## object.h (module 'core'): ns3::Object::AggregateIterator ns3::Object::GetAggregateIterator() const [member function]
cls.add_method('GetAggregateIterator',
'ns3::Object::AggregateIterator',
[],
is_const=True)
## object.h (module 'core'): ns3::TypeId ns3::Object::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## object.h (module 'core'): static ns3::TypeId ns3::Object::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## object.h (module 'core'): void ns3::Object::Initialize() [member function]
cls.add_method('Initialize',
'void',
[])
## object.h (module 'core'): ns3::Object::Object(ns3::Object const & o) [copy constructor]
cls.add_constructor([param('ns3::Object const &', 'o')],
visibility='protected')
## object.h (module 'core'): void ns3::Object::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
## object.h (module 'core'): void ns3::Object::DoInitialize() [member function]
cls.add_method('DoInitialize',
'void',
[],
visibility='protected', is_virtual=True)
## object.h (module 'core'): void ns3::Object::NotifyNewAggregate() [member function]
cls.add_method('NotifyNewAggregate',
'void',
[],
visibility='protected', is_virtual=True)
return
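# Illustrative sketch of ns-3's object aggregation as exposed by the
# registrations above (the module paths are an assumption and depend on how
# the bindings are packaged):
#   node = ns.network.Node()
#   node.AggregateObject(ns.internet.Ipv4L3Protocol())  # attach a protocol
#   it = node.GetAggregateIterator()
#   while it.HasNext():
#       obj = it.Next()                                  # walk aggregated objects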
def register_Ns3ObjectAggregateIterator_methods(root_module, cls):
## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator(ns3::Object::AggregateIterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')])
## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator() [constructor]
cls.add_constructor([])
## object.h (module 'core'): bool ns3::Object::AggregateIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## object.h (module 'core'): ns3::Ptr<ns3::Object const> ns3::Object::AggregateIterator::Next() [member function]
cls.add_method('Next',
'ns3::Ptr< ns3::Object const >',
[])
return
def register_Ns3PcapFileWrapper_methods(root_module, cls):
## pcap-file-wrapper.h (module 'network'): static ns3::TypeId ns3::PcapFileWrapper::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## pcap-file-wrapper.h (module 'network'): ns3::PcapFileWrapper::PcapFileWrapper() [constructor]
cls.add_constructor([])
## pcap-file-wrapper.h (module 'network'): bool ns3::PcapFileWrapper::Fail() const [member function]
cls.add_method('Fail',
'bool',
[],
is_const=True)
## pcap-file-wrapper.h (module 'network'): bool ns3::PcapFileWrapper::Eof() const [member function]
cls.add_method('Eof',
'bool',
[],
is_const=True)
## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Clear() [member function]
cls.add_method('Clear',
'void',
[])
## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Open(std::string const & filename, std::_Ios_Openmode mode) [member function]
cls.add_method('Open',
'void',
[param('std::string const &', 'filename'), param('std::_Ios_Openmode', 'mode')])
## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Close() [member function]
cls.add_method('Close',
'void',
[])
## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Init(uint32_t dataLinkType, uint32_t snapLen=std::numeric_limits<unsigned int>::max(), int32_t tzCorrection=ns3::PcapFile::ZONE_DEFAULT) [member function]
cls.add_method('Init',
'void',
[param('uint32_t', 'dataLinkType'), param('uint32_t', 'snapLen', default_value='std::numeric_limits<unsigned int>::max()'), param('int32_t', 'tzCorrection', default_value='ns3::PcapFile::ZONE_DEFAULT')])
## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Write(ns3::Time t, ns3::Ptr<const ns3::Packet> p) [member function]
cls.add_method('Write',
'void',
[param('ns3::Time', 't'), param('ns3::Ptr< ns3::Packet const >', 'p')])
## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Write(ns3::Time t, ns3::Header & header, ns3::Ptr<const ns3::Packet> p) [member function]
cls.add_method('Write',
'void',
[param('ns3::Time', 't'), param('ns3::Header &', 'header'), param('ns3::Ptr< ns3::Packet const >', 'p')])
## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Write(ns3::Time t, uint8_t const * buffer, uint32_t length) [member function]
cls.add_method('Write',
'void',
[param('ns3::Time', 't'), param('uint8_t const *', 'buffer'), param('uint32_t', 'length')])
## pcap-file-wrapper.h (module 'network'): uint32_t ns3::PcapFileWrapper::GetMagic() [member function]
cls.add_method('GetMagic',
'uint32_t',
[])
## pcap-file-wrapper.h (module 'network'): uint16_t ns3::PcapFileWrapper::GetVersionMajor() [member function]
cls.add_method('GetVersionMajor',
'uint16_t',
[])
## pcap-file-wrapper.h (module 'network'): uint16_t ns3::PcapFileWrapper::GetVersionMinor() [member function]
cls.add_method('GetVersionMinor',
'uint16_t',
[])
## pcap-file-wrapper.h (module 'network'): int32_t ns3::PcapFileWrapper::GetTimeZoneOffset() [member function]
cls.add_method('GetTimeZoneOffset',
'int32_t',
[])
## pcap-file-wrapper.h (module 'network'): uint32_t ns3::PcapFileWrapper::GetSigFigs() [member function]
cls.add_method('GetSigFigs',
'uint32_t',
[])
## pcap-file-wrapper.h (module 'network'): uint32_t ns3::PcapFileWrapper::GetSnapLen() [member function]
cls.add_method('GetSnapLen',
'uint32_t',
[])
## pcap-file-wrapper.h (module 'network'): uint32_t ns3::PcapFileWrapper::GetDataLinkType() [member function]
cls.add_method('GetDataLinkType',
'uint32_t',
[])
return
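# Default arguments survive the binding: the 'Init' registration above
# carries the C++ default expressions via default_value=..., so from Python
# it can be called as wrapper.Init(dataLinkType) and the snap-length and
# timezone defaults are filled in by the generated wrapper.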
def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount(ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter< ns3::EventImpl > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3FdReader_Ns3Empty_Ns3DefaultDeleter__lt__ns3FdReader__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::FdReader, ns3::empty, ns3::DefaultDeleter<ns3::FdReader> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::FdReader, ns3::empty, ns3::DefaultDeleter<ns3::FdReader> >::SimpleRefCount(ns3::SimpleRefCount<ns3::FdReader, ns3::empty, ns3::DefaultDeleter<ns3::FdReader> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::FdReader, ns3::empty, ns3::DefaultDeleter< ns3::FdReader > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::FdReader, ns3::empty, ns3::DefaultDeleter<ns3::FdReader> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount(ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter< ns3::NixVector > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3OutputStreamWrapper_Ns3Empty_Ns3DefaultDeleter__lt__ns3OutputStreamWrapper__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >::SimpleRefCount(ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter< ns3::OutputStreamWrapper > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter< ns3::Packet > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3SystemThread_Ns3Empty_Ns3DefaultDeleter__lt__ns3SystemThread__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> >::SimpleRefCount(ns3::SimpleRefCount<ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::SystemThread, ns3::empty, ns3::DefaultDeleter< ns3::SystemThread > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
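# The eleven register_Ns3SimpleRefCount__* functions above are structurally
# identical: each SimpleRefCount<T, ns3::empty, ns3::DefaultDeleter<T>>
# instantiation gets a default constructor, a copy constructor and the
# static Cleanup() hook. A hand-written binding could collapse them into a
# loop such as
#   for t in ('AttributeAccessor', 'AttributeChecker', ...):
#       register_simple_ref_count(root_module, t)   # hypothetical helper
# but the pybindgen API scanner emits one function per instantiation, which
# keeps regeneration a mechanical rewrite of the whole file.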
def register_Ns3SystemThread_methods(root_module, cls):
## system-thread.h (module 'core'): ns3::SystemThread::SystemThread(ns3::SystemThread const & arg0) [copy constructor]
cls.add_constructor([param('ns3::SystemThread const &', 'arg0')])
## system-thread.h (module 'core'): ns3::SystemThread::SystemThread(ns3::Callback<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [constructor]
cls.add_constructor([param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')])
## system-thread.h (module 'core'): static bool ns3::SystemThread::Equals(pthread_t id) [member function]
cls.add_method('Equals',
'bool',
[param('pthread_t', 'id')],
is_static=True)
## system-thread.h (module 'core'): void ns3::SystemThread::Join() [member function]
cls.add_method('Join',
'void',
[])
## system-thread.h (module 'core'): static pthread_t ns3::SystemThread::Self() [member function]
cls.add_method('Self',
'pthread_t',
[],
is_static=True)
## system-thread.h (module 'core'): void ns3::SystemThread::Start() [member function]
cls.add_method('Start',
'void',
[])
return
def register_Ns3Time_methods(root_module, cls):
cls.add_binary_numeric_operator('+', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', 'right'))
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('>')
cls.add_binary_comparison_operator('!=')
cls.add_inplace_numeric_operator('+=', param('ns3::Time const &', 'right'))
cls.add_inplace_numeric_operator('-=', param('ns3::Time const &', 'right'))
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('<=')
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('>=')
## nstime.h (module 'core'): ns3::Time::Time() [constructor]
cls.add_constructor([])
## nstime.h (module 'core'): ns3::Time::Time(ns3::Time const & o) [copy constructor]
cls.add_constructor([param('ns3::Time const &', 'o')])
## nstime.h (module 'core'): ns3::Time::Time(double v) [constructor]
cls.add_constructor([param('double', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(int v) [constructor]
cls.add_constructor([param('int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long int v) [constructor]
cls.add_constructor([param('long int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long long int v) [constructor]
cls.add_constructor([param('long long int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(unsigned int v) [constructor]
cls.add_constructor([param('unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long unsigned int v) [constructor]
cls.add_constructor([param('long unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long long unsigned int v) [constructor]
cls.add_constructor([param('long long unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(std::string const & s) [constructor]
cls.add_constructor([param('std::string const &', 's')])
## nstime.h (module 'core'): ns3::Time::Time(ns3::int64x64_t const & value) [constructor]
cls.add_constructor([param('ns3::int64x64_t const &', 'value')])
## nstime.h (module 'core'): int ns3::Time::Compare(ns3::Time const & o) const [member function]
cls.add_method('Compare',
'int',
[param('ns3::Time const &', 'o')],
is_const=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & from, ns3::Time::Unit timeUnit) [member function]
cls.add_method('From',
'ns3::Time',
[param('ns3::int64x64_t const &', 'from'), param('ns3::Time::Unit', 'timeUnit')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value) [member function]
cls.add_method('From',
'ns3::Time',
[param('ns3::int64x64_t const &', 'value')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::FromDouble(double value, ns3::Time::Unit timeUnit) [member function]
cls.add_method('FromDouble',
'ns3::Time',
[param('double', 'value'), param('ns3::Time::Unit', 'timeUnit')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::FromInteger(uint64_t value, ns3::Time::Unit timeUnit) [member function]
cls.add_method('FromInteger',
'ns3::Time',
[param('uint64_t', 'value'), param('ns3::Time::Unit', 'timeUnit')],
is_static=True)
## nstime.h (module 'core'): double ns3::Time::GetDouble() const [member function]
cls.add_method('GetDouble',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetFemtoSeconds() const [member function]
cls.add_method('GetFemtoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetInteger() const [member function]
cls.add_method('GetInteger',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetMicroSeconds() const [member function]
cls.add_method('GetMicroSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetMilliSeconds() const [member function]
cls.add_method('GetMilliSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetNanoSeconds() const [member function]
cls.add_method('GetNanoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetPicoSeconds() const [member function]
cls.add_method('GetPicoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): static ns3::Time::Unit ns3::Time::GetResolution() [member function]
cls.add_method('GetResolution',
'ns3::Time::Unit',
[],
is_static=True)
## nstime.h (module 'core'): double ns3::Time::GetSeconds() const [member function]
cls.add_method('GetSeconds',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetTimeStep() const [member function]
cls.add_method('GetTimeStep',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsNegative() const [member function]
cls.add_method('IsNegative',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsPositive() const [member function]
cls.add_method('IsPositive',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsStrictlyNegative() const [member function]
cls.add_method('IsStrictlyNegative',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsStrictlyPositive() const [member function]
cls.add_method('IsStrictlyPositive',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsZero() const [member function]
cls.add_method('IsZero',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): static void ns3::Time::SetResolution(ns3::Time::Unit resolution) [member function]
cls.add_method('SetResolution',
'void',
[param('ns3::Time::Unit', 'resolution')],
is_static=True)
## nstime.h (module 'core'): ns3::int64x64_t ns3::Time::To(ns3::Time::Unit timeUnit) const [member function]
cls.add_method('To',
'ns3::int64x64_t',
[param('ns3::Time::Unit', 'timeUnit')],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::ToDouble(ns3::Time::Unit timeUnit) const [member function]
cls.add_method('ToDouble',
'double',
[param('ns3::Time::Unit', 'timeUnit')],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::ToInteger(ns3::Time::Unit timeUnit) const [member function]
cls.add_method('ToInteger',
'int64_t',
[param('ns3::Time::Unit', 'timeUnit')],
is_const=True)
return
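# The operator registrations at the top of register_Ns3Time_methods make
# ns3::Time behave like a numeric type in Python: +, -, +=, -=, all six
# comparisons, and str() (via the output-stream operator) work directly on
# Time objects. Illustrative only; Seconds/MilliSeconds are ns-3 helpers
# bound elsewhere and the module path may vary:
#   t = ns.core.Seconds(1.5) + ns.core.MilliSeconds(250)
#   if t > ns.core.Seconds(1):
#       print(t.GetMilliSeconds())   # -> 1750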
def register_Ns3TraceSourceAccessor_methods(root_module, cls):
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor(ns3::TraceSourceAccessor const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')])
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor() [constructor]
cls.add_constructor([])
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Connect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('ConnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Disconnect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('DisconnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3Trailer_methods(root_module, cls):
cls.add_output_stream_operator()
## trailer.h (module 'network'): ns3::Trailer::Trailer() [constructor]
cls.add_constructor([])
## trailer.h (module 'network'): ns3::Trailer::Trailer(ns3::Trailer const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Trailer const &', 'arg0')])
## trailer.h (module 'network'): uint32_t ns3::Trailer::Deserialize(ns3::Buffer::Iterator end) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'end')],
is_pure_virtual=True, is_virtual=True)
## trailer.h (module 'network'): uint32_t ns3::Trailer::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trailer.h (module 'network'): static ns3::TypeId ns3::Trailer::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## trailer.h (module 'network'): void ns3::Trailer::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trailer.h (module 'network'): void ns3::Trailer::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
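# Note the parameter naming: Trailer::Deserialize receives an iterator
# pointing at the END of the trailer (implementations rewind by
# GetSerializedSize() before reading), whereas Header::Deserialize above
# receives an iterator at the START of the header. The 'end'/'start' names
# in these registrations preserve that distinction.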
def register_Ns3AttributeAccessor_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
cls.add_method('Get',
'bool',
[param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasGetter() const [member function]
cls.add_method('HasGetter',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasSetter() const [member function]
cls.add_method('HasSetter',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
cls.add_method('Set',
'bool',
[param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeChecker_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
cls.add_method('Check',
'bool',
[param('ns3::AttributeValue const &', 'value')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
cls.add_method('Copy',
'bool',
[param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::CreateValidValue(ns3::AttributeValue const & value) const [member function]
cls.add_method('CreateValidValue',
'ns3::Ptr< ns3::AttributeValue >',
[param('ns3::AttributeValue const &', 'value')],
is_const=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function]
cls.add_method('GetUnderlyingTypeInformation',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetValueTypeName() const [member function]
cls.add_method('GetValueTypeName',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const [member function]
cls.add_method('HasUnderlyingTypeInformation',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_pure_virtual=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
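# AttributeValue is the root of ns-3's attribute system: every concrete
# value class registered below (CallbackValue, DataRateValue,
# Ipv4AddressValue, Mac48AddressValue, ...) overrides Copy(),
# SerializeToString() and DeserializeFromString(), giving each attribute a
# string round-trip that the config and command-line subsystems rely on.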
def register_Ns3CallbackChecker_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')])
return
def register_Ns3CallbackImplBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
## callback.h (module 'core'): bool ns3::CallbackImplBase::IsEqual(ns3::Ptr<ns3::CallbackImplBase const> other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3CallbackValue_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'base')])
## callback.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::CallbackValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## callback.h (module 'core'): bool ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## callback.h (module 'core'): std::string ns3::CallbackValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## callback.h (module 'core'): void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function]
cls.add_method('Set',
'void',
[param('ns3::CallbackBase', 'base')])
return
def register_Ns3DataRateChecker_methods(root_module, cls):
## data-rate.h (module 'network'): ns3::DataRateChecker::DataRateChecker() [constructor]
cls.add_constructor([])
## data-rate.h (module 'network'): ns3::DataRateChecker::DataRateChecker(ns3::DataRateChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::DataRateChecker const &', 'arg0')])
return
def register_Ns3DataRateValue_methods(root_module, cls):
## data-rate.h (module 'network'): ns3::DataRateValue::DataRateValue() [constructor]
cls.add_constructor([])
## data-rate.h (module 'network'): ns3::DataRateValue::DataRateValue(ns3::DataRateValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::DataRateValue const &', 'arg0')])
## data-rate.h (module 'network'): ns3::DataRateValue::DataRateValue(ns3::DataRate const & value) [constructor]
cls.add_constructor([param('ns3::DataRate const &', 'value')])
## data-rate.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::DataRateValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## data-rate.h (module 'network'): bool ns3::DataRateValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## data-rate.h (module 'network'): ns3::DataRate ns3::DataRateValue::Get() const [member function]
cls.add_method('Get',
'ns3::DataRate',
[],
is_const=True)
## data-rate.h (module 'network'): std::string ns3::DataRateValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## data-rate.h (module 'network'): void ns3::DataRateValue::Set(ns3::DataRate const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::DataRate const &', 'value')])
return
def register_Ns3EmptyAttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, visibility='private', is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
visibility='private', is_virtual=True)
## attribute.h (module 'core'): std::string ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, visibility='private', is_virtual=True)
return
def register_Ns3EmuFdNetDeviceHelper_methods(root_module, cls):
## emu-fd-net-device-helper.h (module 'fd-net-device'): ns3::EmuFdNetDeviceHelper::EmuFdNetDeviceHelper(ns3::EmuFdNetDeviceHelper const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EmuFdNetDeviceHelper const &', 'arg0')])
## emu-fd-net-device-helper.h (module 'fd-net-device'): ns3::EmuFdNetDeviceHelper::EmuFdNetDeviceHelper() [constructor]
cls.add_constructor([])
## emu-fd-net-device-helper.h (module 'fd-net-device'): std::string ns3::EmuFdNetDeviceHelper::GetDeviceName() [member function]
cls.add_method('GetDeviceName',
'std::string',
[])
## emu-fd-net-device-helper.h (module 'fd-net-device'): void ns3::EmuFdNetDeviceHelper::SetDeviceName(std::string deviceName) [member function]
cls.add_method('SetDeviceName',
'void',
[param('std::string', 'deviceName')])
## emu-fd-net-device-helper.h (module 'fd-net-device'): int ns3::EmuFdNetDeviceHelper::CreateFileDescriptor() const [member function]
cls.add_method('CreateFileDescriptor',
'int',
[],
is_const=True, visibility='protected', is_virtual=True)
## emu-fd-net-device-helper.h (module 'fd-net-device'): ns3::Ptr<ns3::NetDevice> ns3::EmuFdNetDeviceHelper::InstallPriv(ns3::Ptr<ns3::Node> node) const [member function]
cls.add_method('InstallPriv',
'ns3::Ptr< ns3::NetDevice >',
[param('ns3::Ptr< ns3::Node >', 'node')],
is_const=True, visibility='protected', is_virtual=True)
## emu-fd-net-device-helper.h (module 'fd-net-device'): void ns3::EmuFdNetDeviceHelper::SetFileDescriptor(ns3::Ptr<ns3::FdNetDevice> device) const [member function]
cls.add_method('SetFileDescriptor',
'void',
[param('ns3::Ptr< ns3::FdNetDevice >', 'device')],
is_const=True, visibility='protected', is_virtual=True)
return
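# Typical flow for the emulation helper (illustrative sketch; the module
# path is an assumption):
#   emu = ns.fd_net_device.EmuFdNetDeviceHelper()
#   emu.SetDeviceName('eth0')        # host interface to attach to
#   devices = emu.Install(node)      # inherited from FdNetDeviceHelper
# CreateFileDescriptor()/SetFileDescriptor() are protected hooks invoked
# from InstallPriv and can be overridden from Python subclasses.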
def register_Ns3EventImpl_methods(root_module, cls):
## event-impl.h (module 'core'): ns3::EventImpl::EventImpl(ns3::EventImpl const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EventImpl const &', 'arg0')])
## event-impl.h (module 'core'): ns3::EventImpl::EventImpl() [constructor]
cls.add_constructor([])
## event-impl.h (module 'core'): void ns3::EventImpl::Cancel() [member function]
cls.add_method('Cancel',
'void',
[])
## event-impl.h (module 'core'): void ns3::EventImpl::Invoke() [member function]
cls.add_method('Invoke',
'void',
[])
## event-impl.h (module 'core'): bool ns3::EventImpl::IsCancelled() [member function]
cls.add_method('IsCancelled',
'bool',
[])
## event-impl.h (module 'core'): void ns3::EventImpl::Notify() [member function]
cls.add_method('Notify',
'void',
[],
is_pure_virtual=True, visibility='protected', is_virtual=True)
return
def register_Ns3FdReader_methods(root_module, cls):
## unix-fd-reader.h (module 'core'): ns3::FdReader::FdReader(ns3::FdReader const & arg0) [copy constructor]
cls.add_constructor([param('ns3::FdReader const &', 'arg0')])
## unix-fd-reader.h (module 'core'): ns3::FdReader::FdReader() [constructor]
cls.add_constructor([])
## unix-fd-reader.h (module 'core'): void ns3::FdReader::Start(int fd, ns3::Callback<void, unsigned char*, long, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> readCallback) [member function]
cls.add_method('Start',
'void',
[param('int', 'fd'), param('ns3::Callback< void, unsigned char *, long, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'readCallback')])
## unix-fd-reader.h (module 'core'): void ns3::FdReader::Stop() [member function]
cls.add_method('Stop',
'void',
[])
## unix-fd-reader.h (module 'core'): ns3::FdReader::Data ns3::FdReader::DoRead() [member function]
cls.add_method('DoRead',
'ns3::FdReader::Data',
[],
is_pure_virtual=True, visibility='protected', is_virtual=True)
return
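# FdReader::Start takes a file descriptor plus a Callback<void, uint8_t*,
# ssize_t>; the trailing ns3::empty slots in the registration above are how
# pybindgen spells ns-3's fixed nine-slot callback template when positions
# are unused. DoRead() is the pure-virtual hook a concrete reader implements.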
def register_Ns3Ipv4AddressChecker_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker(ns3::Ipv4AddressChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4AddressChecker const &', 'arg0')])
return
def register_Ns3Ipv4AddressValue_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4AddressValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4AddressValue const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4Address const & value) [constructor]
cls.add_constructor([param('ns3::Ipv4Address const &', 'value')])
## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4AddressValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4AddressValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv4Address',
[],
is_const=True)
## ipv4-address.h (module 'network'): std::string ns3::Ipv4AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4AddressValue::Set(ns3::Ipv4Address const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv4Address const &', 'value')])
return
def register_Ns3Ipv4MaskChecker_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker(ns3::Ipv4MaskChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4MaskChecker const &', 'arg0')])
return
def register_Ns3Ipv4MaskValue_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4MaskValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4MaskValue const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4Mask const & value) [constructor]
cls.add_constructor([param('ns3::Ipv4Mask const &', 'value')])
## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4MaskValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4MaskValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv4-address.h (module 'network'): ns3::Ipv4Mask ns3::Ipv4MaskValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv4Mask',
[],
is_const=True)
## ipv4-address.h (module 'network'): std::string ns3::Ipv4MaskValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4MaskValue::Set(ns3::Ipv4Mask const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv4Mask const &', 'value')])
return
def register_Ns3Ipv6AddressChecker_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker(ns3::Ipv6AddressChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv6AddressChecker const &', 'arg0')])
return
def register_Ns3Ipv6AddressValue_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6AddressValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv6AddressValue const &', 'arg0')])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6Address const & value) [constructor]
cls.add_constructor([param('ns3::Ipv6Address const &', 'value')])
## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6AddressValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6AddressValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv6Address',
[],
is_const=True)
## ipv6-address.h (module 'network'): std::string ns3::Ipv6AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6AddressValue::Set(ns3::Ipv6Address const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv6Address const &', 'value')])
return
def register_Ns3Ipv6PrefixChecker_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker(ns3::Ipv6PrefixChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv6PrefixChecker const &', 'arg0')])
return
def register_Ns3Ipv6PrefixValue_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6PrefixValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv6PrefixValue const &', 'arg0')])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6Prefix const & value) [constructor]
cls.add_constructor([param('ns3::Ipv6Prefix const &', 'value')])
## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6PrefixValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6PrefixValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix ns3::Ipv6PrefixValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv6Prefix',
[],
is_const=True)
## ipv6-address.h (module 'network'): std::string ns3::Ipv6PrefixValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6PrefixValue::Set(ns3::Ipv6Prefix const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv6Prefix const &', 'value')])
return
def register_Ns3Mac48AddressChecker_methods(root_module, cls):
## mac48-address.h (module 'network'): ns3::Mac48AddressChecker::Mac48AddressChecker() [constructor]
cls.add_constructor([])
## mac48-address.h (module 'network'): ns3::Mac48AddressChecker::Mac48AddressChecker(ns3::Mac48AddressChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Mac48AddressChecker const &', 'arg0')])
return
def register_Ns3Mac48AddressValue_methods(root_module, cls):
## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue() [constructor]
cls.add_constructor([])
## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue(ns3::Mac48AddressValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Mac48AddressValue const &', 'arg0')])
## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue(ns3::Mac48Address const & value) [constructor]
cls.add_constructor([param('ns3::Mac48Address const &', 'value')])
## mac48-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Mac48AddressValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## mac48-address.h (module 'network'): bool ns3::Mac48AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## mac48-address.h (module 'network'): ns3::Mac48Address ns3::Mac48AddressValue::Get() const [member function]
cls.add_method('Get',
'ns3::Mac48Address',
[],
is_const=True)
## mac48-address.h (module 'network'): std::string ns3::Mac48AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## mac48-address.h (module 'network'): void ns3::Mac48AddressValue::Set(ns3::Mac48Address const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Mac48Address const &', 'value')])
return
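# Each attribute type is registered as a Checker/Value pair (e.g.
# Ipv4AddressChecker + Ipv4AddressValue). A hedged usage sketch once the
# module is built; the attribute name and module paths below are
# illustrative, not taken from this file:
#   helper = ns.fd_net_device.FdNetDeviceHelper()
#   helper.SetAttribute('Address', ns.network.Mac48AddressValue(
#       ns.network.Mac48Address('00:00:00:00:00:01')))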
def register_Ns3NetDevice_methods(root_module, cls):
## net-device.h (module 'network'): ns3::NetDevice::NetDevice() [constructor]
cls.add_constructor([])
## net-device.h (module 'network'): ns3::NetDevice::NetDevice(ns3::NetDevice const & arg0) [copy constructor]
cls.add_constructor([param('ns3::NetDevice const &', 'arg0')])
## net-device.h (module 'network'): void ns3::NetDevice::AddLinkChangeCallback(ns3::Callback<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [member function]
cls.add_method('AddLinkChangeCallback',
'void',
[param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetAddress() const [member function]
cls.add_method('GetAddress',
'ns3::Address',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetBroadcast() const [member function]
cls.add_method('GetBroadcast',
'ns3::Address',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Ptr<ns3::Channel> ns3::NetDevice::GetChannel() const [member function]
cls.add_method('GetChannel',
'ns3::Ptr< ns3::Channel >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): uint32_t ns3::NetDevice::GetIfIndex() const [member function]
cls.add_method('GetIfIndex',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): uint16_t ns3::NetDevice::GetMtu() const [member function]
cls.add_method('GetMtu',
'uint16_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function]
cls.add_method('GetMulticast',
'ns3::Address',
[param('ns3::Ipv4Address', 'multicastGroup')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function]
cls.add_method('GetMulticast',
'ns3::Address',
[param('ns3::Ipv6Address', 'addr')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NetDevice::GetNode() const [member function]
cls.add_method('GetNode',
'ns3::Ptr< ns3::Node >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): static ns3::TypeId ns3::NetDevice::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsBridge() const [member function]
cls.add_method('IsBridge',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsBroadcast() const [member function]
cls.add_method('IsBroadcast',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsLinkUp() const [member function]
cls.add_method('IsLinkUp',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsPointToPoint() const [member function]
cls.add_method('IsPointToPoint',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::NeedsArp() const [member function]
cls.add_method('NeedsArp',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('Send',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::SendFrom(ns3::Ptr<ns3::Packet> packet, ns3::Address const & source, ns3::Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('SendFrom',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetAddress(ns3::Address address) [member function]
cls.add_method('SetAddress',
'void',
[param('ns3::Address', 'address')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetIfIndex(uint32_t const index) [member function]
cls.add_method('SetIfIndex',
'void',
[param('uint32_t const', 'index')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::SetMtu(uint16_t const mtu) [member function]
cls.add_method('SetMtu',
'bool',
[param('uint16_t const', 'mtu')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetNode(ns3::Ptr<ns3::Node> node) [member function]
cls.add_method('SetNode',
'void',
[param('ns3::Ptr< ns3::Node >', 'node')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetPromiscReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
cls.add_method('SetPromiscReceiveCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
cls.add_method('SetReceiveCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::SupportsSendFrom() const [member function]
cls.add_method('SupportsSendFrom',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3NixVector_methods(root_module, cls):
cls.add_output_stream_operator()
## nix-vector.h (module 'network'): ns3::NixVector::NixVector() [constructor]
cls.add_constructor([])
## nix-vector.h (module 'network'): ns3::NixVector::NixVector(ns3::NixVector const & o) [copy constructor]
cls.add_constructor([param('ns3::NixVector const &', 'o')])
## nix-vector.h (module 'network'): void ns3::NixVector::AddNeighborIndex(uint32_t newBits, uint32_t numberOfBits) [member function]
cls.add_method('AddNeighborIndex',
'void',
[param('uint32_t', 'newBits'), param('uint32_t', 'numberOfBits')])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::BitCount(uint32_t numberOfNeighbors) const [member function]
cls.add_method('BitCount',
'uint32_t',
[param('uint32_t', 'numberOfNeighbors')],
is_const=True)
## nix-vector.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::NixVector::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::NixVector >',
[],
is_const=True)
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Deserialize(uint32_t const * buffer, uint32_t size) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('uint32_t const *', 'buffer'), param('uint32_t', 'size')])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::ExtractNeighborIndex(uint32_t numberOfBits) [member function]
cls.add_method('ExtractNeighborIndex',
'uint32_t',
[param('uint32_t', 'numberOfBits')])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetRemainingBits() [member function]
cls.add_method('GetRemainingBits',
'uint32_t',
[])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Serialize(uint32_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint32_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
return
def register_Ns3Node_methods(root_module, cls):
## node.h (module 'network'): ns3::Node::Node(ns3::Node const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Node const &', 'arg0')])
## node.h (module 'network'): ns3::Node::Node() [constructor]
cls.add_constructor([])
## node.h (module 'network'): ns3::Node::Node(uint32_t systemId) [constructor]
cls.add_constructor([param('uint32_t', 'systemId')])
## node.h (module 'network'): uint32_t ns3::Node::AddApplication(ns3::Ptr<ns3::Application> application) [member function]
cls.add_method('AddApplication',
'uint32_t',
[param('ns3::Ptr< ns3::Application >', 'application')])
## node.h (module 'network'): uint32_t ns3::Node::AddDevice(ns3::Ptr<ns3::NetDevice> device) [member function]
cls.add_method('AddDevice',
'uint32_t',
[param('ns3::Ptr< ns3::NetDevice >', 'device')])
## node.h (module 'network'): static bool ns3::Node::ChecksumEnabled() [member function]
cls.add_method('ChecksumEnabled',
'bool',
[],
is_static=True)
## node.h (module 'network'): ns3::Ptr<ns3::Application> ns3::Node::GetApplication(uint32_t index) const [member function]
cls.add_method('GetApplication',
'ns3::Ptr< ns3::Application >',
[param('uint32_t', 'index')],
is_const=True)
## node.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::Node::GetDevice(uint32_t index) const [member function]
cls.add_method('GetDevice',
'ns3::Ptr< ns3::NetDevice >',
[param('uint32_t', 'index')],
is_const=True)
## node.h (module 'network'): uint32_t ns3::Node::GetId() const [member function]
cls.add_method('GetId',
'uint32_t',
[],
is_const=True)
## node.h (module 'network'): uint32_t ns3::Node::GetNApplications() const [member function]
cls.add_method('GetNApplications',
'uint32_t',
[],
is_const=True)
## node.h (module 'network'): uint32_t ns3::Node::GetNDevices() const [member function]
cls.add_method('GetNDevices',
'uint32_t',
[],
is_const=True)
## node.h (module 'network'): uint32_t ns3::Node::GetSystemId() const [member function]
cls.add_method('GetSystemId',
'uint32_t',
[],
is_const=True)
## node.h (module 'network'): static ns3::TypeId ns3::Node::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## node.h (module 'network'): void ns3::Node::RegisterDeviceAdditionListener(ns3::Callback<void,ns3::Ptr<ns3::NetDevice>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function]
cls.add_method('RegisterDeviceAdditionListener',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')])
## node.h (module 'network'): void ns3::Node::RegisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler, uint16_t protocolType, ns3::Ptr<ns3::NetDevice> device, bool promiscuous=false) [member function]
cls.add_method('RegisterProtocolHandler',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler'), param('uint16_t', 'protocolType'), param('ns3::Ptr< ns3::NetDevice >', 'device'), param('bool', 'promiscuous', default_value='false')])
## node.h (module 'network'): void ns3::Node::UnregisterDeviceAdditionListener(ns3::Callback<void,ns3::Ptr<ns3::NetDevice>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function]
cls.add_method('UnregisterDeviceAdditionListener',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')])
## node.h (module 'network'): void ns3::Node::UnregisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler) [member function]
cls.add_method('UnregisterProtocolHandler',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler')])
## node.h (module 'network'): void ns3::Node::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
## node.h (module 'network'): void ns3::Node::DoInitialize() [member function]
cls.add_method('DoInitialize',
'void',
[],
visibility='protected', is_virtual=True)
return
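# Note (hedged): a C++ default argument such as 'bool promiscuous=false' in
# RegisterProtocolHandler above is expressed as param(..., default_value='false'),
# so the generated Python binding should be callable without the trailing flag,
# e.g. node.RegisterProtocolHandler(handler, ptype, dev) -- an illustrative
# call, not one made anywhere in this file.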
def register_Ns3ObjectFactoryChecker_methods(root_module, cls):
## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker() [constructor]
cls.add_constructor([])
## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker(ns3::ObjectFactoryChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectFactoryChecker const &', 'arg0')])
return
def register_Ns3ObjectFactoryValue_methods(root_module, cls):
## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue() [constructor]
cls.add_constructor([])
## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactoryValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectFactoryValue const &', 'arg0')])
## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactory const & value) [constructor]
cls.add_constructor([param('ns3::ObjectFactory const &', 'value')])
## object-factory.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::ObjectFactoryValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## object-factory.h (module 'core'): bool ns3::ObjectFactoryValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## object-factory.h (module 'core'): ns3::ObjectFactory ns3::ObjectFactoryValue::Get() const [member function]
cls.add_method('Get',
'ns3::ObjectFactory',
[],
is_const=True)
## object-factory.h (module 'core'): std::string ns3::ObjectFactoryValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## object-factory.h (module 'core'): void ns3::ObjectFactoryValue::Set(ns3::ObjectFactory const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::ObjectFactory const &', 'value')])
return
def register_Ns3OutputStreamWrapper_methods(root_module, cls):
## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper::OutputStreamWrapper(ns3::OutputStreamWrapper const & arg0) [copy constructor]
cls.add_constructor([param('ns3::OutputStreamWrapper const &', 'arg0')])
## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper::OutputStreamWrapper(std::string filename, std::_Ios_Openmode filemode) [constructor]
cls.add_constructor([param('std::string', 'filename'), param('std::_Ios_Openmode', 'filemode')])
## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper::OutputStreamWrapper(std::ostream * os) [constructor]
cls.add_constructor([param('std::ostream *', 'os')])
## output-stream-wrapper.h (module 'network'): std::ostream * ns3::OutputStreamWrapper::GetStream() [member function]
cls.add_method('GetStream',
'std::ostream *',
[])
return
def register_Ns3Packet_methods(root_module, cls):
cls.add_output_stream_operator()
## packet.h (module 'network'): ns3::Packet::Packet() [constructor]
cls.add_constructor([])
## packet.h (module 'network'): ns3::Packet::Packet(ns3::Packet const & o) [copy constructor]
cls.add_constructor([param('ns3::Packet const &', 'o')])
## packet.h (module 'network'): ns3::Packet::Packet(uint32_t size) [constructor]
cls.add_constructor([param('uint32_t', 'size')])
## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size, bool magic) [constructor]
cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size'), param('bool', 'magic')])
## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size) [constructor]
cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## packet.h (module 'network'): void ns3::Packet::AddAtEnd(ns3::Ptr<const ns3::Packet> packet) [member function]
cls.add_method('AddAtEnd',
'void',
[param('ns3::Ptr< ns3::Packet const >', 'packet')])
## packet.h (module 'network'): void ns3::Packet::AddByteTag(ns3::Tag const & tag) const [member function]
cls.add_method('AddByteTag',
'void',
[param('ns3::Tag const &', 'tag')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::AddHeader(ns3::Header const & header) [member function]
cls.add_method('AddHeader',
'void',
[param('ns3::Header const &', 'header')])
## packet.h (module 'network'): void ns3::Packet::AddPacketTag(ns3::Tag const & tag) const [member function]
cls.add_method('AddPacketTag',
'void',
[param('ns3::Tag const &', 'tag')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::AddPaddingAtEnd(uint32_t size) [member function]
cls.add_method('AddPaddingAtEnd',
'void',
[param('uint32_t', 'size')])
## packet.h (module 'network'): void ns3::Packet::AddTrailer(ns3::Trailer const & trailer) [member function]
cls.add_method('AddTrailer',
'void',
[param('ns3::Trailer const &', 'trailer')])
## packet.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::Packet::BeginItem() const [member function]
cls.add_method('BeginItem',
'ns3::PacketMetadata::ItemIterator',
[],
is_const=True)
## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::Packet >',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::CopyData(uint8_t * buffer, uint32_t size) const [member function]
cls.add_method('CopyData',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::CopyData(std::ostream * os, uint32_t size) const [member function]
cls.add_method('CopyData',
'void',
[param('std::ostream *', 'os'), param('uint32_t', 'size')],
is_const=True)
## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::CreateFragment(uint32_t start, uint32_t length) const [member function]
cls.add_method('CreateFragment',
'ns3::Ptr< ns3::Packet >',
[param('uint32_t', 'start'), param('uint32_t', 'length')],
is_const=True)
## packet.h (module 'network'): static void ns3::Packet::EnableChecking() [member function]
cls.add_method('EnableChecking',
'void',
[],
is_static=True)
## packet.h (module 'network'): static void ns3::Packet::EnablePrinting() [member function]
cls.add_method('EnablePrinting',
'void',
[],
is_static=True)
## packet.h (module 'network'): bool ns3::Packet::FindFirstMatchingByteTag(ns3::Tag & tag) const [member function]
cls.add_method('FindFirstMatchingByteTag',
'bool',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): ns3::ByteTagIterator ns3::Packet::GetByteTagIterator() const [member function]
cls.add_method('GetByteTagIterator',
'ns3::ByteTagIterator',
[],
is_const=True)
## packet.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::Packet::GetNixVector() const [member function]
cls.add_method('GetNixVector',
'ns3::Ptr< ns3::NixVector >',
[],
is_const=True)
## packet.h (module 'network'): ns3::PacketTagIterator ns3::Packet::GetPacketTagIterator() const [member function]
cls.add_method('GetPacketTagIterator',
'ns3::PacketTagIterator',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::GetSize() const [member function]
cls.add_method('GetSize',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): uint64_t ns3::Packet::GetUid() const [member function]
cls.add_method('GetUid',
'uint64_t',
[],
is_const=True)
## packet.h (module 'network'): uint8_t const * ns3::Packet::PeekData() const [member function]
cls.add_method('PeekData',
'uint8_t const *',
[],
deprecated=True, is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::PeekHeader(ns3::Header & header) const [member function]
cls.add_method('PeekHeader',
'uint32_t',
[param('ns3::Header &', 'header')],
is_const=True)
## packet.h (module 'network'): bool ns3::Packet::PeekPacketTag(ns3::Tag & tag) const [member function]
cls.add_method('PeekPacketTag',
'bool',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::PeekTrailer(ns3::Trailer & trailer) [member function]
cls.add_method('PeekTrailer',
'uint32_t',
[param('ns3::Trailer &', 'trailer')])
## packet.h (module 'network'): void ns3::Packet::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::PrintByteTags(std::ostream & os) const [member function]
cls.add_method('PrintByteTags',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::PrintPacketTags(std::ostream & os) const [member function]
cls.add_method('PrintPacketTags',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::RemoveAllByteTags() [member function]
cls.add_method('RemoveAllByteTags',
'void',
[])
## packet.h (module 'network'): void ns3::Packet::RemoveAllPacketTags() [member function]
cls.add_method('RemoveAllPacketTags',
'void',
[])
## packet.h (module 'network'): void ns3::Packet::RemoveAtEnd(uint32_t size) [member function]
cls.add_method('RemoveAtEnd',
'void',
[param('uint32_t', 'size')])
## packet.h (module 'network'): void ns3::Packet::RemoveAtStart(uint32_t size) [member function]
cls.add_method('RemoveAtStart',
'void',
[param('uint32_t', 'size')])
## packet.h (module 'network'): uint32_t ns3::Packet::RemoveHeader(ns3::Header & header) [member function]
cls.add_method('RemoveHeader',
'uint32_t',
[param('ns3::Header &', 'header')])
## packet.h (module 'network'): bool ns3::Packet::RemovePacketTag(ns3::Tag & tag) [member function]
cls.add_method('RemovePacketTag',
'bool',
[param('ns3::Tag &', 'tag')])
## packet.h (module 'network'): uint32_t ns3::Packet::RemoveTrailer(ns3::Trailer & trailer) [member function]
cls.add_method('RemoveTrailer',
'uint32_t',
[param('ns3::Trailer &', 'trailer')])
## packet.h (module 'network'): uint32_t ns3::Packet::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::SetNixVector(ns3::Ptr<ns3::NixVector> arg0) [member function]
cls.add_method('SetNixVector',
'void',
[param('ns3::Ptr< ns3::NixVector >', 'arg0')])
return
def register_Ns3PlanetLabFdNetDeviceHelper_methods(root_module, cls):
## planetlab-fd-net-device-helper.h (module 'fd-net-device'): ns3::PlanetLabFdNetDeviceHelper::PlanetLabFdNetDeviceHelper(ns3::PlanetLabFdNetDeviceHelper const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PlanetLabFdNetDeviceHelper const &', 'arg0')])
## planetlab-fd-net-device-helper.h (module 'fd-net-device'): ns3::PlanetLabFdNetDeviceHelper::PlanetLabFdNetDeviceHelper() [constructor]
cls.add_constructor([])
## planetlab-fd-net-device-helper.h (module 'fd-net-device'): void ns3::PlanetLabFdNetDeviceHelper::SetTapIpAddress(ns3::Ipv4Address address) [member function]
cls.add_method('SetTapIpAddress',
'void',
[param('ns3::Ipv4Address', 'address')])
## planetlab-fd-net-device-helper.h (module 'fd-net-device'): void ns3::PlanetLabFdNetDeviceHelper::SetTapMask(ns3::Ipv4Mask mask) [member function]
cls.add_method('SetTapMask',
'void',
[param('ns3::Ipv4Mask', 'mask')])
## planetlab-fd-net-device-helper.h (module 'fd-net-device'): int ns3::PlanetLabFdNetDeviceHelper::CreateFileDescriptor() const [member function]
cls.add_method('CreateFileDescriptor',
'int',
[],
is_const=True, visibility='protected', is_virtual=True)
## planetlab-fd-net-device-helper.h (module 'fd-net-device'): ns3::Ptr<ns3::NetDevice> ns3::PlanetLabFdNetDeviceHelper::InstallPriv(ns3::Ptr<ns3::Node> node) const [member function]
cls.add_method('InstallPriv',
'ns3::Ptr< ns3::NetDevice >',
[param('ns3::Ptr< ns3::Node >', 'node')],
is_const=True, visibility='protected', is_virtual=True)
## planetlab-fd-net-device-helper.h (module 'fd-net-device'): void ns3::PlanetLabFdNetDeviceHelper::SetFileDescriptor(ns3::Ptr<ns3::FdNetDevice> device) const [member function]
cls.add_method('SetFileDescriptor',
'void',
[param('ns3::Ptr< ns3::FdNetDevice >', 'device')],
is_const=True, visibility='protected', is_virtual=True)
return
def register_Ns3TapFdNetDeviceHelper_methods(root_module, cls):
## tap-fd-net-device-helper.h (module 'fd-net-device'): ns3::TapFdNetDeviceHelper::TapFdNetDeviceHelper(ns3::TapFdNetDeviceHelper const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TapFdNetDeviceHelper const &', 'arg0')])
## tap-fd-net-device-helper.h (module 'fd-net-device'): ns3::TapFdNetDeviceHelper::TapFdNetDeviceHelper() [constructor]
cls.add_constructor([])
## tap-fd-net-device-helper.h (module 'fd-net-device'): void ns3::TapFdNetDeviceHelper::SetModePi(bool pi) [member function]
cls.add_method('SetModePi',
'void',
[param('bool', 'pi')])
## tap-fd-net-device-helper.h (module 'fd-net-device'): void ns3::TapFdNetDeviceHelper::SetTapIpv4Address(ns3::Ipv4Address address) [member function]
cls.add_method('SetTapIpv4Address',
'void',
[param('ns3::Ipv4Address', 'address')])
## tap-fd-net-device-helper.h (module 'fd-net-device'): void ns3::TapFdNetDeviceHelper::SetTapIpv4Mask(ns3::Ipv4Mask mask) [member function]
cls.add_method('SetTapIpv4Mask',
'void',
[param('ns3::Ipv4Mask', 'mask')])
## tap-fd-net-device-helper.h (module 'fd-net-device'): void ns3::TapFdNetDeviceHelper::SetTapIpv6Address(ns3::Ipv6Address address) [member function]
cls.add_method('SetTapIpv6Address',
'void',
[param('ns3::Ipv6Address', 'address')])
## tap-fd-net-device-helper.h (module 'fd-net-device'): void ns3::TapFdNetDeviceHelper::SetTapIpv6Prefix(int prefix) [member function]
cls.add_method('SetTapIpv6Prefix',
'void',
[param('int', 'prefix')])
## tap-fd-net-device-helper.h (module 'fd-net-device'): void ns3::TapFdNetDeviceHelper::SetTapMacAddress(ns3::Mac48Address mac) [member function]
cls.add_method('SetTapMacAddress',
'void',
[param('ns3::Mac48Address', 'mac')])
## tap-fd-net-device-helper.h (module 'fd-net-device'): int ns3::TapFdNetDeviceHelper::CreateFileDescriptor() const [member function]
cls.add_method('CreateFileDescriptor',
'int',
[],
is_const=True, visibility='protected', is_virtual=True)
## tap-fd-net-device-helper.h (module 'fd-net-device'): ns3::Ptr<ns3::NetDevice> ns3::TapFdNetDeviceHelper::InstallPriv(ns3::Ptr<ns3::Node> node) const [member function]
cls.add_method('InstallPriv',
'ns3::Ptr< ns3::NetDevice >',
[param('ns3::Ptr< ns3::Node >', 'node')],
is_const=True, visibility='protected', is_virtual=True)
## tap-fd-net-device-helper.h (module 'fd-net-device'): void ns3::TapFdNetDeviceHelper::SetFileDescriptor(ns3::Ptr<ns3::FdNetDevice> device) const [member function]
cls.add_method('SetFileDescriptor',
'void',
[param('ns3::Ptr< ns3::FdNetDevice >', 'device')],
is_const=True, visibility='protected', is_virtual=True)
return
def register_Ns3TimeChecker_methods(root_module, cls):
## nstime.h (module 'core'): ns3::TimeChecker::TimeChecker() [constructor]
cls.add_constructor([])
## nstime.h (module 'core'): ns3::TimeChecker::TimeChecker(ns3::TimeChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TimeChecker const &', 'arg0')])
return
def register_Ns3TimeValue_methods(root_module, cls):
## nstime.h (module 'core'): ns3::TimeValue::TimeValue() [constructor]
cls.add_constructor([])
## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::TimeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TimeValue const &', 'arg0')])
## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::Time const & value) [constructor]
cls.add_constructor([param('ns3::Time const &', 'value')])
## nstime.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TimeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## nstime.h (module 'core'): bool ns3::TimeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## nstime.h (module 'core'): ns3::Time ns3::TimeValue::Get() const [member function]
cls.add_method('Get',
'ns3::Time',
[],
is_const=True)
## nstime.h (module 'core'): std::string ns3::TimeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## nstime.h (module 'core'): void ns3::TimeValue::Set(ns3::Time const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Time const &', 'value')])
return
def register_Ns3TypeIdChecker_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')])
return
def register_Ns3TypeIdValue_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor]
cls.add_constructor([param('ns3::TypeId const &', 'value')])
## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function]
cls.add_method('Get',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::TypeId const &', 'value')])
return
def register_Ns3AddressChecker_methods(root_module, cls):
## address.h (module 'network'): ns3::AddressChecker::AddressChecker() [constructor]
cls.add_constructor([])
## address.h (module 'network'): ns3::AddressChecker::AddressChecker(ns3::AddressChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AddressChecker const &', 'arg0')])
return
def register_Ns3AddressValue_methods(root_module, cls):
## address.h (module 'network'): ns3::AddressValue::AddressValue() [constructor]
cls.add_constructor([])
## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::AddressValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AddressValue const &', 'arg0')])
## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::Address const & value) [constructor]
cls.add_constructor([param('ns3::Address const &', 'value')])
## address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::AddressValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## address.h (module 'network'): bool ns3::AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## address.h (module 'network'): ns3::Address ns3::AddressValue::Get() const [member function]
cls.add_method('Get',
'ns3::Address',
[],
is_const=True)
## address.h (module 'network'): std::string ns3::AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## address.h (module 'network'): void ns3::AddressValue::Set(ns3::Address const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Address const &', 'value')])
return
def register_Ns3FdNetDevice_methods(root_module, cls):
## fd-net-device.h (module 'fd-net-device'): ns3::FdNetDevice::FdNetDevice() [constructor]
cls.add_constructor([])
## fd-net-device.h (module 'fd-net-device'): void ns3::FdNetDevice::AddLinkChangeCallback(ns3::Callback<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [member function]
cls.add_method('AddLinkChangeCallback',
'void',
[param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')],
is_virtual=True)
## fd-net-device.h (module 'fd-net-device'): ns3::Address ns3::FdNetDevice::GetAddress() const [member function]
cls.add_method('GetAddress',
'ns3::Address',
[],
is_const=True, is_virtual=True)
## fd-net-device.h (module 'fd-net-device'): ns3::Address ns3::FdNetDevice::GetBroadcast() const [member function]
cls.add_method('GetBroadcast',
'ns3::Address',
[],
is_const=True, is_virtual=True)
## fd-net-device.h (module 'fd-net-device'): ns3::Ptr<ns3::Channel> ns3::FdNetDevice::GetChannel() const [member function]
cls.add_method('GetChannel',
'ns3::Ptr< ns3::Channel >',
[],
is_const=True, is_virtual=True)
## fd-net-device.h (module 'fd-net-device'): ns3::FdNetDevice::EncapsulationMode ns3::FdNetDevice::GetEncapsulationMode() const [member function]
cls.add_method('GetEncapsulationMode',
'ns3::FdNetDevice::EncapsulationMode',
[],
is_const=True)
## fd-net-device.h (module 'fd-net-device'): uint32_t ns3::FdNetDevice::GetIfIndex() const [member function]
cls.add_method('GetIfIndex',
'uint32_t',
[],
is_const=True, is_virtual=True)
## fd-net-device.h (module 'fd-net-device'): uint16_t ns3::FdNetDevice::GetMtu() const [member function]
cls.add_method('GetMtu',
'uint16_t',
[],
is_const=True, is_virtual=True)
## fd-net-device.h (module 'fd-net-device'): ns3::Address ns3::FdNetDevice::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function]
cls.add_method('GetMulticast',
'ns3::Address',
[param('ns3::Ipv4Address', 'multicastGroup')],
is_const=True, is_virtual=True)
## fd-net-device.h (module 'fd-net-device'): ns3::Address ns3::FdNetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function]
cls.add_method('GetMulticast',
'ns3::Address',
[param('ns3::Ipv6Address', 'addr')],
is_const=True, is_virtual=True)
## fd-net-device.h (module 'fd-net-device'): ns3::Ptr<ns3::Node> ns3::FdNetDevice::GetNode() const [member function]
cls.add_method('GetNode',
'ns3::Ptr< ns3::Node >',
[],
is_const=True, is_virtual=True)
## fd-net-device.h (module 'fd-net-device'): static ns3::TypeId ns3::FdNetDevice::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## fd-net-device.h (module 'fd-net-device'): bool ns3::FdNetDevice::IsBridge() const [member function]
cls.add_method('IsBridge',
'bool',
[],
is_const=True, is_virtual=True)
## fd-net-device.h (module 'fd-net-device'): bool ns3::FdNetDevice::IsBroadcast() const [member function]
cls.add_method('IsBroadcast',
'bool',
[],
is_const=True, is_virtual=True)
## fd-net-device.h (module 'fd-net-device'): bool ns3::FdNetDevice::IsLinkUp() const [member function]
cls.add_method('IsLinkUp',
'bool',
[],
is_const=True, is_virtual=True)
## fd-net-device.h (module 'fd-net-device'): bool ns3::FdNetDevice::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_const=True, is_virtual=True)
## fd-net-device.h (module 'fd-net-device'): bool ns3::FdNetDevice::IsPointToPoint() const [member function]
cls.add_method('IsPointToPoint',
'bool',
[],
is_const=True, is_virtual=True)
## fd-net-device.h (module 'fd-net-device'): bool ns3::FdNetDevice::NeedsArp() const [member function]
cls.add_method('NeedsArp',
'bool',
[],
is_const=True, is_virtual=True)
## fd-net-device.h (module 'fd-net-device'): bool ns3::FdNetDevice::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('Send',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_virtual=True)
## fd-net-device.h (module 'fd-net-device'): bool ns3::FdNetDevice::SendFrom(ns3::Ptr<ns3::Packet> packet, ns3::Address const & source, ns3::Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('SendFrom',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_virtual=True)
## fd-net-device.h (module 'fd-net-device'): void ns3::FdNetDevice::SetAddress(ns3::Address address) [member function]
cls.add_method('SetAddress',
'void',
[param('ns3::Address', 'address')],
is_virtual=True)
## fd-net-device.h (module 'fd-net-device'): void ns3::FdNetDevice::SetEncapsulationMode(ns3::FdNetDevice::EncapsulationMode mode) [member function]
cls.add_method('SetEncapsulationMode',
'void',
[param('ns3::FdNetDevice::EncapsulationMode', 'mode')])
## fd-net-device.h (module 'fd-net-device'): void ns3::FdNetDevice::SetFileDescriptor(int fd) [member function]
cls.add_method('SetFileDescriptor',
'void',
[param('int', 'fd')])
## fd-net-device.h (module 'fd-net-device'): void ns3::FdNetDevice::SetIfIndex(uint32_t const index) [member function]
cls.add_method('SetIfIndex',
'void',
[param('uint32_t const', 'index')],
is_virtual=True)
## fd-net-device.h (module 'fd-net-device'): void ns3::FdNetDevice::SetIsBroadcast(bool broadcast) [member function]
cls.add_method('SetIsBroadcast',
'void',
[param('bool', 'broadcast')],
is_virtual=True)
## fd-net-device.h (module 'fd-net-device'): void ns3::FdNetDevice::SetIsMulticast(bool multicast) [member function]
cls.add_method('SetIsMulticast',
'void',
[param('bool', 'multicast')],
is_virtual=True)
## fd-net-device.h (module 'fd-net-device'): bool ns3::FdNetDevice::SetMtu(uint16_t const mtu) [member function]
cls.add_method('SetMtu',
'bool',
[param('uint16_t const', 'mtu')],
is_virtual=True)
## fd-net-device.h (module 'fd-net-device'): void ns3::FdNetDevice::SetNode(ns3::Ptr<ns3::Node> node) [member function]
cls.add_method('SetNode',
'void',
[param('ns3::Ptr< ns3::Node >', 'node')],
is_virtual=True)
## fd-net-device.h (module 'fd-net-device'): void ns3::FdNetDevice::SetPromiscReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
cls.add_method('SetPromiscReceiveCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_virtual=True)
## fd-net-device.h (module 'fd-net-device'): void ns3::FdNetDevice::SetReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
cls.add_method('SetReceiveCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_virtual=True)
## fd-net-device.h (module 'fd-net-device'): void ns3::FdNetDevice::Start(ns3::Time tStart) [member function]
cls.add_method('Start',
'void',
[param('ns3::Time', 'tStart')])
## fd-net-device.h (module 'fd-net-device'): void ns3::FdNetDevice::Stop(ns3::Time tStop) [member function]
cls.add_method('Stop',
'void',
[param('ns3::Time', 'tStop')])
## fd-net-device.h (module 'fd-net-device'): bool ns3::FdNetDevice::SupportsSendFrom() const [member function]
cls.add_method('SupportsSendFrom',
'bool',
[],
is_const=True, is_virtual=True)
## fd-net-device.h (module 'fd-net-device'): void ns3::FdNetDevice::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
return
def register_Ns3FdNetDeviceFdReader_methods(root_module, cls):
## fd-net-device.h (module 'fd-net-device'): ns3::FdNetDeviceFdReader::FdNetDeviceFdReader(ns3::FdNetDeviceFdReader const & arg0) [copy constructor]
cls.add_constructor([param('ns3::FdNetDeviceFdReader const &', 'arg0')])
## fd-net-device.h (module 'fd-net-device'): ns3::FdNetDeviceFdReader::FdNetDeviceFdReader() [constructor]
cls.add_constructor([])
## fd-net-device.h (module 'fd-net-device'): void ns3::FdNetDeviceFdReader::SetBufferSize(uint32_t bufferSize) [member function]
cls.add_method('SetBufferSize',
'void',
[param('uint32_t', 'bufferSize')])
## fd-net-device.h (module 'fd-net-device'): ns3::FdReader::Data ns3::FdNetDeviceFdReader::DoRead() [member function]
cls.add_method('DoRead',
'ns3::FdReader::Data',
[],
visibility='private', is_virtual=True)
return
def register_functions(root_module):
module = root_module
register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module)
return
def register_functions_ns3_FatalImpl(module, root_module):
return
def main():
out = FileCodeSink(sys.stdout)
root_module = module_init()
register_types(root_module)
register_methods(root_module)
register_functions(root_module)
root_module.generate(out)
if __name__ == '__main__':
main()
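# Hedged usage note: executing this script writes the generated C++ binding
# source to stdout (see FileCodeSink(sys.stdout) in main() above); a typical,
# illustrative invocation during an ns-3 build would be:
#
#   python ns3modulegen.py > fd-net-device-module-bindings.cc  # names assumed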
| gpl-2.0 |
aneeshusa/servo | tests/wpt/web-platform-tests/tools/html5lib/html5lib/serializer/htmlserializer.py | 423 | 12897 | from __future__ import absolute_import, division, unicode_literals
from six import text_type
import gettext
_ = gettext.gettext
try:
from functools import reduce
except ImportError:
pass
from ..constants import voidElements, booleanAttributes, spaceCharacters
from ..constants import rcdataElements, entities, xmlEntities
from .. import utils
from xml.sax.saxutils import escape
spaceCharacters = "".join(spaceCharacters)
try:
from codecs import register_error, xmlcharrefreplace_errors
except ImportError:
unicode_encode_errors = "strict"
else:
unicode_encode_errors = "htmlentityreplace"
encode_entity_map = {}
is_ucs4 = len("\U0010FFFF") == 1
for k, v in list(entities.items()):
# skip multi-character entities
if ((is_ucs4 and len(v) > 1) or
(not is_ucs4 and len(v) > 2)):
continue
if v != "&":
if len(v) == 2:
v = utils.surrogatePairToCodepoint(v)
else:
v = ord(v)
if v not in encode_entity_map or k.islower():
                # prefer &lt; over &LT; and similarly for &amp;, &gt;, etc.
encode_entity_map[v] = k
def htmlentityreplace_errors(exc):
if isinstance(exc, (UnicodeEncodeError, UnicodeTranslateError)):
res = []
codepoints = []
skip = False
for i, c in enumerate(exc.object[exc.start:exc.end]):
if skip:
skip = False
continue
index = i + exc.start
if utils.isSurrogatePair(exc.object[index:min([exc.end, index + 2])]):
codepoint = utils.surrogatePairToCodepoint(exc.object[index:index + 2])
skip = True
else:
codepoint = ord(c)
codepoints.append(codepoint)
for cp in codepoints:
e = encode_entity_map.get(cp)
if e:
res.append("&")
res.append(e)
if not e.endswith(";"):
res.append(";")
else:
res.append("&#x%s;" % (hex(cp)[2:]))
return ("".join(res), exc.end)
else:
return xmlcharrefreplace_errors(exc)
register_error(unicode_encode_errors, htmlentityreplace_errors)
del register_error
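# Hedged example of the codec error handler registered above (the expected
# output is inferred from the mapping logic, not asserted anywhere here):
#
#   "café".encode("ascii", "htmlentityreplace")    # -> b'caf&eacute;'
#   "\u2603".encode("ascii", "htmlentityreplace")  # -> b'&#x2603;' (no name)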
class HTMLSerializer(object):
# attribute quoting options
quote_attr_values = False
quote_char = '"'
use_best_quote_char = True
# tag syntax options
omit_optional_tags = True
minimize_boolean_attributes = True
use_trailing_solidus = False
space_before_trailing_solidus = True
# escaping options
escape_lt_in_attrs = False
escape_rcdata = False
resolve_entities = True
# miscellaneous options
alphabetical_attributes = False
inject_meta_charset = True
strip_whitespace = False
sanitize = False
options = ("quote_attr_values", "quote_char", "use_best_quote_char",
"omit_optional_tags", "minimize_boolean_attributes",
"use_trailing_solidus", "space_before_trailing_solidus",
"escape_lt_in_attrs", "escape_rcdata", "resolve_entities",
"alphabetical_attributes", "inject_meta_charset",
"strip_whitespace", "sanitize")
def __init__(self, **kwargs):
"""Initialize HTMLSerializer.
Keyword options (default given first unless specified) include:
inject_meta_charset=True|False
          Whether to insert a meta element to define the character set of the
document.
quote_attr_values=True|False
Whether to quote attribute values that don't require quoting
per HTML5 parsing rules.
quote_char=u'"'|u"'"
Use given quote character for attribute quoting. Default is to
use double quote unless attribute value contains a double quote,
in which case single quotes are used instead.
escape_lt_in_attrs=False|True
Whether to escape < in attribute values.
escape_rcdata=False|True
Whether to escape characters that need to be escaped within normal
elements within rcdata elements such as style.
resolve_entities=True|False
Whether to resolve named character entities that appear in the
          source tree. The XML predefined entities &lt; &gt; &amp; &quot; &apos;
are unaffected by this setting.
strip_whitespace=False|True
Whether to remove semantically meaningless whitespace. (This
compresses all whitespace to a single space except within pre.)
minimize_boolean_attributes=True|False
Shortens boolean attributes to give just the attribute value,
for example <input disabled="disabled"> becomes <input disabled>.
use_trailing_solidus=False|True
Includes a close-tag slash at the end of the start tag of void
elements (empty elements whose end tag is forbidden). E.g. <hr/>.
space_before_trailing_solidus=True|False
Places a space immediately before the closing slash in a tag
using a trailing solidus. E.g. <hr />. Requires use_trailing_solidus.
sanitize=False|True
Strip all unsafe or unknown constructs from output.
See `html5lib user documentation`_
omit_optional_tags=True|False
Omit start/end tags that are optional.
alphabetical_attributes=False|True
Reorder attributes to be in alphabetical order.
.. _html5lib user documentation: http://code.google.com/p/html5lib/wiki/UserDocumentation
"""
if 'quote_char' in kwargs:
self.use_best_quote_char = False
for attr in self.options:
setattr(self, attr, kwargs.get(attr, getattr(self, attr)))
self.errors = []
self.strict = False
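    # For example (hedged): HTMLSerializer(quote_attr_values=True,
    # quote_char="'") would emit an attribute value c as b='c', since
    # passing quote_char disables use_best_quote_char above.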
def encode(self, string):
assert(isinstance(string, text_type))
if self.encoding:
return string.encode(self.encoding, unicode_encode_errors)
else:
return string
def encodeStrict(self, string):
assert(isinstance(string, text_type))
if self.encoding:
return string.encode(self.encoding, "strict")
else:
return string
def serialize(self, treewalker, encoding=None):
self.encoding = encoding
in_cdata = False
self.errors = []
if encoding and self.inject_meta_charset:
from ..filters.inject_meta_charset import Filter
treewalker = Filter(treewalker, encoding)
# WhitespaceFilter should be used before OptionalTagFilter
        # for maximum efficiency of the latter filter
if self.strip_whitespace:
from ..filters.whitespace import Filter
treewalker = Filter(treewalker)
if self.sanitize:
from ..filters.sanitizer import Filter
treewalker = Filter(treewalker)
if self.omit_optional_tags:
from ..filters.optionaltags import Filter
treewalker = Filter(treewalker)
# Alphabetical attributes must be last, as other filters
# could add attributes and alter the order
if self.alphabetical_attributes:
from ..filters.alphabeticalattributes import Filter
treewalker = Filter(treewalker)
for token in treewalker:
type = token["type"]
if type == "Doctype":
doctype = "<!DOCTYPE %s" % token["name"]
if token["publicId"]:
doctype += ' PUBLIC "%s"' % token["publicId"]
elif token["systemId"]:
doctype += " SYSTEM"
if token["systemId"]:
if token["systemId"].find('"') >= 0:
if token["systemId"].find("'") >= 0:
                            self.serializeError(_("System identifier contains both single and double quote characters"))
quote_char = "'"
else:
quote_char = '"'
doctype += " %s%s%s" % (quote_char, token["systemId"], quote_char)
doctype += ">"
yield self.encodeStrict(doctype)
elif type in ("Characters", "SpaceCharacters"):
if type == "SpaceCharacters" or in_cdata:
if in_cdata and token["data"].find("</") >= 0:
self.serializeError(_("Unexpected </ in CDATA"))
yield self.encode(token["data"])
else:
yield self.encode(escape(token["data"]))
elif type in ("StartTag", "EmptyTag"):
name = token["name"]
yield self.encodeStrict("<%s" % name)
if name in rcdataElements and not self.escape_rcdata:
in_cdata = True
elif in_cdata:
self.serializeError(_("Unexpected child element of a CDATA element"))
for (attr_namespace, attr_name), attr_value in token["data"].items():
# TODO: Add namespace support here
k = attr_name
v = attr_value
yield self.encodeStrict(' ')
yield self.encodeStrict(k)
if not self.minimize_boolean_attributes or \
(k not in booleanAttributes.get(name, tuple())
and k not in booleanAttributes.get("", tuple())):
yield self.encodeStrict("=")
if self.quote_attr_values or not v:
quote_attr = True
else:
quote_attr = reduce(lambda x, y: x or (y in v),
spaceCharacters + ">\"'=", False)
                        v = v.replace("&", "&amp;")
if self.escape_lt_in_attrs:
                            v = v.replace("<", "&lt;")
if quote_attr:
quote_char = self.quote_char
if self.use_best_quote_char:
if "'" in v and '"' not in v:
quote_char = '"'
elif '"' in v and "'" not in v:
quote_char = "'"
if quote_char == "'":
                                v = v.replace("'", "&#39;")
else:
                                v = v.replace('"', "&quot;")
yield self.encodeStrict(quote_char)
yield self.encode(v)
yield self.encodeStrict(quote_char)
else:
yield self.encode(v)
if name in voidElements and self.use_trailing_solidus:
if self.space_before_trailing_solidus:
yield self.encodeStrict(" /")
else:
yield self.encodeStrict("/")
yield self.encode(">")
elif type == "EndTag":
name = token["name"]
if name in rcdataElements:
in_cdata = False
elif in_cdata:
self.serializeError(_("Unexpected child element of a CDATA element"))
yield self.encodeStrict("</%s>" % name)
elif type == "Comment":
data = token["data"]
if data.find("--") >= 0:
self.serializeError(_("Comment contains --"))
yield self.encodeStrict("<!--%s-->" % token["data"])
elif type == "Entity":
name = token["name"]
key = name + ";"
if key not in entities:
self.serializeError(_("Entity %s not recognized" % name))
if self.resolve_entities and key not in xmlEntities:
data = entities[key]
else:
data = "&%s;" % name
yield self.encodeStrict(data)
else:
self.serializeError(token["data"])
def render(self, treewalker, encoding=None):
if encoding:
return b"".join(list(self.serialize(treewalker, encoding)))
else:
return "".join(list(self.serialize(treewalker)))
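    # Hedged usage sketch of render() with the html5lib public API (input
    # markup is illustrative):
    #
    #   import html5lib
    #   from html5lib import treewalkers
    #   dom = html5lib.parse("<p class=x>Hi")
    #   walker = treewalkers.getTreeWalker("etree")
    #   html = HTMLSerializer(omit_optional_tags=False).render(walker(dom))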
def serializeError(self, data="XXX ERROR MESSAGE NEEDED"):
# XXX The idea is to make data mandatory.
self.errors.append(data)
if self.strict:
raise SerializeError
class SerializeError(Exception):
"""Error in serialized tree"""
pass
| mpl-2.0 |
xifle/home-assistant | homeassistant/components/climate/heatmiser.py | 22 | 3369 | """
Support for the PRT Heatmiser themostats using the V3 protocol.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/climate.heatmiser/
"""
import logging
import voluptuous as vol
from homeassistant.components.climate import ClimateDevice, PLATFORM_SCHEMA
from homeassistant.const import (
TEMP_CELSIUS, ATTR_TEMPERATURE, CONF_PORT, CONF_NAME, CONF_ID)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['heatmiserV3==0.9.1']
_LOGGER = logging.getLogger(__name__)
CONF_IPADDRESS = 'ipaddress'
CONF_TSTATS = 'tstats'
TSTATS_SCHEMA = vol.Schema({
vol.Required(CONF_ID): cv.string,
vol.Required(CONF_NAME): cv.string,
})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_IPADDRESS): cv.string,
vol.Required(CONF_PORT): cv.port,
vol.Required(CONF_TSTATS, default={}):
vol.Schema({cv.string: TSTATS_SCHEMA}),
})
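# A configuration sketch this schema should accept (address, port and ids
# are illustrative):
#
# climate:
#   - platform: heatmiser
#     ipaddress: 192.168.1.57
#     port: 1024
#     tstats:
#       living_room:
#         id: 1
#         name: Living Room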
# pylint: disable=unused-variable
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the heatmiser thermostat."""
from heatmiserV3 import heatmiser, connection
ipaddress = config.get(CONF_IPADDRESS)
port = str(config.get(CONF_PORT))
tstats = config.get(CONF_TSTATS)
serport = connection.connection(ipaddress, port)
serport.open()
for thermostat, tstat in tstats.items():
add_devices([
HeatmiserV3Thermostat(
heatmiser, tstat.get(CONF_ID), tstat.get(CONF_NAME), serport)
])
return
class HeatmiserV3Thermostat(ClimateDevice):
"""Representation of a HeatmiserV3 thermostat."""
def __init__(self, heatmiser, device, name, serport):
"""Initialize the thermostat."""
self.heatmiser = heatmiser
self.device = device
self.serport = serport
self._current_temperature = None
self._name = name
self._id = device
self.dcb = None
self.update()
self._target_temperature = int(self.dcb.get('roomset'))
@property
def name(self):
"""Return the name of the thermostat, if any."""
return self._name
@property
def temperature_unit(self):
"""Return the unit of measurement which this thermostat uses."""
return TEMP_CELSIUS
@property
def current_temperature(self):
"""Return the current temperature."""
if self.dcb is not None:
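            # The DCB reports the floor sensor as two bytes (the trailing
            # space in 'floortemplow ' is the heatmiserV3 library's own key);
            # the combined value is in tenths of a degree Celsius.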
low = self.dcb.get('floortemplow ')
high = self.dcb.get('floortemphigh')
temp = (high * 256 + low) / 10.0
self._current_temperature = temp
else:
self._current_temperature = None
return self._current_temperature
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self._target_temperature
def set_temperature(self, **kwargs):
"""Set new target temperature."""
temperature = kwargs.get(ATTR_TEMPERATURE)
if temperature is None:
return
self.heatmiser.hmSendAddress(
self._id,
18,
temperature,
1,
self.serport)
self._target_temperature = temperature
def update(self):
"""Get the latest data."""
self.dcb = self.heatmiser.hmReadAddress(self._id, 'prt', self.serport)
| mit |
dhruvsrivastava/OJ | flask/lib/python2.7/site-packages/werkzeug/posixemulation.py | 148 | 3483 | # -*- coding: utf-8 -*-
r"""
werkzeug.posixemulation
~~~~~~~~~~~~~~~~~~~~~~~
Provides a POSIX emulation for some features that are relevant to
web applications. The main purpose is to simplify support for
systems such as Windows NT that are not 100% POSIX compatible.
Currently this only implements a :func:`rename` function that
    follows POSIX semantics, e.g. if the target file already exists it
will be replaced without asking.
This module was introduced in 0.6.1 and is not a public interface.
It might become one in later versions of Werkzeug.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import sys
import os
import errno
import time
import random
from ._compat import to_unicode
can_rename_open_file = False
if os.name == 'nt': # pragma: no cover
_rename = lambda src, dst: False
_rename_atomic = lambda src, dst: False
try:
import ctypes
_MOVEFILE_REPLACE_EXISTING = 0x1
_MOVEFILE_WRITE_THROUGH = 0x8
_MoveFileEx = ctypes.windll.kernel32.MoveFileExW
def _rename(src, dst):
src = to_unicode(src, sys.getfilesystemencoding())
dst = to_unicode(dst, sys.getfilesystemencoding())
if _rename_atomic(src, dst):
return True
retry = 0
rv = False
while not rv and retry < 100:
rv = _MoveFileEx(src, dst, _MOVEFILE_REPLACE_EXISTING |
_MOVEFILE_WRITE_THROUGH)
if not rv:
time.sleep(0.001)
retry += 1
return rv
# new in Vista and Windows Server 2008
_CreateTransaction = ctypes.windll.ktmw32.CreateTransaction
_CommitTransaction = ctypes.windll.ktmw32.CommitTransaction
_MoveFileTransacted = ctypes.windll.kernel32.MoveFileTransactedW
_CloseHandle = ctypes.windll.kernel32.CloseHandle
can_rename_open_file = True
def _rename_atomic(src, dst):
ta = _CreateTransaction(None, 0, 0, 0, 0, 1000, 'Werkzeug rename')
if ta == -1:
return False
try:
retry = 0
rv = False
while not rv and retry < 100:
rv = _MoveFileTransacted(src, dst, None, None,
_MOVEFILE_REPLACE_EXISTING |
_MOVEFILE_WRITE_THROUGH, ta)
if rv:
rv = _CommitTransaction(ta)
break
else:
time.sleep(0.001)
retry += 1
return rv
finally:
_CloseHandle(ta)
except Exception:
pass
def rename(src, dst):
# Try atomic or pseudo-atomic rename
if _rename(src, dst):
return
# Fall back to "move away and replace"
try:
os.rename(src, dst)
except OSError as e:
if e.errno != errno.EEXIST:
raise
            old = "%s-%08x" % (dst, random.randint(0, sys.maxsize))
os.rename(dst, old)
os.rename(src, dst)
try:
os.unlink(old)
except Exception:
pass
else:
rename = os.rename
can_rename_open_file = True
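# Hedged usage sketch: rename() above gives POSIX overwrite-on-collision
# semantics on both platforms (file names are illustrative):
#
#   from werkzeug.posixemulation import rename
#   rename('settings.json.tmp', 'settings.json')  # replaces an existing target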
| bsd-3-clause |
vrv/tensorflow | tensorflow/python/kernel_tests/distributions/kullback_leibler_test.py | 59 | 4537 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for distributions KL mechanism."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.ops.distributions import normal
from tensorflow.python.platform import test
# pylint: disable=protected-access
_DIVERGENCES = kullback_leibler._DIVERGENCES
_registered_kl = kullback_leibler._registered_kl
# pylint: enable=protected-access
class KLTest(test.TestCase):
def testRegistration(self):
class MyDist(normal.Normal):
pass
# Register KL to a lambda that spits out the name parameter
@kullback_leibler.RegisterKL(MyDist, MyDist)
def _kl(a, b, name=None): # pylint: disable=unused-argument,unused-variable
return name
a = MyDist(loc=0.0, scale=1.0)
self.assertEqual("OK", kullback_leibler.kl_divergence(a, a, name="OK"))
def testDomainErrorExceptions(self):
class MyDistException(normal.Normal):
pass
# Register KL to a lambda that spits out the name parameter
@kullback_leibler.RegisterKL(MyDistException, MyDistException)
# pylint: disable=unused-argument,unused-variable
def _kl(a, b, name=None):
return array_ops.identity([float("nan")])
# pylint: disable=unused-argument,unused-variable
with self.test_session():
a = MyDistException(loc=0.0, scale=1.0)
kl = kullback_leibler.kl_divergence(a, a, allow_nan_stats=False)
with self.assertRaisesOpError(
"KL calculation between .* and .* returned NaN values"):
kl.eval()
kl_ok = kullback_leibler.kl_divergence(a, a)
self.assertAllEqual([float("nan")], kl_ok.eval())
def testRegistrationFailures(self):
class MyDist(normal.Normal):
pass
with self.assertRaisesRegexp(TypeError, "must be callable"):
kullback_leibler.RegisterKL(MyDist, MyDist)("blah")
# First registration is OK
kullback_leibler.RegisterKL(MyDist, MyDist)(lambda a, b: None)
# Second registration fails
with self.assertRaisesRegexp(ValueError, "has already been registered"):
kullback_leibler.RegisterKL(MyDist, MyDist)(lambda a, b: None)
def testExactRegistrationsAllMatch(self):
for (k, v) in _DIVERGENCES.items():
self.assertEqual(v, _registered_kl(*k))
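  # KL lookup falls back along the class hierarchy: when no divergence is
  # registered for the exact (type_a, type_b) pair, a registration on parent
  # classes is used instead, as the Sub11 cases below exercise.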
def testIndirectRegistration(self):
class Sub1(normal.Normal):
pass
class Sub2(normal.Normal):
pass
class Sub11(Sub1):
pass
# pylint: disable=unused-argument,unused-variable
@kullback_leibler.RegisterKL(Sub1, Sub1)
def _kl11(a, b, name=None):
return "sub1-1"
@kullback_leibler.RegisterKL(Sub1, Sub2)
def _kl12(a, b, name=None):
return "sub1-2"
@kullback_leibler.RegisterKL(Sub2, Sub1)
def _kl21(a, b, name=None):
return "sub2-1"
  # pylint: enable=unused-argument,unused-variable
sub1 = Sub1(loc=0.0, scale=1.0)
sub2 = Sub2(loc=0.0, scale=1.0)
sub11 = Sub11(loc=0.0, scale=1.0)
self.assertEqual("sub1-1", kullback_leibler.kl_divergence(sub1, sub1))
self.assertEqual("sub1-2", kullback_leibler.kl_divergence(sub1, sub2))
self.assertEqual("sub2-1", kullback_leibler.kl_divergence(sub2, sub1))
self.assertEqual("sub1-1", kullback_leibler.kl_divergence(sub11, sub11))
self.assertEqual("sub1-1", kullback_leibler.kl_divergence(sub11, sub1))
self.assertEqual("sub1-2", kullback_leibler.kl_divergence(sub11, sub2))
self.assertEqual("sub1-1", kullback_leibler.kl_divergence(sub11, sub1))
self.assertEqual("sub1-2", kullback_leibler.kl_divergence(sub11, sub2))
self.assertEqual("sub2-1", kullback_leibler.kl_divergence(sub2, sub11))
self.assertEqual("sub1-1", kullback_leibler.kl_divergence(sub1, sub11))
if __name__ == "__main__":
test.main()
| apache-2.0 |
charles-cooper/raiden | raiden/tests/smart_contracts/test_endpointregistry.py | 3 | 1083 | # -*- coding: utf-8 -*-
from ethereum import tester
from raiden.utils import get_contract_path
def test_endpointregistry(tester_state, tester_events):
account0 = tester.DEFAULT_ACCOUNT
sender = account0.encode('hex')
endpointregistry_path = get_contract_path('EndpointRegistry.sol')
registry_contract = tester_state.abi_contract(
None,
path=endpointregistry_path,
language='solidity',
log_listener=tester_events.append,
)
registry_contract.registerEndpoint('127.0.0.1:4001')
assert registry_contract.findAddressByEndpoint('127.0.0.1:4001') == sender
assert registry_contract.findEndpointByAddress(sender) == '127.0.0.1:4001'
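    # Registering a second endpoint for the same sender overwrites the
    # previous mapping, so both lookups below resolve to the new endpoint.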
registry_contract.registerEndpoint('192.168.0.1:4002')
assert registry_contract.findAddressByEndpoint('192.168.0.1:4002') == sender
assert registry_contract.findEndpointByAddress(sender) == '192.168.0.1:4002'
assert len(tester_events) == 2
assert tester_events[0]['_event_type'] == 'AddressRegistered'
assert tester_events[1]['_event_type'] == 'AddressRegistered'
| mit |
Kongsea/tensorflow | tensorflow/contrib/quantize/python/graph_matcher.py | 17 | 6445 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities that match patterns in a tf.Graph."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class OpTypePattern(object):
"""A tree pattern that matches TF expressions with certain op types."""
def __init__(self, op_type, name=None, inputs=None):
"""Initializes an OpTypePattern.
Args:
op_type: string that specifies the allowed types of the root. It can be
(1) an op type, e.g. 'Conv2D',
(2) '*', i.e. wildcard, or
(3) multiple op types separated by '|', e.g., 'Relu|Relu6'.
We could use regex strings, which might be worthwhile when we have many
similar TF op types.
name: Optional string. The name of the pattern that can be looked up in
MatchResult.
inputs: Optional list of `OpTypePattern`s or strings that specify the
patterns for the inputs of a matching op. If None, this pattern accepts
any inputs of a matching op.
"""
self._op_type = op_type
self._name = name
if inputs is None:
inputs = []
self._inputs = [
input_pattern if isinstance(input_pattern, OpTypePattern) else
OpTypePattern(input_pattern) for input_pattern in inputs
]
@property
def op_type(self):
return self._op_type
@property
def inputs(self):
return self._inputs
@property
def name(self):
return self._name
class MatchResult(object):
r"""Encapsulates the result of a match done by GraphMatcher.
MatchResult contains a map from OpTypePattern to the matching op and tensor.
When the matching op has multiple output tensors, the matching tensor is the
output tensor used by the matching op of the parent pattern. E.g., when we
match graph
      -   +
     / \y0 y1/ \
    x    split    z
              |
              y (nodes are ops; edges are going up)
against add_pattern defined as
y1_pattern = OpTypePattern('*')
z_pattern = OpTypePattern('*')
add_pattern = OpTypePattern('+', inputs=[y1_pattern, z_pattern])
the matching op of `y1_pattern` is `split`, and the matching tensor of
`y1_pattern`
is `y1` not `y0`.
"""
def __init__(self):
self._pattern_to_op_tensor = {}
self._name_to_pattern = {}
def add(self, pattern, op, tensor):
self._pattern_to_op_tensor[pattern] = op, tensor
if pattern.name is not None:
if pattern.name in self._name_to_pattern:
raise ValueError(
'Name %s is already bound to another pattern' % pattern.name)
self._name_to_pattern[pattern.name] = pattern
def _to_pattern(self, pattern_or_name):
if isinstance(pattern_or_name, OpTypePattern):
return pattern_or_name
if isinstance(pattern_or_name, str):
return self._name_to_pattern[pattern_or_name]
raise ValueError('pattern_or_name has type %s. Expect OpTypePattern or str.'
% type(pattern_or_name))
def get_op(self, pattern_or_name):
return self._pattern_to_op_tensor[self._to_pattern(pattern_or_name)][0]
def get_tensor(self, pattern_or_name):
return self._pattern_to_op_tensor[self._to_pattern(pattern_or_name)][1]
class GraphMatcher(object):
"""Checks if a particular subgraph matches a given pattern."""
def __init__(self, pattern):
"""Initializes a GraphMatcher.
Args:
pattern: The `OpTypePattern` against which `GraphMatcher` matches
subgraphs.
"""
self._pattern = pattern
def _match_pattern(self, pattern, op, tensor):
"""Returns whether an TF expression rooted at `op` matches `pattern`.
If there is a match, adds to `self._match_result` the matching op and tensor
with key `pattern`.
Args:
pattern: An `OpTypePattern`.
op: A `tf.Operation` to match against the pattern.
tensor: the output `tf.Tensor` of `op` that is used by the matching op of
`pattern`'s parent. Can be None if `pattern` is already the root of the
pattern tree.
Returns:
      True if a TF expression rooted at `op` matches `pattern`.
"""
if pattern.op_type != '*':
if op.type not in pattern.op_type.split('|'):
return False
self._match_result.add(pattern, op, tensor)
if not pattern.inputs:
# If pattern.inputs is empty, skips the rest and accepts all the inputs.
return True
return len(op.inputs) == len(pattern.inputs) and all([
self._match_pattern(input_pattern, input_tensor.op, input_tensor)
for input_tensor, input_pattern in zip(op.inputs, pattern.inputs)
])
def match_op(self, op):
"""Matches `op` against `self._pattern`.
Args:
op: `tf.Operation` to match against the pattern.
Returns:
Returns a `MatchResult` if `op` matches the pattern; otherwise, returns
None.
"""
self._match_result = MatchResult()
if not self._match_pattern(self._pattern, op, tensor=None):
return None
return self._match_result
def match_ops(self, ops):
"""Matches each operation in `ops` against `self._pattern`.
Args:
ops: collection of `tf.Operation` to match against the pattern.
Yields:
`MatchResult` for each `tf.Operation` that matches the pattern.
"""
for op in ops:
match_result = self.match_op(op)
if match_result:
yield match_result
def match_graph(self, graph):
"""Matches each operation in `graph` against `self._pattern`.
Args:
graph: `tf.Graph` containing operations to match.
Yields:
`MatchResult` for each `tf.Operation` in `graph` that matches the pattern.
"""
# Python 3.3.2+ implements `yield from`, but for now:
for match_result in self.match_ops(graph.get_operations()):
yield match_result
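# Illustrative usage sketch (hypothetical pattern; the op names and the
# `graph` variable are assumptions):
#
#     pattern = OpTypePattern('BiasAdd', inputs=[
#         OpTypePattern('Conv2D'),
#         OpTypePattern('*', name='bias'),
#     ])
#     for match in GraphMatcher(pattern).match_graph(graph):
#         bias_tensor = match.get_tensor('bias')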
| apache-2.0 |
huawei-cloud/compass | compass/tests/config_management/utils/test_config_reference.py | 4 | 5733 | import unittest2
from copy import deepcopy
from compass.utils import util
from compass.config_management.utils import config_reference
class TestConfigReference(unittest2.TestCase):
def test_init(self):
config = {'1': {'2': 3, '10': {}}, '4': [5, 6, 7], '8': 8}
ref = config_reference.ConfigReference(config)
config2 = {'5': {'6': 6}}
ref2 = config_reference.ConfigReference(config2['5'], ref, '5')
expected_config = deepcopy(config)
util.merge_dict(expected_config, config2)
self.assertEqual(ref.config, expected_config)
self.assertEqual(id(ref.config['5']), id(ref2.config))
config3 = {'5': {'7': 7}}
ref3 = config_reference.ConfigReference(config3['5'], ref, '5')
self.assertEqual(id(ref.config['5']), id(ref3.config))
def test_ref(self):
config = {'1': {'2': 3, '10': {}}, '4': [5, 6, 7], '8': 8}
ref = config_reference.ConfigReference(config)
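        # ConfigReference paths use a filesystem-like syntax: '/' is the
        # root, '.' the current node and '..' the parent; the parent of the
        # root is the root itself, as the assertions below show.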
self.assertRaises(KeyError, ref.ref, '')
self.assertRaises(KeyError, ref.ref, '/1/2/4')
self.assertEqual(ref.ref('.').config, config)
self.assertEqual(ref.ref('..').config, config)
self.assertEqual(ref.ref('/').config, config)
self.assertEqual(ref.ref('1').config, config['1'])
self.assertEqual(ref.ref('1/2').config, config['1']['2'])
self.assertEqual(ref.ref('1/2/.').config, config['1']['2'])
self.assertEqual(ref.ref('1/2/..').config, config['1'])
self.assertEqual(ref.ref('1/2//').config, config['1']['2'])
self.assertEqual(ref.ref('/1').config, config['1'])
self.assertEqual(ref.ref('/1/2').config, config['1']['2'])
subref = ref.ref('1')
self.assertEqual(subref.ref('2').config, config['1']['2'])
self.assertEqual(subref.ref('2/..').config, config['1'])
self.assertEqual(subref.ref('..').config, config)
self.assertEqual(subref.ref('../..').config, config)
self.assertEqual(subref.ref('/').config, config)
self.assertEqual(subref.ref('/4').config, config['4'])
self.assertRaises(KeyError, subref.ref, '/4/5')
self.assertRaises(KeyError, subref.ref, '/9')
subref2 = ref.ref('9', True)
self.assertEqual(ref.ref('9'), subref2)
def test_refs(self):
config = {'1': {'2': 3, '10': {}}, '4': [5, 6, 7], '8': 8, '88': 88}
ref = config_reference.ConfigReference(config)
refkeys = ref.ref_keys('1')
self.assertEqual(set(refkeys), set(['1']))
refkeys = ref.ref_keys('/1/*')
self.assertEqual(set(refkeys), set(['/1/2', '/1/10']))
refkeys = ref.ref_keys('*')
self.assertEqual(set(refkeys), set(['1', '4', '8', '88']))
refkeys = ref.ref_keys('8*')
self.assertEqual(set(refkeys), set(['8', '88']))
self.assertRaises(KeyError, ref.ref_keys, '')
def test_contains(self):
config = {'1': {'2': '3', '10': {}}, '4': [5, 6, 7], '8': 8}
ref = config_reference.ConfigReference(config)
self.assertIn('/1/2', ref)
self.assertIn('1/10/', ref)
self.assertIn('4/', ref)
self.assertIn('/1/2/..', ref)
self.assertNotIn('/1/3/7', ref)
self.assertNotIn('/1/2/3/..', ref)
def test_setitem(self):
config = {'1': {'2': '3', '10': {}}, '4': [5, 6, 7], '8': 8}
ref = config_reference.ConfigReference(config)
ref['/1/2'] = '6'
self.assertEqual(config['1']['2'], '6')
self.assertEqual(ref['/1/2'], '6')
ref['1/10/5'] = 7
self.assertEqual(config['1']['10']['5'], 7)
self.assertEqual(ref['1/10/5'], 7)
ref['3/6/8'] = [1, 3, 5]
self.assertEqual(config['3']['6']['8'], [1, 3, 5])
self.assertEqual(ref['3/6/8'], [1, 3, 5])
def test_del(self):
config = {'1': {'2': '3', '10': {}}, '4': [5, 6, 7], '8': 8}
ref = config_reference.ConfigReference(config)
del ref['/8']
self.assertNotIn('8', config)
del ref['1/2']
self.assertNotIn('2', config['1'])
del ref['1']
self.assertNotIn('1', config)
self.assertRaises(KeyError, ref.__delitem__, '9')
def test_get(self):
config = {'1': {'2': '3', '10': {}}, '4': [5, 6, 7], '8': 8}
ref = config_reference.ConfigReference(config)
self.assertEqual(ref.get('1/2'), config['1']['2'])
self.assertIsNone(ref.get('1/3'))
self.assertEqual(ref.get('1/3', 3), 3)
self.assertNotIn('3', config['1'])
def test_setdefault(self):
config = {'1': {'2': '3', '10': {}}, '4': [5, 6, 7], '8': 8}
ref = config_reference.ConfigReference(config)
self.assertEqual(ref.setdefault('1/2').config, config['1']['2'])
self.assertIsNone(ref.setdefault('1/3').config)
self.assertEqual(ref.setdefault('1/4', 4).config, 4)
self.assertEqual(4, config['1']['4'])
def test_update(self):
config = {'1': {'2': '3', '10': {}}, '4': [5, 6, 7], '8': 8}
expected_config = deepcopy(config)
ref = config_reference.ConfigReference(config)
config2 = {'9': 9, '10': {'10': 10}}
util.merge_dict(expected_config, config2)
ref.update(config2)
self.assertEqual(ref.config, expected_config)
ref.update(10, False)
self.assertEqual(ref.config, expected_config)
ref.update(10)
self.assertEqual(ref.config, 10)
def test_iter(self):
config = {'1': {'2': '3', '10': {}}, '4': [5, 6, 7], '8': 8}
ref = config_reference.ConfigReference(config)
keys = ref.keys()
self.assertEqual(set(keys), set(['1', '1/2', '1/10', '4', '8']))
if __name__ == '__main__':
unittest2.main()
| apache-2.0 |
ilexius/odoo | addons/lunch/tests/test_lunch.py | 47 | 3259 | # -*- coding: utf-8 -*-
from openerp.tests import common
class Test_Lunch(common.TransactionCase):
def setUp(self):
"""*****setUp*****"""
super(Test_Lunch, self).setUp()
self.demo_user = self.env['res.users'].search([('name', '=', 'Demo User')])
self.product_bolognese_ref = self.env['ir.model.data'].get_object_reference('lunch', 'product_Bolognese')
self.product_Bolognese_id = self.product_bolognese_ref and self.product_bolognese_ref[1] or False
self.new_id_order = self.env['lunch.order'].create({
'user_id': self.demo_user.id,
'order_line_ids': '[]',
})
self.new_id_order_line = self.env['lunch.order.line'].create({
'order_id': self.new_id_order.id,
'product_id': self.product_Bolognese_id,
'note': '+Emmental',
'cashmove': [],
'price': self.env['lunch.product'].browse(self.product_Bolognese_id).price,
})
def test_00_lunch_order(self):
"""Change the state of an order line from 'new' to 'ordered'. Check that there are no cashmove linked to that order line"""
self.order_one = self.new_id_order_line
        # we check that our order_line is a 'new' one and that no cashmove is linked to that order_line:
self.assertEqual(self.order_one.state, 'new')
self.assertEqual(list(self.order_one.cashmove), [])
        # we order that order line so its state will be 'ordered'
self.order_one.order()
self.order_one = self.new_id_order_line
        # we check that our order_line is an 'ordered' one and that no cashmove is linked to that order_line:
self.assertEqual(self.order_one.state, 'ordered')
self.assertEqual(list(self.order_one.cashmove), [])
def test_01_lunch_order(self):
"""Change the state of an order line from 'new' to 'ordered' then to 'confirmed'. Check that there is a cashmove linked to the order line"""
self.test_00_lunch_order()
        # We receive the order and confirm the order line so its state will be 'confirmed'
#A cashmove will be created and we will test that the cashmove amount equals the order line price
self.order_one.confirm()
self.order_one = self.new_id_order_line
        # we check that our order_line is a 'confirmed' one and that a cashmove is linked to that order_line with an amount equal to the order line price:
self.assertEqual(self.order_one.state, 'confirmed')
self.assertTrue(self.order_one.cashmove)
self.assertTrue(self.order_one.cashmove[0].amount == -self.order_one.price)
def test_02_lunch_order(self):
"""Change the state of an order line from 'confirmed' to 'cancelled' and check that the cashmove linked to that order line will be deleted"""
self.test_01_lunch_order()
        # We have a confirmed order with its associated cashmove
#We execute the cancel function
self.order_one.cancel()
self.order_one = self.new_id_order_line
#We check that the state is cancelled and that the cashmove has been deleted
self.assertEqual(self.order_one.state, 'cancelled')
self.assertFalse(self.order_one.cashmove)
| gpl-3.0 |
codecollision/DropboxToFlickr | django/template/smartif.py | 331 | 6261 | """
Parser and utilities for the smart 'if' tag
"""
import operator
# Using a simple top down parser, as described here:
# http://effbot.org/zone/simple-top-down-parsing.htm.
# 'led' = left denotation
# 'nud' = null denotation
# 'bp' = binding power (left = lbp, right = rbp)
class TokenBase(object):
"""
Base class for operators and literals, mainly for debugging and for throwing
syntax errors.
"""
id = None # node/token type name
value = None # used by literals
first = second = None # used by tree nodes
def nud(self, parser):
# Null denotation - called in prefix context
raise parser.error_class(
"Not expecting '%s' in this position in if tag." % self.id
)
def led(self, left, parser):
# Left denotation - called in infix context
raise parser.error_class(
"Not expecting '%s' as infix operator in if tag." % self.id
)
def display(self):
"""
Returns what to display in error messages for this node
"""
return self.id
def __repr__(self):
out = [str(x) for x in [self.id, self.first, self.second] if x is not None]
return "(" + " ".join(out) + ")"
def infix(bp, func):
"""
Creates an infix operator, given a binding power and a function that
evaluates the node
"""
class Operator(TokenBase):
lbp = bp
def led(self, left, parser):
self.first = left
self.second = parser.expression(bp)
return self
def eval(self, context):
try:
return func(context, self.first, self.second)
except Exception:
# Templates shouldn't throw exceptions when rendering. We are
# most likely to get exceptions for things like {% if foo in bar
# %} where 'bar' does not support 'in', so default to False
return False
return Operator
def prefix(bp, func):
"""
Creates a prefix operator, given a binding power and a function that
evaluates the node.
"""
class Operator(TokenBase):
lbp = bp
def nud(self, parser):
self.first = parser.expression(bp)
self.second = None
return self
def eval(self, context):
try:
return func(context, self.first)
except Exception:
return False
return Operator
# Operator precedence follows Python.
# NB - we can get slightly more accurate syntax error messages by not using the
# same object for '==' and '='.
# We defer variable evaluation to the lambda to ensure that terms are
# lazily evaluated using Python's boolean parsing logic.
OPERATORS = {
'or': infix(6, lambda context, x, y: x.eval(context) or y.eval(context)),
'and': infix(7, lambda context, x, y: x.eval(context) and y.eval(context)),
'not': prefix(8, lambda context, x: not x.eval(context)),
'in': infix(9, lambda context, x, y: x.eval(context) in y.eval(context)),
'not in': infix(9, lambda context, x, y: x.eval(context) not in y.eval(context)),
'=': infix(10, lambda context, x, y: x.eval(context) == y.eval(context)),
'==': infix(10, lambda context, x, y: x.eval(context) == y.eval(context)),
'!=': infix(10, lambda context, x, y: x.eval(context) != y.eval(context)),
'>': infix(10, lambda context, x, y: x.eval(context) > y.eval(context)),
'>=': infix(10, lambda context, x, y: x.eval(context) >= y.eval(context)),
'<': infix(10, lambda context, x, y: x.eval(context) < y.eval(context)),
'<=': infix(10, lambda context, x, y: x.eval(context) <= y.eval(context)),
}
# Assign 'id' to each:
for key, op in OPERATORS.items():
op.id = key
class Literal(TokenBase):
"""
A basic self-resolvable object similar to a Django template variable.
"""
# IfParser uses Literal in create_var, but TemplateIfParser overrides
# create_var so that a proper implementation that actually resolves
# variables, filters etc is used.
id = "literal"
lbp = 0
def __init__(self, value):
self.value = value
def display(self):
return repr(self.value)
def nud(self, parser):
return self
def eval(self, context):
return self.value
def __repr__(self):
return "(%s %r)" % (self.id, self.value)
class EndToken(TokenBase):
lbp = 0
def nud(self, parser):
raise parser.error_class("Unexpected end of expression in if tag.")
EndToken = EndToken()
class IfParser(object):
error_class = ValueError
def __init__(self, tokens):
# pre-pass necessary to turn 'not','in' into single token
l = len(tokens)
mapped_tokens = []
i = 0
while i < l:
token = tokens[i]
if token == "not" and i + 1 < l and tokens[i+1] == "in":
token = "not in"
i += 1 # skip 'in'
mapped_tokens.append(self.translate_token(token))
i += 1
self.tokens = mapped_tokens
self.pos = 0
self.current_token = self.next()
def translate_token(self, token):
try:
op = OPERATORS[token]
except (KeyError, TypeError):
return self.create_var(token)
else:
return op()
def next(self):
if self.pos >= len(self.tokens):
return EndToken
else:
retval = self.tokens[self.pos]
self.pos += 1
return retval
def parse(self):
retval = self.expression()
# Check that we have exhausted all the tokens
if self.current_token is not EndToken:
raise self.error_class("Unused '%s' at end of if expression." %
self.current_token.display())
return retval
def expression(self, rbp=0):
t = self.current_token
self.current_token = self.next()
left = t.nud(self)
while rbp < self.current_token.lbp:
t = self.current_token
self.current_token = self.next()
left = t.led(left, self)
return left
def create_var(self, value):
return Literal(value)
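if __name__ == '__main__':
    # Minimal usage sketch (illustrative; bare tokens resolve through
    # create_var, so plain Python values act as literals here):
    parser = IfParser([True, 'and', 'not', False])
    assert parser.parse().eval({}) is True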
| bsd-3-clause |
patmcb/odoo | addons/account/wizard/account_move_line_unreconcile_select.py | 385 | 1864 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_move_line_unreconcile_select(osv.osv_memory):
_name = "account.move.line.unreconcile.select"
_description = "Unreconciliation"
_columns ={
'account_id': fields.many2one('account.account','Account',required=True),
}
def action_open_window(self, cr, uid, ids, context=None):
data = self.read(cr, uid, ids, context=context)[0]
return {
'domain': "[('account_id','=',%d),('reconcile_id','<>',False),('state','<>','draft')]" % data['account_id'],
'name': 'Unreconciliation',
'view_type': 'form',
'view_mode': 'tree,form',
'view_id': False,
'res_model': 'account.move.line',
'type': 'ir.actions.act_window'
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
maellak/invenio | modules/webauthorprofile/lib/webauthorprofile_cli.py | 18 | 1182 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
WebAuthorProfile command-line interface
"""
from invenio import bibauthorid_config as bconfig
def main():
""" Main function """
try:
from invenio import webauthorprofile_daemon as daemon
except ImportError:
bconfig.LOGGER.error("Hmm...No Daemon process running.")
return
daemon.webauthorprofile_daemon()
if __name__ == '__main__':
main()
| gpl-2.0 |
josefgrosch/Pyetree | pyetree/PyetreeBandnames.py | 1 | 28154 | # -----------------------------------------------------------------------
#
# < pyetree_bandnames.py >
#
# -----------------------------------------------------------------------
# -----------------------------------------------------------------------
#
# File Name : pyetree_bandnames.py
#
# Author : Josef Grosch
#
# Date : 15 October 2015
#
# Modification : Some
#
# Application :
#
# Description :
#
# Notes :
#
# Functions :
#
# -----------------------------------------------------------------------
# -----------------------------------------------------------------------
#
# Copyright
#
# Copyright (c) 2015 - 2016 Moose River LLC.
# < [email protected] >
#
# All Rights Reserved
#
# Deadicated to my brother Jerry Garcia,
# who passed from this life on August 9, 1995.
# Happy trails to you, Jerry
#
# -----------------------------------------------------------------------
# -----------------------------------------------------------------------
#
# License
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------
# -----------------------------------------------------------------------
#
# GPG Key
#
# pub 4096R/2C38BBFA 2016-05-21 [expires: 2017-05-21]
# Key fingerprint = A855 3BF0 544B 3B4E F06D 2B90 8DDC FDDA 2C38 BBFA
# uid Josef Grosch <[email protected]>
# sub 4096R/CC2D1F80 2016-05-21 [expires: 2017-05-21]
#
# -----------------------------------------------------------------------
# -----------------------------------------------------------------------
#
# Contact Information
#
# Moose River LLC.
# P.O. Box 9403
# Berkeley, Ca. 94709
#
# http://www.mooseriver.com
#
# -----------------------------------------------------------------------
# -----------------------------------------------------------------------
#
# Import
#
# -----------------------------------------------------------------------
import sys
import os
"""
"""
#--start constants--
__author__ = "Josef Grosch"
__copyright__ = "Copyright 2015 - 2016 Moose River, LLC."
__license__ = "Apache License v2.0"
__version__ = "0.1"
__maintainer__ = "Josef Grosch"
__email__ = "[email protected]"
__status__ = "Development"
#--end constants--
# -----------------------------------------------------------------------
#
# Class pyetree_bandnames
#
# -----------------------------------------------------------------------
class pyetree_bandnames:
OddBandNames = {}
BandNames = {}
BandUrl = {}
BandDB = {}
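    # NOTE: several abbreviations below map to more than one band ('kk',
    # 'rr', 'sy' and 'willyp' here; 'bh' in BandNames and BandUrl).  Python
    # dict literals keep only the last occurrence of a duplicate key, so only
    # the final entry for each of those abbreviations survives.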
OddBandNames = {'jm3' : 'John Mayer Trio',
'jmayer3' : 'John Mayer Trio',
'kk' : 'Kai Kln',
'kk' : 'Kudzu Kings',
'rr' : 'Robert Randolph & the Family Band',
'rr' : 'Rusted Root',
'sts9' : 'Sound Tribe Sector 9',
'sy' : 'Seth Yacovone Band',
'sy' : 'Sonic Youth',
'willyp' : 'Willy Porter Band',
'willyp' : 'Willy Porter'
}
BandNames = {'3ah' : '3 Apples High',
'D&T' : 'Dave Matthews & Tim Reynolds',
'DM' : 'Dave Matthews (solo)',
'FB' : 'Les Claypool\'s Fearless Flying Frog Brigade',
'Garcia' : 'Jerry Garcia',
'abb' : 'Allman Brothers Band',
'ah&f' : 'Alan Hertz and Friends',
'ahf' : 'Alan Hertz and Friends',
'ahp' : 'Alan Hertz and Friends',
'amendola' : 'Scott Amendola Band',
'amf' : 'Amfibian',
'bc' : 'Black Crowes',
'bcue' : 'Barbara Cue',
'be' : 'Bockman\'s Euphio',
                 'beanland' : 'Beanland',
'bfft' : 'Bela Fleck & the Flecktones',
'bfm' : 'Barefoot Manner',
'bftt' : 'Bela Fleck & Tony Trischka',
'bgug' : 'Blueground Undergrass',
'bh' : 'Ben Harper',
'bh' : 'Bruce Hornsby',
'bhic' : 'Ben Harper and the Innocent Criminals',
'bij' : 'Big In Japan',
'bmelon' : 'Blind Melon',
'bnb' : 'Burt Neilson Band',
'bog' : 'A Band of Gypsys',
'bp' : 'Brothers Past',
'bruce' : 'Bruce Hornsby',
'bs' : 'Big Smith',
'bspear' : 'Burning Spear',
'bt' : 'Blues Traveler',
'bts' : 'Built to Spill',
'buho' : 'El Buho',
'cb' : 'Critters Buggin',
'cbobb' : 'Col. Claypool\'s Bucket of Bernie Brains',
'cc' : 'Counting Crows',
'ccity' : 'Cerulean City',
'ch' : 'Charlie Hunter',
'cj' : 'Cowboy Junkies',
'claypool' : 'Les Claypool\'s Fearless Flying Frog Brigade',
'cracker' : 'Cracker',
'crowes' : 'Black Crowes',
'cvb' : 'Camper van Beethoven',
'db' : 'Disco Biscuits',
'dbb' : 'Deep Banana Blackout',
'dbr' : 'Day by the River',
'dbt' : 'Drive-By Truckers',
'ddbb' : 'Dirty Dozen Brass Band',
'dead' : 'The Dead',
'dgray' : 'David Gray',
'disp' : 'Dispatch',
'djlogic' : 'DJ Logic',
'dmb' : 'Dave Matthews Band',
'dnb' : 'David Nelson Band',
'dso' : 'Dark Star Orchestra',
'dt' : 'Derek Trucks Band',
'dtb' : 'Donna the Buffalo',
'eb' : 'Edie Brickell',
'eh' : 'Ekoostik Hookah',
'farrah' : 'Farrah',
'fffb' : 'Les Claypool\'s Fearless Flying Frog Brigade',
'fhg' : 'Fareed Haque Group',
'gal' : 'Galactic',
'garaj' : 'Garaj Mahal',
'gba' : 'GreyBoy AllStars',
'gd' : 'Grateful Dead',
'glove' : 'G. Love and Special Sauce',
'gm' : 'Gov\'t Mule',
'gsw' : 'God Street Wine',
'gt' : 'Ghost Trane',
'gtb' : 'Grand Theft Bus',
'gus' : 'Guster',
'guster' : 'Guster',
'guymalone' : 'Guy Malone',
'hday' : 'Howie Day',
'hip' : 'The Tragically Hip',
'ho' : 'Schleigho',
'ht' : 'Hot Tuna',
'ig' : 'Indigo Girls',
'itf' : 'Indiana Trip Factory',
'jg' : 'Jerry Garcia',
'jgb' : 'Jerry Garcia Band',
'jh' : 'Jimi Hendrix Experience',
'jhbg' : 'A Band of Gypsys',
'jhe' : 'Jimi Hendrix Experience',
'jj' : 'Jack Johnson',
'jjj' : 'Jerry Joseph & The Jackmormons',
'jk' : 'Jorma Kaukonen',
'jmayer' : 'John Mayer',
'jmos' : 'Jerry Joseph & The Jackmormons',
'jmp' : 'Jazz Mandolin Project',
'jol' : 'Jolene',
'jt' : 'Jeff Tweedy',
'kaikln' : 'Kai Kln',
'kdcw' : 'Karl Denson and Chris Wood',
'kdtu' : 'Karl Denson\'s Tiny Universe',
'kdub' : 'Keller Williams',
'kmck' : 'Steve Kimock & Friends',
'kudzu' : 'Kudzu Kings',
'kvhw' : 'KVHW',
'kw' : 'Keller Williams',
'kwi' : 'Keller Williams String Cheese Incident',
'laf' : 'Life After Failing',
'lcbbb' : 'Col. Claypool\'s Bucket of Bernie Brains',
'lcbobb' : 'Col. Claypool\'s Bucket of Bernie Brains',
'lcfffb' : 'Les Claypool\'s Fearless Flying Frog Brigade',
'ld' : 'Living Daylights',
'lf' : 'Little Feat',
'lfb' : 'Lo Faber Band',
'logic' : 'DJ Logic',
'lom' : 'Legion of Mary',
'los' : 'Leftover Salmon',
'ls' : 'Leftover Salmon',
'lt' : 'Lake Trout',
'mammals' : 'The Mammals',
'marlow' : 'Marlow',
'mcrx' : 'Mike Clark\'s Prescription Renewal',
'mel' : 'Marcus Eaton & The Lobby',
'metheny' : 'Pat Metheny',
'mf' : 'Marc Ford',
'mfs' : 'Michael Franti & Spearhead',
'mmw' : 'Medeski Martin & Wood',
'moe' : 'moe.',
'mule' : 'Gov\'t Mule',
'nd' : 'The New Deal',
'nmas' : 'North Mississippi Allstars',
'oar' : 'O.A.R. (Of A Revolution)',
'oh' : 'Oysterhead',
'or' : 'Oregon',
'oregon' : 'Oregon',
'oysterhead' : 'Oysterhead',
'p&f' : 'Phil Lesh & Friends',
'p+f' : 'Phil Lesh & Friends',
'paf' : 'Phil Lesh & Friends',
'par' : 'Particle',
'particle' : 'Particle',
'pb' : 'Psychedelic Breakfast',
'pf' : 'Phil Lesh & Friends',
'ph' : 'Phish',
'phil' : 'Phil Lesh & Friends',
'pj' : 'Pearl Jam',
'pm' : 'Pat Metheny',
'pmb' : 'Pat McGee Band',
'pmg' : 'Pat Metheny Group',
'pmt' : 'Pat Metheny Trio',
'pnf' : 'Phil Lesh & Friends',
'porter' : 'George Porter, Jr. & Runnin\' Pardners',
'rad' : 'Radiators',
'rads' : 'Radiators',
'raq' : 'Raq',
'ratdog' : 'Ratdog',
'rd' : 'Ratdog',
'reservoir' : 'Reservoir',
'rezi' : 'Rezi',
'rg' : 'Reid Genauer',
'rre' : 'Railroad Earth',
'rrfb' : 'Robert Randolph & the Family Band',
'rw2c' : 'Robert Walter\'s 20th Congress',
'rwtc' : 'Robert Walter\'s 20th Congress',
'schas' : 'Santa Cruz Hemp Allstars',
'schwillb' : 'The Schwillbillies',
'sci' : 'String Cheese Incident',
'sco' : 'John Scofield',
'sexmob' : 'Sex Mob',
'sf' : 'Strangefolk',
'sk' : 'Kimock',
'skb' : 'Steve Kimock Band',
'sl' : 'The Slip',
'slip' : 'The Slip',
'sonicyouth' : 'Sonic Youth',
'soulive' : 'Soulive',
'spin' : 'Spin Doctors',
'spod' : 'Serial Pod',
'spr' : 'Michael Franti & Spearhead',
'ss' : 'Stockholm Syndrome',
'st' : 'Shaking Tree',
'stocksyn' : 'Stockholm Syndrome',
'syb' : 'Seth Yacovone Band',
'tab' : 'Trey Anastasio (Band)',
'td' : 'The Dead',
'tend' : 'Tenacious D',
'tlg' : 'Tea Leaf Green',
'tn' : 'The Nadas',
'tnd' : 'The New Deal',
'too' : 'The Other Ones',
'tortoise' : 'Tortoise',
'tr' : 'Tim Reynolds',
'trey' : 'Trey Anastasio (Band)',
'um' : 'Umphrey\'s McGee',
'umph' : 'Umphrey\'s McGee',
'us' : 'Uncle Sammy',
'vb' : 'Vida Blue',
'wb4t' : 'Will Bernard 4tet',
'ween' : 'Ween',
'wh' : 'Warren Haynes',
'wilco' : 'Wilco',
'word' : 'The Word',
'wp' : 'Widespread Panic',
'wsp' : 'Widespread Panic',
'wu' : 'The Big Wu',
'ymsb' : 'Yonder Mountain String Band',
'zero' : 'Zero',
'zm' : 'Zony Mash',
'zwan' : 'Zwan'
}
BandUrl = {'3ah' : 'www.myspace.com/3appleshighmusic', # '3 Apples High',
'D&T' : 'www.davematthewsband.com', # 'Dave Matthews & Tim Reynolds',
'DM' : 'www.davematthewsband.com', # 'Dave Matthews (solo)',
'FB' : 'www.lesclaypool.com', # 'Les Claypool\'s Fearless Flying Frog Brigade',
'Garcia' : 'www.dead.net', # 'Jerry Garcia',
'abb' : 'www.allmanbrothersband.com', # 'Allman Brothers Band',
'ah&f' : 'www.garajmahaljams.com', # 'Alan Hertz and Friends',
'ahf' : 'www.garajmahaljams.com', # 'Alan Hertz and Friends',
'ahp' : 'www.garajmahaljams.com', # 'Alan Hertz and Friends',
'amendola' : 'www.scottamendola.com', # 'Scott Amendola Band',
'amf' : 'www.myspace.com/Amfibian', # 'Amfibian',
'bc' : 'www.blackcrowes.com', # 'Black Crowes',
'bcue' : 'www.myspace.com/bbcue', # 'Barbara Cue',
'be' : 'www.euphio.net', # 'Bockman\'s Euphio',
               'beanland' : 'www.beanland.net', # 'Beanland',
'bfft' : 'www.belafleck.com', # 'Bela Fleck & the Flecktones',
'bfm' : 'www.myspace.com/barefootmanner', # 'Barefoot Manner',
'bftt' : 'www.belafleck.com', # 'Bela Fleck & Tony Trischka',
'bgug' : 'www.bluegroundundergrass.com', # 'Blueground Undergrass',
'bh' : 'www.benharper.com', # 'Ben Harper',
'bh' : 'www.brucehornsby.com', # 'Bruce Hornsby',
'bhic' : 'www.benharper.com', # 'Ben Harper and the Innocent Criminals',
'bij' : 'www.myspace.com/bigjapan', # 'Big In Japan',
'bmelon' : 'www.myspace.com/blindmelon', # 'Blind Melon',
'bnb' : 'www.myspace.com/burtneilsonband', # 'Burt Neilson Band',
'bog' : 'UNKNOWN', # 'A Band of Gypsys',
'bp' : 'www.brotherspast.com', # 'Brothers Past',
'bruce' : 'www.brucehornsby.com', # 'Bruce Hornsby',
'bs' : 'www.bigsmithband.com', # 'Big Smith',
'bspear' : 'www.burningspear.net', # 'Burning Spear',
'bt' : 'www.bluestraveler.com', # 'Blues Traveler',
'bts' : 'www.builttospill.com', # 'Built to Spill',
'buho' : 'www.elbuho.com', # 'El Buho',
'cb' : 'www.crittersbuggin.com', # 'Critters Buggin',
'cbobb' : 'www.lesclaypool.com', # 'Col. Claypool\'s Bucket of Bernie Brains',
'cc' : 'www.countingcrows.com', # 'Counting Crows',
'ccity' : 'www.myspace.com/ceruleancity', # 'Cerulean City',
'ch' : 'www.charliehunter.com', # 'Charlie Hunter',
'cj' : 'latentrecordings.com/cowboyjunkies', # 'Cowboy Junkies',
'claypool' : 'www.lesclaypool.com', # 'Les Claypool\'s Fearless Flying Frog Brigade',
'cracker' : 'www.crackersoul.com', # 'Cracker',
'crowes' : 'www.blackcrowes.com', # 'Black Crowes',
'cvb' : 'www.campervanbeethoven.com', # 'Camper van Beethoven',
'db' : 'www.discobiscuits.com', # 'Disco Biscuits',
'dbb' : 'www.deepbananablackout.com', # 'Deep Banana Blackout',
'dbr' : 'www.daybytheriver.org', # 'Day by the River',
'dbt' : 'www.drivebytruckers.com', # 'Drive-By Truckers',
'ddbb' : 'www.dirtydozenbrass.com', # 'Dirty Dozen Brass Band',
'dead' : 'www.dead.net', # 'The Dead',
'dgray' : 'www.davidgray.com', # 'David Gray',
'disp' : 'www.dispatchmusic.com', # 'Dispatch',
'djlogic' : 'www.djlogic.com', # 'DJ Logic',
'dmb' : 'www.davematthewsband.com', # 'Dave Matthews Band',
'dnb' : 'www.nelsonband.com', # 'David Nelson Band',
'dso' : 'www.darkstarorchestra.net', # 'Dark Star Orchestra',
'dt' : 'www.derektrucks.com', # 'Derek Trucks Band',
'dtb' : 'www.donnathebuffalo.com', # 'Donna the Buffalo',
'eb' : 'www.ediebrickell.com', # 'Edie Brickell',
'eh' : 'www.ekoostik.com', # 'Ekoostik Hookah',
'farrah' : 'UNKNOWN', # 'Farrah',
'fffb' : 'www.lesclaypool.com', # 'Les Claypool\'s Fearless Flying Frog Brigade',
'fhg' : 'UNKNOWN', # 'Fareed Haque Group',
'gal' : 'UNKNOWN', # 'Galactic',
'garaj' : 'www.garajmahaljams.com', # 'Garaj Mahal',
'gba' : 'UNKNOWN', # 'GreyBoy AllStars',
'gd' : 'www.dead.net', # 'Grateful Dead',
'glove' : 'UNKNOWN', # 'G. Love and Special Sauce',
'gm' : 'www.mule.net', # 'Gov\'t Mule',
'gsw' : 'UNKNOWN', # 'God Street Wine',
'gt' : 'UNKNOWN', # 'Ghost Trane',
'gtb' : 'UNKNOWN', # 'Grand Theft Bus',
'gus' : 'UNKNOWN', # 'Guster',
'guster' : 'UNKNOWN', # 'Guster',
'guymalone' : 'UNKNOWN', # 'Guy Malone',
'hday' : 'UNKNOWN', # 'Howie Day',
'hip' : 'www.thehip.com', # 'The Tragically Hip',
'ho' : 'UNKNOWN', # 'Schleigho',
'ht' : 'www.hottuna.com', # 'Hot Tuna',
'ig' : 'UNKNOWN', # 'Indigo Girls',
'itf' : 'UNKNOWN', # 'Indiana Trip Factory',
'jg' : 'www.dead.net', # 'Jerry Garcia',
'jgb' : 'www.dead.net', # 'Jerry Garcia Band',
'jh' : 'www.jimi-hendrix.com', # 'Jimi Hendrix Experience',
'jhbg' : 'UNKNOWN', # 'A Band of Gypsys',
'jhe' : 'www.jimi-hendrix.com', # 'Jimi Hendrix Experience',
'jj' : 'UNKNOWN', # 'Jack Johnson',
'jjj' : 'UNKNOWN', # 'Jerry Joseph & The Jackmormons',
'jk' : 'www.hottuna.com', # 'Jorma Kaukonen',
'jmayer' : 'UNKNOWN', # 'John Mayer',
'jmos' : 'UNKNOWN', # 'Jerry Joseph & The Jackmormons',
'jmp' : 'UNKNOWN', # 'Jazz Mandolin Project',
'jol' : 'UNKNOWN', # 'Jolene',
'jt' : 'www.wilcoworld.net', # 'Jeff Tweedy',
'kaikln' : 'UNKNOWN', # 'Kai Kln',
'kdcw' : 'UNKNOWN', # 'Karl Denson and Chris Wood',
'kdtu' : 'UNKNOWN', # 'Karl Denson\'s Tiny Universe',
'kdub' : 'UNKNOWN', # 'Keller Williams',
'kmck' : 'UNKNOWN', # 'Steve Kimock & Friends',
'kudzu' : 'UNKNOWN', # 'Kudzu Kings',
'kvhw' : 'UNKNOWN', # 'KVHW',
'kw' : 'UNKNOWN', # 'Keller Williams',
'kwi' : 'UNKNOWN', # 'Keller Williams String Cheese Incident',
'laf' : 'UNKNOWN', # 'Life After Failing',
'lcbbb' : 'www.lesclaypool.com', # 'Col. Claypool\'s Bucket of Bernie Brains',
'lcbobb' : 'www.lesclaypool.com', # 'Col. Claypool\'s Bucket of Bernie Brains',
'lcfffb' : 'www.lesclaypool.com', # 'Les Claypool\'s Fearless Flying Frog Brigade',
'ld' : 'UNKNOWN', # 'Living Daylights',
'lf' : 'UNKNOWN', # 'Little Feat',
'lfb' : 'UNKNOWN', # 'Lo Faber Band',
'logic' : 'UNKNOWN', # 'DJ Logic',
'lom' : 'www.dead.net', # 'Legion of Mary',
'los' : 'www.leftoversalmon.com', # 'Leftover Salmon',
'ls' : 'www.leftoversalmon.com', # 'Leftover Salmon',
'lt' : 'UNKNOWN', # 'Lake Trout',
'mammals' : 'UNKNOWN', # 'The Mammals',
'marlow' : 'UNKNOWN', # 'Marlow',
'mcrx' : 'UNKNOWN', # 'Mike Clark\'s Prescription Renewal',
'mel' : 'UNKNOWN', # 'Marcus Eaton & The Lobby',
'metheny' : 'UNKNOWN', # 'Pat Metheny',
'mf' : 'UNKNOWN', # 'Marc Ford',
'mfs' : 'UNKNOWN', # 'Michael Franti & Spearhead',
'mmw' : 'UNKNOWN', # 'Medeski Martin & Wood',
'moe' : 'www.moe.org', # 'moe.',
'mule' : 'www.mule.net', # 'Gov\'t Mule',
'nd' : 'UNKNOWN', # 'The New Deal',
'nmas' : 'UNKNOWN', # 'North Mississippi Allstars',
'oar' : 'UNKNOWN', # 'O.A.R. (Of A Revolution)',
'oh' : 'www.oysterhead.com', # 'Oysterhead',
'or' : 'www.oregonband.com', # 'Oregon',
'oregon' : 'www.oregonband.com', # 'Oregon',
'oysterhead' : 'www.oysterhead.com', # 'Oysterhead',
'p&f' : 'www.phillesh.net', # 'Phil Lesh & Friends',
'p+f' : 'www.phillesh.net', # 'Phil Lesh & Friends',
'paf' : 'www.phillesh.net', # 'Phil Lesh & Friends',
'par' : 'www.particlepeople.com', # 'Particle',
'particle' : 'www.particlepeople.com', # 'Particle',
'pb' : 'www.thebreakfast.org', # 'Psychedelic Breakfast',
'pf' : 'www.phillesh.net', # 'Phil Lesh & Friends',
'ph' : 'www.phish.com', # 'Phish',
'phil' : 'www.phillesh.net', # 'Phil Lesh & Friends',
'pj' : 'UNKNOWN', # 'Pearl Jam',
'pm' : 'UNKNOWN', # 'Pat Metheny',
'pmb' : 'UNKNOWN', # 'Pat McGee Band',
'pmg' : 'UNKNOWN', # 'Pat Metheny Group',
'pmt' : 'UNKNOWN', # 'Pat Metheny Trio',
'pnf' : 'www.phillesh.net', # 'Phil Lesh & Friends',
'porter' : 'UNKNOWN', # 'George Porter, Jr. & Runnin\' Pardners',
'rad' : 'www.theradiators.org', # 'Radiators',
'rads' : 'www.theradiators.org', # 'Radiators',
'raq' : 'UNKNOWN', # 'Raq',
'ratdog' : 'www.rat-dog.com', # 'Ratdog',
'rd' : 'www.rat-dog.com', # 'Ratdog',
'reservoir' : 'UNKNOWN', # 'Reservoir',
'rezi' : 'UNKNOWN', # 'Rezi',
'rg' : 'UNKNOWN', # 'Reid Genauer',
'rre' : 'UNKNOWN', # 'Railroad Earth',
'rrfb' : 'UNKNOWN', # 'Robert Randolph & the Family Band',
'rw2c' : 'UNKNOWN', # 'Robert Walter\'s 20th Congress',
'rwtc' : 'UNKNOWN', # 'Robert Walter\'s 20th Congress',
'schas' : 'UNKNOWN', # 'Santa Cruz Hemp Allstars',
'schwillb' : 'UNKNOWN', # 'The Schwillbillies',
'sci' : 'UNKNOWN', # 'String Cheese Incident',
'sco' : 'UNKNOWN', # 'John Scofield',
'sexmob' : 'UNKNOWN', # 'Sex Mob',
'sf' : 'UNKNOWN', # 'Strangefolk',
'sk' : 'www.kimock.com', # 'Kimock',
'skb' : 'www.kimock.com', # 'Steve Kimock Band',
'sl' : 'UNKNOWN', # 'The Slip',
'slip' : 'UNKNOWN', # 'The Slip',
'sonicyouth' : 'www.sonicyouth.com', # 'Sonic Youth',
'soulive' : 'UNKNOWN', # 'Soulive',
'spin' : 'UNKNOWN', # 'Spin Doctors',
'spod' : 'UNKNOWN', # 'Serial Pod',
'spr' : 'UNKNOWN', # 'Michael Franti & Spearhead',
'ss' : 'UNKNOWN', # 'Stockholm Syndrome',
'st' : 'UNKNOWN', # 'Shaking Tree',
'stocksyn' : 'UNKNOWN', # 'Stockholm Syndrome',
'syb' : 'UNKNOWN', # 'Seth Yacovone Band',
'tab' : 'UNKNOWN', # 'Trey Anastasio (Band)',
'td' : 'www.dead.net', # 'The Dead',
'tend' : 'UNKNOWN', # 'Tenacious D',
'tlg' : 'UNKNOWN', # 'Tea Leaf Green',
'tn' : 'UNKNOWN', # 'The Nadas',
'tnd' : 'UNKNOWN', # 'The New Deal',
'too' : 'www.dead.net', # 'The Other Ones',
'tortoise' : 'UNKNOWN', # 'Tortoise',
'tr' : 'UNKNOWN', # 'Tim Reynolds',
'trey' : 'UNKNOWN', # 'Trey Anastasio (Band)',
'um' : 'www.umphreys.com', # 'Umphrey\'s McGee',
'umph' : 'www.umphreys.com', # 'Umphrey\'s McGee',
'us' : 'UNKNOWN', # 'Uncle Sammy',
'vb' : 'UNKNOWN', # 'Vida Blue',
'wb4t' : 'UNKNOWN', # 'Will Bernard 4tet',
'ween' : 'UNKNOWN', # 'Ween',
'wh' : 'UNKNOWN', # 'Warren Haynes',
'wilco' : 'www.wilcoworld.net', # 'Wilco',
'word' : 'UNKNOWN', # 'The Word',
'wp' : 'www.widespreadpanic.com', # 'Widespread Panic',
'wsp' : 'www.widespreadpanic.com', # 'Widespread Panic',
'wu' : 'UNKNOWN', # 'The Big Wu',
'ymsb' : 'UNKNOWN', # 'Yonder Mountain String Band',
'zero' : 'www.zerolive.com', # 'Zero',
'zm' : 'UNKNOWN', # 'Zony Mash',
'zwan' : 'UNKNOWN' # 'Zwan'
}
# -----------------------------------------------------------------------
#
# __init__
#
# -----------------------------------------------------------------------
def __init__(self):
"""
Initializes this class with the following variables
debug = False
"""
self.debug = False
#
# End of __init__
#
# -----------------------------------------------------------------------
#
# turnOnDebug
#
# -----------------------------------------------------------------------
def turnOnDebug(self):
"""
Set the debug flag to True
"""
self.debug = True
return
#
# End of turnOnDebug
#
# -----------------------------------------------------------------------
#
# turnOffDebug
#
# -----------------------------------------------------------------------
def turnOffDebug(self):
"""
Set the debug flag to False
"""
self.debug = False
return
#
# End of turnOffDebug
#
# -----------------------------------------------------------------------
#
# loadBandDB
#
# -----------------------------------------------------------------------
def loadBandDB(self):
# D = {}
# D['a'] = {'name': 'bob', 'age': 43}
# name = D['a']['name']
        for name in self.BandNames:
            full_name = self.BandNames[name]
            url = self.BandUrl[name]
            self.BandDB[name] = {'full_name': full_name, 'url': url}
return
#
# End of loadBandDB
#
def createBandDB(self, D):
fqp = D['fqp']
dbh = D['dbh']
return
#
# End of createBandDB
#
#
# End of class pyetree_bandnames
#
# -----------------------------------------------------------------------
#
# < End of pyetree_bandnames >
#
# -----------------------------------------------------------------------
| apache-2.0 |
ivanhorvath/openshift-tools | openshift/installer/vendored/openshift-ansible-3.10.0-0.29.0/roles/lib_openshift/src/lib/volume.py | 64 | 2002 | # pylint: skip-file
# flake8: noqa
class Volume(object):
''' Class to represent an openshift volume object'''
volume_mounts_path = {"pod": "spec.containers[0].volumeMounts",
"dc": "spec.template.spec.containers[0].volumeMounts",
"rc": "spec.template.spec.containers[0].volumeMounts",
}
volumes_path = {"pod": "spec.volumes",
"dc": "spec.template.spec.volumes",
"rc": "spec.template.spec.volumes",
}
@staticmethod
def create_volume_structure(volume_info):
''' return a properly structured volume '''
volume_mount = None
volume = {'name': volume_info['name']}
volume_type = volume_info['type'].lower()
if volume_type == 'secret':
volume['secret'] = {}
volume[volume_info['type']] = {'secretName': volume_info['secret_name']}
volume_mount = {'mountPath': volume_info['path'],
'name': volume_info['name']}
elif volume_type == 'emptydir':
volume['emptyDir'] = {}
volume_mount = {'mountPath': volume_info['path'],
'name': volume_info['name']}
elif volume_type == 'pvc' or volume_type == 'persistentvolumeclaim':
volume['persistentVolumeClaim'] = {}
volume['persistentVolumeClaim']['claimName'] = volume_info['claimName']
volume['persistentVolumeClaim']['claimSize'] = volume_info['claimSize']
elif volume_type == 'hostpath':
volume['hostPath'] = {}
volume['hostPath']['path'] = volume_info['path']
elif volume_type == 'configmap':
volume['configMap'] = {}
volume['configMap']['name'] = volume_info['configmap_name']
volume_mount = {'mountPath': volume_info['path'],
'name': volume_info['name']}
return (volume, volume_mount)
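if __name__ == '__main__':
    # Minimal usage sketch with assumed values:
    vol, mount = Volume.create_volume_structure({
        'name': 'config',
        'type': 'configmap',
        'configmap_name': 'app-config',
        'path': '/etc/app',
    })
    print(vol)    # {'name': 'config', 'configMap': {'name': 'app-config'}}
    print(mount)  # {'mountPath': '/etc/app', 'name': 'config'}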
| apache-2.0 |
varunarya10/boto | tests/integration/ec2/test_cert_verification.py | 126 | 1549 | # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Check that all of the certs on all service endpoints validate.
"""
import unittest
from tests.integration import ServiceCertVerificationTest
import boto.ec2
class EC2CertVerificationTest(unittest.TestCase, ServiceCertVerificationTest):
ec2 = True
regions = boto.ec2.regions()
def sample_service_call(self, conn):
conn.get_all_reservations()
| mit |
npiganeau/odoo | addons/base_gengo/res_company.py | 24 | 1784 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class res_company(osv.Model):
_name = "res.company"
_inherit = "res.company"
_columns = {
"gengo_private_key": fields.text("Gengo Private Key", copy=False),
"gengo_public_key": fields.text("Gengo Public Key", copy=False),
"gengo_comment": fields.text("Comments", help="This comment will be automatically be enclosed in each an every request sent to Gengo"),
"gengo_auto_approve": fields.boolean("Auto Approve Translation ?", help="Jobs are Automatically Approved by Gengo."),
"gengo_sandbox": fields.boolean("Sandbox Mode", help="Check this box if you're using the sandbox mode of Gengo, mainly used for testing purpose."),
}
_defaults = {
"gengo_auto_approve": True,
}
| agpl-3.0 |
trondhindenes/ansible | test/units/modules/network/netscaler/test_netscaler_gslb_site.py | 68 | 24193 |
# Copyright (c) 2017 Citrix Systems
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from units.compat.mock import patch, Mock, MagicMock, call
from units.modules.utils import set_module_args
from .netscaler_module import TestModule, nitro_base_patcher
import sys
if sys.version_info[:2] != (2, 6):
import requests
class TestNetscalerGSLBSiteModule(TestModule):
@classmethod
def setUpClass(cls):
class MockException(Exception):
pass
cls.MockException = MockException
m = MagicMock()
nssrc_modules_mock = {
'nssrc.com.citrix.netscaler.nitro.resource.config.gslb': m,
'nssrc.com.citrix.netscaler.nitro.resource.config.gslb.gslbsite': m,
'nssrc.com.citrix.netscaler.nitro.resource.config.gslb.gslbsite.gslbsite': m,
}
cls.nitro_specific_patcher = patch.dict(sys.modules, nssrc_modules_mock)
cls.nitro_base_patcher = nitro_base_patcher
@classmethod
def tearDownClass(cls):
cls.nitro_base_patcher.stop()
cls.nitro_specific_patcher.stop()
def setUp(self):
super(TestNetscalerGSLBSiteModule, self).setUp()
self.nitro_base_patcher.start()
self.nitro_specific_patcher.start()
# Setup minimal required arguments to pass AnsibleModule argument parsing
def tearDown(self):
super(TestNetscalerGSLBSiteModule, self).tearDown()
self.nitro_base_patcher.stop()
self.nitro_specific_patcher.stop()
def test_graceful_nitro_api_import_error(self):
# Stop nitro api patching to cause ImportError
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
))
self.nitro_base_patcher.stop()
self.nitro_specific_patcher.stop()
from ansible.modules.network.netscaler import netscaler_gslb_site
self.module = netscaler_gslb_site
result = self.failed()
self.assertEqual(result['msg'], 'Could not load nitro python sdk')
def test_graceful_nitro_error_on_login(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_gslb_site
class MockException(Exception):
def __init__(self, *args, **kwargs):
self.errorcode = 0
self.message = ''
client_mock = Mock()
client_mock.login = Mock(side_effect=MockException)
m = Mock(return_value=client_mock)
with patch('ansible.modules.network.netscaler.netscaler_gslb_site.get_nitro_client', m):
with patch('ansible.modules.network.netscaler.netscaler_gslb_site.nitro_exception', MockException):
self.module = netscaler_gslb_site
result = self.failed()
self.assertTrue(result['msg'].startswith('nitro exception'), msg='nitro exception during login not handled properly')
def test_graceful_no_connection_error(self):
if sys.version_info[:2] == (2, 6):
self.skipTest('requests library not available under python2.6')
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_gslb_site
class MockException(Exception):
pass
client_mock = Mock()
attrs = {'login.side_effect': requests.exceptions.ConnectionError}
client_mock.configure_mock(**attrs)
m = Mock(return_value=client_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_site',
get_nitro_client=m,
nitro_exception=MockException,
):
self.module = netscaler_gslb_site
result = self.failed()
self.assertTrue(result['msg'].startswith('Connection error'), msg='Connection error was not handled gracefully')
def test_graceful_login_error(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_gslb_site
if sys.version_info[:2] == (2, 6):
self.skipTest('requests library not available under python2.6')
class MockException(Exception):
pass
client_mock = Mock()
attrs = {'login.side_effect': requests.exceptions.SSLError}
client_mock.configure_mock(**attrs)
m = Mock(return_value=client_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_site',
get_nitro_client=m,
nitro_exception=MockException,
):
self.module = netscaler_gslb_site
result = self.failed()
self.assertTrue(result['msg'].startswith('SSL Error'), msg='SSL Error was not handled gracefully')
def test_ensure_feature_is_enabled_called(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_gslb_site
gslb_site_proxy_mock = Mock()
ensure_feature_is_enabled_mock = Mock()
client_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_site',
get_nitro_client=Mock(return_value=client_mock),
gslb_site_exists=Mock(side_effect=[False, True]),
gslb_site_identical=Mock(side_effect=[True]),
nitro_exception=self.MockException,
ensure_feature_is_enabled=ensure_feature_is_enabled_mock,
ConfigProxy=Mock(return_value=gslb_site_proxy_mock),
):
self.module = netscaler_gslb_site
self.exited()
ensure_feature_is_enabled_mock.assert_called_with(client_mock, 'GSLB')
def test_save_config_called_on_state_present(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_gslb_site
client_mock = Mock()
m = Mock(return_value=client_mock)
gslb_site_proxy_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_site',
get_nitro_client=m,
gslb_site_exists=Mock(side_effect=[False, True]),
gslb_site_identical=Mock(side_effect=[True]),
nitro_exception=self.MockException,
ensure_feature_is_enabled=Mock(),
ConfigProxy=Mock(return_value=gslb_site_proxy_mock),
):
self.module = netscaler_gslb_site
self.exited()
self.assertIn(call.save_config(), client_mock.mock_calls)
def test_save_config_called_on_state_absent(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='absent',
))
from ansible.modules.network.netscaler import netscaler_gslb_site
client_mock = Mock()
m = Mock(return_value=client_mock)
gslb_site_proxy_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_site',
get_nitro_client=m,
gslb_site_exists=Mock(side_effect=[True, False]),
nitro_exception=self.MockException,
ensure_feature_is_enabled=Mock(),
ConfigProxy=Mock(return_value=gslb_site_proxy_mock),
):
self.module = netscaler_gslb_site
self.exited()
self.assertIn(call.save_config(), client_mock.mock_calls)
def test_save_config_not_called_on_state_present(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
save_config=False,
))
from ansible.modules.network.netscaler import netscaler_gslb_site
client_mock = Mock()
m = Mock(return_value=client_mock)
gslb_site_proxy_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_site',
get_nitro_client=m,
gslb_site_exists=Mock(side_effect=[False, True]),
gslb_site_identical=Mock(side_effect=[True]),
nitro_exception=self.MockException,
ensure_feature_is_enabled=Mock(),
ConfigProxy=Mock(return_value=gslb_site_proxy_mock),
):
self.module = netscaler_gslb_site
self.exited()
self.assertNotIn(call.save_config(), client_mock.mock_calls)
def test_save_config_not_called_on_state_absent(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='absent',
save_config=False,
))
from ansible.modules.network.netscaler import netscaler_gslb_site
client_mock = Mock()
m = Mock(return_value=client_mock)
gslb_site_proxy_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_site',
get_nitro_client=m,
gslb_site_exists=Mock(side_effect=[True, False]),
nitro_exception=self.MockException,
ensure_feature_is_enabled=Mock(),
ConfigProxy=Mock(return_value=gslb_site_proxy_mock),
):
self.module = netscaler_gslb_site
self.exited()
self.assertNotIn(call.save_config(), client_mock.mock_calls)
def test_new_gslb_site_execution_flow(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_gslb_site
client_mock = Mock()
m = Mock(return_value=client_mock)
glsb_site_proxy_attrs = {
'diff_object.return_value': {},
}
gslb_site_proxy_mock = Mock()
gslb_site_proxy_mock.configure_mock(**glsb_site_proxy_attrs)
config_proxy_mock = Mock(return_value=gslb_site_proxy_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_site',
get_nitro_client=m,
gslb_site_exists=Mock(side_effect=[False, True]),
gslb_site_identical=Mock(side_effect=[True]),
nitro_exception=self.MockException,
ensure_feature_is_enabled=Mock(),
ConfigProxy=config_proxy_mock,
):
self.module = netscaler_gslb_site
self.exited()
gslb_site_proxy_mock.assert_has_calls([call.add()])
def test_modified_gslb_site_execution_flow(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_gslb_site
client_mock = Mock()
m = Mock(return_value=client_mock)
glsb_site_proxy_attrs = {
'diff_object.return_value': {},
}
gslb_site_proxy_mock = Mock()
gslb_site_proxy_mock.configure_mock(**glsb_site_proxy_attrs)
config_proxy_mock = Mock(return_value=gslb_site_proxy_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_site',
get_nitro_client=m,
diff_list=Mock(return_value={}),
get_immutables_intersection=Mock(return_value=[]),
gslb_site_exists=Mock(side_effect=[True, True]),
gslb_site_identical=Mock(side_effect=[False, True]),
ensure_feature_is_enabled=Mock(),
nitro_exception=self.MockException,
ConfigProxy=config_proxy_mock,
):
self.module = netscaler_gslb_site
self.exited()
gslb_site_proxy_mock.assert_has_calls([call.update()])
def test_absent_gslb_site_execution_flow(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='absent',
))
from ansible.modules.network.netscaler import netscaler_gslb_site
client_mock = Mock()
m = Mock(return_value=client_mock)
glsb_site_proxy_attrs = {
'diff_object.return_value': {},
}
gslb_site_proxy_mock = Mock()
gslb_site_proxy_mock.configure_mock(**glsb_site_proxy_attrs)
config_proxy_mock = Mock(return_value=gslb_site_proxy_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_site',
get_nitro_client=m,
diff_list=Mock(return_value={}),
get_immutables_intersection=Mock(return_value=[]),
gslb_site_exists=Mock(side_effect=[True, False]),
gslb_site_identical=Mock(side_effect=[False, True]),
ensure_feature_is_enabled=Mock(),
ConfigProxy=config_proxy_mock,
):
self.module = netscaler_gslb_site
self.exited()
gslb_site_proxy_mock.assert_has_calls([call.delete()])
def test_present_gslb_site_identical_flow(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_gslb_site
client_mock = Mock()
m = Mock(return_value=client_mock)
glsb_site_proxy_attrs = {
'diff_object.return_value': {},
}
gslb_site_proxy_mock = Mock()
gslb_site_proxy_mock.configure_mock(**glsb_site_proxy_attrs)
config_proxy_mock = Mock(return_value=gslb_site_proxy_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_site',
get_nitro_client=m,
diff_list=Mock(return_value={}),
get_immutables_intersection=Mock(return_value=[]),
gslb_site_exists=Mock(side_effect=[True, True]),
gslb_site_identical=Mock(side_effect=[True, True]),
nitro_exception=self.MockException,
ensure_feature_is_enabled=Mock(),
ConfigProxy=config_proxy_mock,
):
self.module = netscaler_gslb_site
self.exited()
gslb_site_proxy_mock.assert_not_called()
def test_absent_gslb_site_noop_flow(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='absent',
))
from ansible.modules.network.netscaler import netscaler_gslb_site
client_mock = Mock()
m = Mock(return_value=client_mock)
glsb_site_proxy_attrs = {
'diff_object.return_value': {},
}
gslb_site_proxy_mock = Mock()
gslb_site_proxy_mock.configure_mock(**glsb_site_proxy_attrs)
config_proxy_mock = Mock(return_value=gslb_site_proxy_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_site',
get_nitro_client=m,
diff_list=Mock(return_value={}),
get_immutables_intersection=Mock(return_value=[]),
gslb_site_exists=Mock(side_effect=[False, False]),
gslb_site_identical=Mock(side_effect=[False, False]),
nitro_exception=self.MockException,
ensure_feature_is_enabled=Mock(),
ConfigProxy=config_proxy_mock,
):
self.module = netscaler_gslb_site
self.exited()
gslb_site_proxy_mock.assert_not_called()
def test_present_gslb_site_failed_update(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_gslb_site
client_mock = Mock()
m = Mock(return_value=client_mock)
glsb_site_proxy_attrs = {
'diff_object.return_value': {},
}
gslb_site_proxy_mock = Mock()
gslb_site_proxy_mock.configure_mock(**glsb_site_proxy_attrs)
config_proxy_mock = Mock(return_value=gslb_site_proxy_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_site',
nitro_exception=self.MockException,
get_nitro_client=m,
diff_list=Mock(return_value={}),
get_immutables_intersection=Mock(return_value=[]),
gslb_site_exists=Mock(side_effect=[True, True]),
gslb_site_identical=Mock(side_effect=[False, False]),
ensure_feature_is_enabled=Mock(),
ConfigProxy=config_proxy_mock,
):
self.module = netscaler_gslb_site
result = self.failed()
self.assertEqual(result['msg'], 'GSLB site differs from configured')
self.assertTrue(result['failed'])
def test_present_gslb_site_failed_create(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_gslb_site
client_mock = Mock()
m = Mock(return_value=client_mock)
glsb_site_proxy_attrs = {
'diff_object.return_value': {},
}
gslb_site_proxy_mock = Mock()
gslb_site_proxy_mock.configure_mock(**glsb_site_proxy_attrs)
config_proxy_mock = Mock(return_value=gslb_site_proxy_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_site',
nitro_exception=self.MockException,
get_nitro_client=m,
diff_list=Mock(return_value={}),
get_immutables_intersection=Mock(return_value=[]),
gslb_site_exists=Mock(side_effect=[False, False]),
gslb_site_identical=Mock(side_effect=[False, False]),
ensure_feature_is_enabled=Mock(),
ConfigProxy=config_proxy_mock,
):
self.module = netscaler_gslb_site
result = self.failed()
self.assertEqual(result['msg'], 'GSLB site does not exist')
self.assertTrue(result['failed'])
def test_present_gslb_site_update_immutable_attribute(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_gslb_site
client_mock = Mock()
m = Mock(return_value=client_mock)
glsb_site_proxy_attrs = {
'diff_object.return_value': {},
}
gslb_site_proxy_mock = Mock()
gslb_site_proxy_mock.configure_mock(**glsb_site_proxy_attrs)
config_proxy_mock = Mock(return_value=gslb_site_proxy_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_site',
nitro_exception=self.MockException,
get_nitro_client=m,
diff_list=Mock(return_value={}),
get_immutables_intersection=Mock(return_value=['domain']),
gslb_site_exists=Mock(side_effect=[True, True]),
gslb_site_identical=Mock(side_effect=[False, False]),
ensure_feature_is_enabled=Mock(),
ConfigProxy=config_proxy_mock,
):
self.module = netscaler_gslb_site
result = self.failed()
self.assertEqual(result['msg'], 'Cannot update immutable attributes [\'domain\']')
self.assertTrue(result['failed'])
def test_absent_gslb_site_failed_delete(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='absent',
))
from ansible.modules.network.netscaler import netscaler_gslb_site
client_mock = Mock()
m = Mock(return_value=client_mock)
glsb_site_proxy_attrs = {
'diff_object.return_value': {},
}
gslb_site_proxy_mock = Mock()
gslb_site_proxy_mock.configure_mock(**glsb_site_proxy_attrs)
config_proxy_mock = Mock(return_value=gslb_site_proxy_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_site',
nitro_exception=self.MockException,
get_nitro_client=m,
diff_list=Mock(return_value={}),
get_immutables_intersection=Mock(return_value=[]),
gslb_site_exists=Mock(side_effect=[True, True]),
gslb_site_identical=Mock(side_effect=[False, False]),
ensure_feature_is_enabled=Mock(),
ConfigProxy=config_proxy_mock,
):
self.module = netscaler_gslb_site
result = self.failed()
self.assertEqual(result['msg'], 'GSLB site still exists')
self.assertTrue(result['failed'])
def test_graceful_nitro_exception_state_present(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_gslb_site
class MockException(Exception):
def __init__(self, *args, **kwargs):
self.errorcode = 0
self.message = ''
m = Mock(side_effect=MockException)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_site',
gslb_site_exists=m,
ensure_feature_is_enabled=Mock(),
nitro_exception=MockException
):
self.module = netscaler_gslb_site
result = self.failed()
self.assertTrue(
result['msg'].startswith('nitro exception'),
            msg='Nitro exception not caught on operation present'
)
def test_graceful_nitro_exception_state_absent(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='absent',
))
from ansible.modules.network.netscaler import netscaler_gslb_site
class MockException(Exception):
def __init__(self, *args, **kwargs):
self.errorcode = 0
self.message = ''
m = Mock(side_effect=MockException)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_site',
gslb_site_exists=m,
ensure_feature_is_enabled=Mock(),
nitro_exception=MockException
):
self.module = netscaler_gslb_site
result = self.failed()
self.assertTrue(
result['msg'].startswith('nitro exception'),
msg='Nitro exception not caught on operation absent'
)
| gpl-3.0 |
vishnuvaradaraj/firefox-ios | scripts/export-xliff.py | 49 | 1380 | #!/usr/bin/env python
#
# xliff-export.py xcodeproj-path l10n-path
#
# Export all locales that are present in the l10n directory. We use xcodebuild
# to export and write to l10n-directory/$locale/firefox-ios.xliff so that it
# can be easily imported into svn (which is a manual step).
#
# Example:
#
# cd firefox-ios
# ./xliff-export.py Client.xcodeproj ../firefox-ios-l10n
#
import glob
import os
import shutil
import subprocess
import sys
LOCALES_TO_SKIP = ['pl']
def available_locales(l10n_path):
for xliff_path in glob.glob(l10n_path + "/*/firefox-ios.xliff"):
parts = xliff_path.split(os.sep)
yield parts[-2]
if __name__ == "__main__":
project_path = sys.argv[1]
l10n_path = sys.argv[2]
for locale in available_locales(l10n_path):
if locale in LOCALES_TO_SKIP:
continue
command = [
"xcodebuild",
"-exportLocalizations",
"-localizationPath", "/tmp/xliff",
"-project", project_path,
"-exportLanguage", locale
]
print "Exporting '%s' to '/tmp/xliff/%s.xliff'" % (locale, locale)
subprocess.call(command)
src_path = "/tmp/xliff/%s.xliff" % locale
dst_path = "%s/%s/firefox-ios.xliff" % (l10n_path, locale)
print "Copying '%s' to '%s'" % (src_path, dst_path)
shutil.copy(src_path, dst_path)
| mpl-2.0 |
runt18/mod_python | lib/python/mod_python/Cookie.py | 3 | 12964 | # vim: set sw=4 expandtab :
#
# Copyright (C) 2000, 2001, 2013 Gregory Trubetskoy
# Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Apache Software Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# Originally developed by Gregory Trubetskoy.
#
"""
This module contains classes to support HTTP State Management
Mechanism, also known as Cookies. The classes provide simple
ways for creating, parsing and digitally signing cookies, as
well as the ability to store simple Python objects in Cookies
(using marshalling).
The behaviour of the classes is designed to be most useful
within mod_python applications.
The current state of HTTP State Management standardization is
rather unclear. It appears that the de-facto standard is the
original Netscape specification, even though already two RFC's
have been put out (RFC2109 (1997) and RFC2965 (2000)). The
RFC's add a couple of useful features (e.g. using Max-Age instead
of Expires), but my limited tests show that Max-Age is ignored
by the two browsers tested (IE and Safari). As a result of this,
perhaps trying to be RFC-compliant (by automatically providing
Max-Age and Version) could be a waste of cookie space...
"""
import sys
import time
import re
import hmac
import marshal
import base64
PY2 = sys.version[0] == '2'
class CookieError(Exception):
pass
class metaCookie(type):
def __new__(cls, clsname, bases, clsdict):
_valid_attr = (
"version", "path", "domain", "secure",
"comment", "expires", "max_age",
# RFC 2965
"commentURL", "discard", "port",
# Microsoft Extension
"httponly" )
# _valid_attr + property values
# (note __slots__ is a new Python feature, it
# prevents any other attribute from being set)
__slots__ = _valid_attr + ("name", "value", "_value",
"_expires", "__data__")
clsdict["_valid_attr"] = _valid_attr
clsdict["__slots__"] = __slots__
def set_expires(self, value):
if type(value) == type(""):
# if it's a string, it should be
# valid format as per Netscape spec
try:
t = time.strptime(value, "%a, %d-%b-%Y %H:%M:%S GMT")
except ValueError:
raise ValueError("Invalid expires time: %s" % value)
t = time.mktime(t)
else:
# otherwise assume it's a number
# representing time as from time.time()
t = value
value = time.strftime("%a, %d-%b-%Y %H:%M:%S GMT",
time.gmtime(t))
self._expires = "%s" % value
def get_expires(self):
return self._expires
clsdict["expires"] = property(fget=get_expires, fset=set_expires)
return type.__new__(cls, clsname, bases, clsdict)
# metaclass= workaround, see
# http://mikewatkins.ca/2008/11/29/python-2-and-3-metaclasses/#using-the-metaclass-in-python-2-x-and-3-x
_metaCookie = metaCookie('Cookie', (object, ), {})
class Cookie(_metaCookie):
"""
This class implements the basic Cookie functionality. Note that
unlike the Python Standard Library Cookie class, this class represents
a single cookie (not a list of Morsels).
"""
DOWNGRADE = 0
IGNORE = 1
EXCEPTION = 3
def parse(Class, str, **kw):
"""
Parse a Cookie or Set-Cookie header value, and return
a dict of Cookies. Note: the string should NOT include the
header name, only the value.
"""
dict = _parse_cookie(str, Class, **kw)
return dict
parse = classmethod(parse)
def __init__(self, name, value, **kw):
"""
This constructor takes at least a name and value as the
arguments, as well as optionally any of allowed cookie attributes
as defined in the existing cookie standards.
"""
self.name, self.value = name, value
for k in kw:
setattr(self, k.lower(), kw[k])
# subclasses can use this for internal stuff
self.__data__ = {}
def __str__(self):
"""
Provides the string representation of the Cookie suitable for
sending to the browser. Note that the actual header name will
not be part of the string.
This method makes no attempt to automatically double-quote
strings that contain special characters, even though the RFC's
dictate this. This is because doing so seems to confuse most
browsers out there.
"""
result = ["%s=%s" % (self.name, self.value)]
for name in self._valid_attr:
if hasattr(self, name):
if name in ("secure", "discard", "httponly"):
result.append(name)
else:
result.append("%s=%s" % (name, getattr(self, name)))
return "; ".join(result)
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__,
str(self))
class SignedCookie(Cookie):
"""
This is a variation of Cookie that provides automatic
cryptographic signing of cookies and verification. It uses
the HMAC support in the Python standard library. This ensures
    that the cookie has not been tampered with on the client side.
Note that this class does not encrypt cookie data, thus it
is still plainly visible as part of the cookie.
"""
def parse(Class, s, secret, mismatch=Cookie.DOWNGRADE, **kw):
dict = _parse_cookie(s, Class, **kw)
del_list = []
for k in dict:
c = dict[k]
try:
c.unsign(secret)
except CookieError:
if mismatch == Cookie.EXCEPTION:
raise
elif mismatch == Cookie.IGNORE:
del_list.append(k)
else:
# downgrade to Cookie
dict[k] = Cookie.parse(Cookie.__str__(c))[k]
for k in del_list:
del dict[k]
return dict
parse = classmethod(parse)
def __init__(self, name, value, secret=None, **kw):
Cookie.__init__(self, name, value, **kw)
self.__data__["secret"] = secret
def hexdigest(self, str):
if not self.__data__["secret"]:
raise CookieError("Cannot sign without a secret")
_hmac = hmac.new(self.__data__["secret"], self.name)
_hmac.update(str)
if PY2:
return _hmac.hexdigest()
else:
return _hmac.hexdigest().decode()
def __str__(self):
result = ["%s=%s%s" % (self.name, self.hexdigest(self.value),
self.value)]
for name in self._valid_attr:
if hasattr(self, name):
if name in ("secure", "discard", "httponly"):
result.append(name)
else:
result.append("%s=%s" % (name, getattr(self, name)))
return "; ".join(result)
def unsign(self, secret):
sig, val = self.value[:32], self.value[32:]
mac = hmac.new(secret, self.name)
mac.update(val)
if mac.hexdigest() == sig:
self.value = val
self.__data__["secret"] = secret
else:
raise CookieError("Incorrectly Signed Cookie: %s=%s" % (self.name, self.value))
class MarshalCookie(SignedCookie):
"""
This is a variation of SignedCookie that can store more than
just strings. It will automatically marshal the cookie value,
therefore any marshallable object can be used as value.
The standard library Cookie module provides the ability to pickle
data, which is a major security problem. It is believed that unmarshalling
(as opposed to unpickling) is safe, yet we still err on the side of caution
    which is why this class is a subclass of SignedCookie, making sure that what
we are about to unmarshal passes the digital signature test.
    Here is a link to a suggestion that marshalling is safer than unpickling:
http://groups.google.com/groups?hl=en&lr=&ie=UTF-8&selm=7xn0hcugmy.fsf%40ruckus.brouhaha.com
"""
def parse(Class, s, secret, mismatch=Cookie.DOWNGRADE, **kw):
dict = _parse_cookie(s, Class, **kw)
del_list = []
for k in dict:
c = dict[k]
try:
c.unmarshal(secret)
except CookieError:
if mismatch == Cookie.EXCEPTION:
raise
elif mismatch == Cookie.IGNORE:
del_list.append(k)
else:
# downgrade to Cookie
dict[k] = Cookie.parse(Cookie.__str__(c))[k]
for k in del_list:
del dict[k]
return dict
parse = classmethod(parse)
def __str__(self):
m = base64.encodestring(marshal.dumps(self.value))
# on long cookies, the base64 encoding can contain multiple lines
# separated by \n or \r\n
m = ''.join(m.split())
result = ["%s=%s%s" % (self.name, self.hexdigest(m), m)]
for name in self._valid_attr:
if hasattr(self, name):
if name in ("secure", "discard", "httponly"):
result.append(name)
else:
result.append("%s=%s" % (name, getattr(self, name)))
return "; ".join(result)
def unmarshal(self, secret):
self.unsign(secret)
try:
data = base64.decodestring(self.value)
except:
raise CookieError("Cannot base64 Decode Cookie: %s=%s" % (self.name, self.value))
try:
self.value = marshal.loads(data)
except (EOFError, ValueError, TypeError):
raise CookieError("Cannot Unmarshal Cookie: %s=%s" % (self.name, self.value))
# This is a simplified and in some places corrected
# (at least I think it is) pattern from standard lib Cookie.py
_cookiePattern = re.compile(
r"(?x)" # Verbose pattern
r"[,\ ]*" # space/comma (RFC2616 4.2) before attr-val is eaten
r"(?P<key>" # Start of group 'key'
r"[^;\ =]+" # anything but ';', ' ' or '='
r")" # End of group 'key'
r"\ *(=\ *)?" # a space, then may be "=", more space
r"(?P<val>" # Start of group 'val'
r'"(?:[^\\"]|\\.)*"' # a doublequoted string
r"|" # or
r"[^;]*" # any word or empty string
r")" # End of group 'val'
r"\s*;?" # probably ending in a semi-colon
)
def _parse_cookie(str, Class, names=None):
# XXX problem is we should allow duplicate
# strings
result = {}
matchIter = _cookiePattern.finditer(str)
for match in matchIter:
key, val = match.group("key"), match.group("val")
# We just ditch the cookies names which start with a dollar sign since
# those are in fact RFC2965 cookies attributes. See bug [#MODPYTHON-3].
        if key[0] != '$' and (names is None or key in names):
result[key] = Class(key, val)
return result
def add_cookie(req, cookie, value="", **kw):
"""
Sets a cookie in outgoing headers and adds a cache
directive so that caches don't cache the cookie.
"""
# is this a cookie?
if not isinstance(cookie, Cookie):
# make a cookie
cookie = Cookie(cookie, value, **kw)
if "Set-Cookie" not in req.headers_out:
req.headers_out.add("Cache-Control", 'no-cache="set-cookie"')
req.headers_out.add("Set-Cookie", str(cookie))
def get_cookies(req, Class=Cookie, **kw):
"""
    A shorthand for retrieving and parsing cookies given
a Cookie class. The class must be one of the classes from
this module.
"""
if "cookie" not in req.headers_in:
return {}
cookies = req.headers_in["cookie"]
if type(cookies) == type([]):
cookies = '; '.join(cookies)
return Class.parse(cookies, **kw)
def get_cookie(req, name, Class=Cookie, **kw):
cookies = get_cookies(req, Class, names=[name], **kw)
if name in cookies:
return cookies[name]
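if __name__ == '__main__':
    # Hedged usage sketch (an editor's addition, not part of the original
    # module): round-trip a SignedCookie through its header string and let
    # parse() verify the HMAC signature on the way back in.
    c = SignedCookie('session', 'user42', secret='s3cret')
    parsed = SignedCookie.parse(str(c), 's3cret')
    print(parsed['session'])  # value is plain 'user42' once the signature checks out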
| apache-2.0 |
kirmani/lockman | MC/RPi.GPIO-0.1.0/build/lib.linux-armv6l-2.7/RPi/GPIO/__init__.py | 4 | 3646 | #!/usr/bin/env python
# Copyright (c) 2012 Ben Croston
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import atexit
# pins start from 1, tuple index starts from 0
_GPIO_PINS = (None, None, None, '0', None, '1', None, '4', '14', None, '15', '17', '18', '21', None, '22', '23', None, '24', '10', None, '9', '25', '11', '8', None, '7')
IN = 'in'
OUT = 'out'
_ExportedIds = {}
class InvalidPinException(Exception):
"""The pin sent is invalid on a Raspberry Pi"""
pass
class InvalidDirectionException(Exception):
"""An invalid direction was passed to setup()"""
pass
class WrongDirectionException(Exception):
"""The GPIO channel has not been set up or is set up in the wrong direction"""
pass
def _GetValidId(pin):
try:
value = _GPIO_PINS[int(pin)]
except:
raise InvalidPinException
if value is None or pin < 1:
raise InvalidPinException
return value
def setup(pin, direction):
"""
Set up the GPIO channel and direction
pin - RPi board GPIO pin number (not SOC pin number). Pins start from 1
direction - IN or OUT
"""
id = _GetValidId(pin)
if direction != IN and direction != OUT:
raise InvalidDirectionException
# unexport if it exists
if os.path.exists('/sys/class/gpio/gpio%s'%id):
with open('/sys/class/gpio/unexport', 'w') as f:
f.write(id)
# export
with open('/sys/class/gpio/export', 'w') as f:
f.write(id)
# set i/o direction
with open('/sys/class/gpio/gpio%s/direction'%id, 'w') as f:
f.write(direction)
_ExportedIds[id] = direction
def output(pin, value):
"""Write to a GPIO channel"""
id = _GetValidId(pin)
if id not in _ExportedIds or _ExportedIds[id] != OUT:
raise WrongDirectionException
with open('/sys/class/gpio/gpio%s/value'%id, 'w') as f:
f.write('1' if value else '0')
def input(pin):
"""Read from a GPIO channel"""
id = _GetValidId(pin)
if id not in _ExportedIds or _ExportedIds[id] != IN:
raise WrongDirectionException
with open('/sys/class/gpio/gpio%s/value'%id, 'r') as f:
return f.read(1) == '1'
# clean up routine
def _unexport():
"""Clean up by unexporting evey channel that we have set up"""
for id in _ExportedIds:
if os.path.exists('/sys/class/gpio/gpio%s'%id):
with open('/sys/class/gpio/unexport', 'w') as f:
f.write(id)
atexit.register(_unexport)
if __name__ == '__main__':
# assumes pin 11 INPUT
# pin 12 OUTPUT
setup(11, IN)
setup(12, OUT)
print(input(11))
output(12, True)
| mit |
zakuro9715/lettuce | tests/integration/lib/Django-1.2.5/tests/regressiontests/many_to_one_regress/tests.py | 92 | 4466 | from django.db import models
from django.test import TestCase
from models import First, Second, Third, Parent, Child, Category, Record, Relation
class ManyToOneRegressionTests(TestCase):
def test_object_creation(self):
Third.objects.create(id='3', name='An example')
parent = Parent(name='fred')
parent.save()
Child.objects.create(name='bam-bam', parent=parent)
def test_fk_assignment_and_related_object_cache(self):
# Tests of ForeignKey assignment and the related-object cache (see #6886).
p = Parent.objects.create(name="Parent")
c = Child.objects.create(name="Child", parent=p)
# Look up the object again so that we get a "fresh" object.
c = Child.objects.get(name="Child")
p = c.parent
# Accessing the related object again returns the exactly same object.
self.assertTrue(c.parent is p)
# But if we kill the cache, we get a new object.
del c._parent_cache
self.assertFalse(c.parent is p)
# Assigning a new object results in that object getting cached immediately.
p2 = Parent.objects.create(name="Parent 2")
c.parent = p2
self.assertTrue(c.parent is p2)
# Assigning None succeeds if field is null=True.
p.bestchild = None
self.assertTrue(p.bestchild is None)
# bestchild should still be None after saving.
p.save()
self.assertTrue(p.bestchild is None)
# bestchild should still be None after fetching the object again.
p = Parent.objects.get(name="Parent")
self.assertTrue(p.bestchild is None)
# Assigning None fails: Child.parent is null=False.
self.assertRaises(ValueError, setattr, c, "parent", None)
# You also can't assign an object of the wrong type here
self.assertRaises(ValueError, setattr, c, "parent", First(id=1, second=1))
# Nor can you explicitly assign None to Child.parent during object
# creation (regression for #9649).
self.assertRaises(ValueError, Child, name='xyzzy', parent=None)
self.assertRaises(ValueError, Child.objects.create, name='xyzzy', parent=None)
# Creation using keyword argument should cache the related object.
p = Parent.objects.get(name="Parent")
c = Child(parent=p)
self.assertTrue(c.parent is p)
# Creation using keyword argument and unsaved related instance (#8070).
p = Parent()
c = Child(parent=p)
self.assertTrue(c.parent is p)
# Creation using attname keyword argument and an id will cause the
# related object to be fetched.
p = Parent.objects.get(name="Parent")
c = Child(parent_id=p.id)
self.assertFalse(c.parent is p)
self.assertEqual(c.parent, p)
def test_multiple_foreignkeys(self):
# Test of multiple ForeignKeys to the same model (bug #7125).
c1 = Category.objects.create(name='First')
c2 = Category.objects.create(name='Second')
c3 = Category.objects.create(name='Third')
r1 = Record.objects.create(category=c1)
r2 = Record.objects.create(category=c1)
r3 = Record.objects.create(category=c2)
r4 = Record.objects.create(category=c2)
r5 = Record.objects.create(category=c3)
r = Relation.objects.create(left=r1, right=r2)
r = Relation.objects.create(left=r3, right=r4)
r = Relation.objects.create(left=r1, right=r3)
r = Relation.objects.create(left=r5, right=r2)
r = Relation.objects.create(left=r3, right=r2)
q1 = Relation.objects.filter(left__category__name__in=['First'], right__category__name__in=['Second'])
self.assertQuerysetEqual(q1, ["<Relation: First - Second>"])
q2 = Category.objects.filter(record__left_set__right__category__name='Second').order_by('name')
self.assertQuerysetEqual(q2, ["<Category: First>", "<Category: Second>"])
p = Parent.objects.create(name="Parent")
c = Child.objects.create(name="Child", parent=p)
self.assertRaises(ValueError, Child.objects.create, name="Grandchild", parent=c)
def test_fk_instantiation_outside_model(self):
# Regression for #12190 -- Should be able to instantiate a FK outside
# of a model, and interrogate its related field.
cat = models.ForeignKey(Category)
self.assertEqual('id', cat.rel.get_related_field().name)
| gpl-3.0 |
Ant-OS/android_packages_apps_OTAUpdates | jni/boost_1_57_0/libs/python/test/operators.py | 46 | 1166 | # Copyright David Abrahams 2004. Distributed under the Boost
# Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
'''
>>> from operators_ext import *
Check __nonzero__ support
>>> assert X(2)
>>> assert not X(0)
----
>>> x = X(42)
>>> x.value()
42
>>> y = x - X(5)
>>> y.value()
37
>>> y = x - 4
>>> y.value()
38
>>> y = 3 - x
>>> y.value()
-39
>>> (-y).value()
39
>>> (x + y).value()
3
>>> abs(y).value()
39
>>> x < 10
0
>>> x < 43
1
>>> 10 < x
1
>>> 43 < x
0
>>> x < y
0
>>> y < x
1
------
>>> x > 10
1
>>> x > 43
0
>>> 10 > x
0
>>> 43 > x
1
>>> x > y
1
>>> y > x
0
>>> y = x - 5
>>> x -= y
>>> x.value()
5
>>> str(x)
'5'
>>> z = Z(10)
>>> int(z)
10
>>> float(z)
10.0
>>> complex(z)
(10+0j)
>>> pow(2,x)
32
>>> pow(x,2).value()
25
>>> pow(X(2),x).value()
32
'''
def run(args = None):
import sys
import doctest
if args is not None:
sys.argv = args
return doctest.testmod(sys.modules.get(__name__))
if __name__ == '__main__':
print "running..."
import sys
status = run()[0]
if (status == 0): print "Done."
sys.exit(status)
| apache-2.0 |
mistercrunch/airflow | airflow/migrations/versions/45ba3f1493b9_add_k8s_yaml_to_rendered_templates.py | 9 | 1689 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""add-k8s-yaml-to-rendered-templates
Revision ID: 45ba3f1493b9
Revises: 364159666cbd
Create Date: 2020-10-23 23:01:52.471442
"""
import sqlalchemy_jsonfield
from alembic import op
from sqlalchemy import Column
from airflow.settings import json
# revision identifiers, used by Alembic.
revision = '45ba3f1493b9'
down_revision = '364159666cbd'
branch_labels = None
depends_on = None
__tablename__ = "rendered_task_instance_fields"
k8s_pod_yaml = Column('k8s_pod_yaml', sqlalchemy_jsonfield.JSONField(json=json), nullable=True)
def upgrade():
"""Apply add-k8s-yaml-to-rendered-templates"""
with op.batch_alter_table(__tablename__, schema=None) as batch_op:
batch_op.add_column(k8s_pod_yaml)
def downgrade():
"""Unapply add-k8s-yaml-to-rendered-templates"""
with op.batch_alter_table(__tablename__, schema=None) as batch_op:
batch_op.drop_column('k8s_pod_yaml')
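# Hedged note (an editor's addition): like any other Alembic revision in this
# package, the functions above are not run directly; they are invoked through
# Airflow's migration entry point, e.g.:
#
#   airflow db upgrade   # applies upgrade() when migrating past 45ba3f1493b9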
| apache-2.0 |
haoch/incubator-eagle | eagle-external/eagle-ambari/lib/EAGLE/package/scripts/eagle_userprofile_topology.py | 21 | 3337 | #!/usr/bin/python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from resource_management import *
from resource_management.libraries.script.script import Script
from resource_management.libraries.functions import get_unique_id_and_date
from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
from resource_management.libraries.functions.security_commons import build_expectations, \
cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
FILE_TYPE_JAAS_CONF
from resource_management.core.shell import call
from resource_management.core.logger import Logger
from resource_management.core.resources.system import Execute
from resource_management.libraries.functions.check_process_status import check_process_status
from resource_management.libraries.functions.format import format
from resource_management.libraries.functions.validate import call_and_match_output
from actions import *
class EagleUserProfileTopology(Script):
# def get_stack_to_component(self):
# return {"HDP": "EAGLE-TOPOLOGY"}
def install(self, env):
Logger.info('Install Eagle DAM UserProfile Topology')
# self.install_packages(env)
import params
env.set_params(params)
self.configure(env)
# eagle_hdfs_topology_exec(action = 'init')
def configure(self,env):
Logger.info("Configure Eagle DAM UserProfile Topology")
import params
env.set_params(params)
# eagle_hdfs_topology_exec(action = 'init')
def pre_rolling_restart(self,env):
Logger.info("Executing rolling pre-restart Eagle DAM UserProfile Topology")
import params
env.set_params(params)
# if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
# Execute(format("hdp-select set eagle-topology {version}"))
def stop(self, env):
Logger.info('Stopping Eagle DAM UserProfile Topology')
import params
env.set_params(params)
self.configure(env)
eagle_userprofile_topology_exec(action = 'stop')
def start(self, env):
Logger.info('Starting Eagle DAM UserProfile Topology')
import params
env.set_params(params)
self.configure(env)
eagle_userprofile_topology_exec(action = 'init')
eagle_userprofile_topology_exec(action = 'start')
def status(self, env):
Logger.info('Checking Eagle DAM UserProfile Topology status')
import params
env.set_params(params)
self.configure(env)
eagle_userprofile_topology_exec(action = 'status')
if __name__ == "__main__":
EagleUserProfileTopology().execute() | apache-2.0 |
DailyActie/Surrogate-Model | 01-codes/OpenMDAO-Framework-dev/openmdao.main/src/openmdao/main/geom.py | 1 | 2622 | from openmdao.main.interfaces import IParametricGeometry, implements
class ParametricGeometry(object):
"""A base class for objects implementing the IParametricGeometry
interface.
"""
implements(IParametricGeometry)
def __init__(self):
self._callbacks = []
def regen_model(self):
"""Rebuid the geometry based on the current values of the input parameters."""
raise NotImplementedError("regen_model")
def list_parameters(self):
"""Return a list of (name, meta) where `name` is the name of the parameter
and `meta` is a metadata dict.
"""
raise NotImplementedError('list_parameters')
def set_parameter(self, name, value):
"""Set the value of the named input parameter."""
raise NotImplementedError("set_parameter")
def get_parameters(self, names):
"""Return a list of values of the named parameters."""
raise NotImplementedError("get_parameters")
def get_static_geometry(self):
"""Return a 'static' instance of this geometry."""
raise NotImplementedError("get_static_geometry")
def register_param_list_changedCB(self, callback):
"""Register a callback that will be called when self.invoke_callbacks() is called.
self.invoke_callbacks() should be called from the inheriting class whenever any
parameters are added, removed, or change their type.
"""
self._callbacks.append(callback)
def invoke_callbacks(self):
"""Invokes any callbacks that have been registered via register_param_list_changedCB."""
for cb in self._callbacks:
cb()
def get_attributes(self, io_only=True):
"""Return an attribute dict for use by the openmdao GUI. You only need to
override this if you have inputs that the user must set directly into your
ParametricGeometry object. For example, GEMParametricGeometry has a model_file
input that the user can set in the GUI to change the .csm file that supplies
the geometry.
"""
# the commented out section below shows an example of how you would report
# the existence of an input named 'model_file' to the OpenMDAO GUI.
return {
'type': type(self).__name__,
'Inputs': [
# {
# 'name': 'model_file',
# 'id': 'model_file',
# 'type': type(self._model_file).__name__,
# 'value': self._model_file,
# 'connected': '',
# }
]
}
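if __name__ == '__main__':
    # Hedged sketch (an editor's addition, not part of the original module):
    # a minimal concrete subclass showing the contract the docstrings above
    # describe. Parameter names and metadata keys are made up for illustration.
    class BoxGeometry(ParametricGeometry):
        def __init__(self):
            super(BoxGeometry, self).__init__()
            self._params = {'width': 1.0, 'height': 1.0}
        def regen_model(self):
            pass  # a real geometry kernel would rebuild itself here
        def list_parameters(self):
            return [(name, {'value': val, 'iotype': 'in'})
                    for name, val in sorted(self._params.items())]
        def set_parameter(self, name, value):
            if name not in self._params:
                raise KeyError(name)
            self._params[name] = value
        def get_parameters(self, names):
            return [self._params[n] for n in names]
    geom = BoxGeometry()
    geom.set_parameter('width', 2.0)
    print(geom.get_parameters(['width', 'height']))  # -> [2.0, 1.0]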
| mit |
bytor99999/vertx-web | src/test/sockjs-protocol/venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/euctwprober.py | 2994 | 1676 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCTWDistributionAnalysis
from .mbcssm import EUCTWSMModel
class EUCTWProber(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(EUCTWSMModel)
self._mDistributionAnalyzer = EUCTWDistributionAnalysis()
self.reset()
def get_charset_name(self):
return "EUC-TW"
| apache-2.0 |
eoghan2t9/Oppo-Find5-4.2-Kernel | arch/ia64/scripts/unwcheck.py | 13143 | 1714 | #!/usr/bin/python
#
# Usage: unwcheck.py FILE
#
# This script checks the unwind info of each function in file FILE
# and verifies that the sum of the region-lengths matches the total
# length of the function.
#
# Based on a shell/awk script originally written by Harish Patil,
# which was converted to Perl by Matthew Chapman, which was converted
# to Python by David Mosberger.
#
import os
import re
import sys
if len(sys.argv) != 2:
print "Usage: %s FILE" % sys.argv[0]
sys.exit(2)
readelf = os.getenv("READELF", "readelf")
start_pattern = re.compile("<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]")
rlen_pattern = re.compile(".*rlen=([0-9]+)")
def check_func (func, slots, rlen_sum):
if slots != rlen_sum:
global num_errors
num_errors += 1
if not func: func = "[%#x-%#x]" % (start, end)
print "ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum)
return
num_funcs = 0
num_errors = 0
func = False
slots = 0
rlen_sum = 0
for line in os.popen("%s -u %s" % (readelf, sys.argv[1])):
m = start_pattern.match(line)
if m:
check_func(func, slots, rlen_sum)
func = m.group(1)
start = long(m.group(2), 16)
end = long(m.group(3), 16)
slots = 3 * (end - start) / 16
rlen_sum = 0L
num_funcs += 1
else:
m = rlen_pattern.match(line)
if m:
rlen_sum += long(m.group(1))
check_func(func, slots, rlen_sum)
if num_errors == 0:
print "No errors detected in %u functions." % num_funcs
else:
if num_errors > 1:
err="errors"
else:
err="error"
print "%u %s detected in %u functions." % (num_errors, err, num_funcs)
sys.exit(1)
| gpl-2.0 |
dungeonsnd/test-code | dev_examples/pyserver/examples/server.py | 1 | 1790 |
import gevent
from gevent.queue import Queue
from gevent import Greenlet
class Actor(gevent.Greenlet):
    def __init__(self):
        self.inbox = Queue()
        Greenlet.__init__(self)
    def onRecv(self, message):
        """
        Define in your subclass.
        """
        raise NotImplementedError()
    def _run(self):
        # deliver queued messages to onRecv() until stopped
        self.running = True
        while self.running:
            message = self.inbox.get()
            self.onRecv(message)
class loginActor(Actor):
def onRecv(self, message):
print(message)
chatAct.inbox.put('loginAct')
gevent.sleep(0)
class chatActor(Actor):
def onRecv(self, message):
print(message)
loginAct.inbox.put('chatAct')
gevent.sleep(0)
def Dispatch(line):
    # route a request line to the matching actor and build the reply
    if line == 'loginAct':
        loginAct.inbox.put('login act message.')
        return 'login act returned.\n'
    elif line == 'chatAct':
        chatAct.inbox.put('chat act message.')
        return 'chat act returned.\n'
    else:
        return None
import socket
import gevent.server
from gevent.pool import Pool
def handler(sock, address):
    fp = sock.makefile()
    while True:
        line = fp.readline()
        if not line:                # EOF: client closed the connection
            break
        result = Dispatch(line.strip())
        if result:
            fp.write(result)
            fp.flush()
    sock.shutdown(socket.SHUT_WR)
    sock.close()
if __name__ == '__main__':
    loginAct = loginActor()
    chatAct = chatActor()
    loginAct.start()
    chatAct.start()
    server = gevent.server.StreamServer(('0.0.0.0', 18600), handler,
                                        backlog=1024,
                                        spawn=Pool(50000))  # accept at most 50000 concurrent connections
    server.serve_forever()
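# Hedged client sketch (an editor's addition, not part of the original file):
# once the server is listening on port 18600, exercise it from a shell:
#
#   $ python -c "import socket; s = socket.create_connection(('127.0.0.1', 18600)); \
#                s.sendall(b'loginAct\n'); print(s.recv(64)); s.close()"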
| gpl-3.0 |
drpjk/Arduino | arduino-core/src/processing/app/i18n/python/requests/packages/charade/sbcsgroupprober.py | 2936 | 3291 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetgroupprober import CharSetGroupProber
from .sbcharsetprober import SingleByteCharSetProber
from .langcyrillicmodel import (Win1251CyrillicModel, Koi8rModel,
Latin5CyrillicModel, MacCyrillicModel,
Ibm866Model, Ibm855Model)
from .langgreekmodel import Latin7GreekModel, Win1253GreekModel
from .langbulgarianmodel import Latin5BulgarianModel, Win1251BulgarianModel
from .langhungarianmodel import Latin2HungarianModel, Win1250HungarianModel
from .langthaimodel import TIS620ThaiModel
from .langhebrewmodel import Win1255HebrewModel
from .hebrewprober import HebrewProber
class SBCSGroupProber(CharSetGroupProber):
def __init__(self):
CharSetGroupProber.__init__(self)
self._mProbers = [
SingleByteCharSetProber(Win1251CyrillicModel),
SingleByteCharSetProber(Koi8rModel),
SingleByteCharSetProber(Latin5CyrillicModel),
SingleByteCharSetProber(MacCyrillicModel),
SingleByteCharSetProber(Ibm866Model),
SingleByteCharSetProber(Ibm855Model),
SingleByteCharSetProber(Latin7GreekModel),
SingleByteCharSetProber(Win1253GreekModel),
SingleByteCharSetProber(Latin5BulgarianModel),
SingleByteCharSetProber(Win1251BulgarianModel),
SingleByteCharSetProber(Latin2HungarianModel),
SingleByteCharSetProber(Win1250HungarianModel),
SingleByteCharSetProber(TIS620ThaiModel),
]
hebrewProber = HebrewProber()
logicalHebrewProber = SingleByteCharSetProber(Win1255HebrewModel,
False, hebrewProber)
visualHebrewProber = SingleByteCharSetProber(Win1255HebrewModel, True,
hebrewProber)
hebrewProber.set_model_probers(logicalHebrewProber, visualHebrewProber)
self._mProbers.extend([hebrewProber, logicalHebrewProber,
visualHebrewProber])
self.reset()
| lgpl-2.1 |
ecosoft-odoo/odoo | addons/account/report/account_print_overdue.py | 380 | 3907 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.report import report_sxw
from openerp.osv import osv
class Overdue(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(Overdue, self).__init__(cr, uid, name, context=context)
ids = context.get('active_ids')
partner_obj = self.pool['res.partner']
docs = partner_obj.browse(cr, uid, ids, context)
due = {}
paid = {}
mat = {}
for partner in docs:
due[partner.id] = reduce(lambda x, y: x + ((y['account_id']['type'] == 'receivable' and y['debit'] or 0) or (y['account_id']['type'] == 'payable' and y['credit'] * -1 or 0)), self._lines_get(partner), 0)
paid[partner.id] = reduce(lambda x, y: x + ((y['account_id']['type'] == 'receivable' and y['credit'] or 0) or (y['account_id']['type'] == 'payable' and y['debit'] * -1 or 0)), self._lines_get(partner), 0)
mat[partner.id] = reduce(lambda x, y: x + (y['debit'] - y['credit']), filter(lambda x: x['date_maturity'] < time.strftime('%Y-%m-%d'), self._lines_get(partner)), 0)
addresses = self.pool['res.partner']._address_display(cr, uid, ids, None, None)
self.localcontext.update({
'docs': docs,
'time': time,
'getLines': self._lines_get,
'tel_get': self._tel_get,
'message': self._message,
'due': due,
'paid': paid,
'mat': mat,
'addresses': addresses
})
self.context = context
def _tel_get(self,partner):
if not partner:
return False
res_partner = self.pool['res.partner']
addresses = res_partner.address_get(self.cr, self.uid, [partner.id], ['invoice'])
adr_id = addresses and addresses['invoice'] or False
if adr_id:
adr=res_partner.read(self.cr, self.uid, [adr_id])[0]
return adr['phone']
else:
return partner.phone or False
return False
def _lines_get(self, partner):
moveline_obj = self.pool['account.move.line']
movelines = moveline_obj.search(self.cr, self.uid,
[('partner_id', '=', partner.id),
('account_id.type', 'in', ['receivable', 'payable']),
('state', '<>', 'draft'), ('reconcile_id', '=', False)])
movelines = moveline_obj.browse(self.cr, self.uid, movelines)
return movelines
def _message(self, obj, company):
company_pool = self.pool['res.company']
message = company_pool.browse(self.cr, self.uid, company.id, {'lang':obj.lang}).overdue_msg
return message.split('\n')
class report_overdue(osv.AbstractModel):
_name = 'report.account.report_overdue'
_inherit = 'report.abstract_report'
_template = 'account.report_overdue'
_wrapped_report_class = Overdue
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
datamade/jekyll-hook | tasks.py | 1 | 1685 | import pickle
from redis import Redis
import subprocess
from uuid import uuid4
from app_config import SENTRY_DSN, REDIS_QUEUE_KEY
from raven import Client
sentry = Client(SENTRY_DSN)
redis = Redis()
rv_ttl = 500  # seconds that pickled results stay readable in redis
class DelayedResult(object):
def __init__(self, key):
self.key = key
self._rv = None
@property
def return_value(self):
if self._rv is None:
rv = redis.get(self.key)
if rv is not None:
self._rv = pickle.loads(rv)
return self._rv
def queuefunc(f):
def delay(*args, **kwargs):
qkey = REDIS_QUEUE_KEY
key = '%s:result:%s' % (qkey, str(uuid4()))
s = pickle.dumps((f, key, args, kwargs))
redis.rpush(REDIS_QUEUE_KEY, s)
return DelayedResult(key)
f.delay = delay
return f
@queuefunc
def run_scripts(scripts, args):
build, publish = scripts
try:
subprocess.check_call([build] + args)
except subprocess.CalledProcessError as e:
print('EXCEPTION', e)
sentry.captureException()
try:
subprocess.check_call([publish] + args)
except subprocess.CalledProcessError as e:
print('EXCEPTION', e)
sentry.captureException()
def queue_daemon():
print('Listening for work ... ')
while 1:
msg = redis.blpop(REDIS_QUEUE_KEY)
func, key, args, kwargs = pickle.loads(msg[1])
try:
rv = func(*args, **kwargs)
except Exception as e:
sentry.captureException()
rv = e.message
if rv is not None:
redis.set(key, pickle.dumps(rv))
redis.expire(key, rv_ttl)
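if __name__ == '__main__':
    # Hedged usage sketch (an editor's addition): enqueue one build/publish
    # run, then start the worker loop. Script paths and repo arguments are
    # illustrative only.
    run_scripts.delay(('scripts/build.sh', 'scripts/publish.sh'),
                      ['example-org/example-site', 'master'])
    queue_daemon()   # blocks; in production this runs as its own process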
| bsd-3-clause |
mhvk/astropy | astropy/io/misc/asdf/tags/transform/basic.py | 8 | 8303 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from asdf.versioning import AsdfVersion
from astropy.modeling import mappings
from astropy.modeling import functional_models
from astropy.modeling.core import CompoundModel
from astropy.io.misc.asdf.types import AstropyAsdfType, AstropyType
from . import _parameter_to_value
__all__ = ['TransformType', 'IdentityType', 'ConstantType']
class TransformType(AstropyAsdfType):
version = '1.2.0'
requires = ['astropy']
@classmethod
def _from_tree_base_transform_members(cls, model, node, ctx):
if 'name' in node:
model.name = node['name']
if 'bounding_box' in node:
model.bounding_box = node['bounding_box']
if "inputs" in node:
if model.n_inputs == 1:
model.inputs = (node["inputs"],)
else:
model.inputs = tuple(node["inputs"])
if "outputs" in node:
if model.n_outputs == 1:
model.outputs = (node["outputs"],)
else:
model.outputs = tuple(node["outputs"])
param_and_model_constraints = {}
for constraint in ['fixed', 'bounds']:
if constraint in node:
param_and_model_constraints[constraint] = node[constraint]
model._initialize_constraints(param_and_model_constraints)
yield model
if 'inverse' in node:
model.inverse = node['inverse']
@classmethod
def from_tree_transform(cls, node, ctx):
raise NotImplementedError(
"Must be implemented in TransformType subclasses")
@classmethod
def from_tree(cls, node, ctx):
model = cls.from_tree_transform(node, ctx)
return cls._from_tree_base_transform_members(model, node, ctx)
@classmethod
def _to_tree_base_transform_members(cls, model, node, ctx):
if getattr(model, '_user_inverse', None) is not None:
node['inverse'] = model._user_inverse
if model.name is not None:
node['name'] = model.name
try:
bb = model.bounding_box
except NotImplementedError:
bb = None
if bb is not None:
if model.n_inputs == 1:
bb = list(bb)
else:
bb = [list(item) for item in model.bounding_box]
node['bounding_box'] = bb
if type(model.__class__.inputs) != property:
node['inputs'] = model.inputs
node['outputs'] = model.outputs
# model / parameter constraints
if not isinstance(model, CompoundModel):
fixed_nondefaults = {k: f for k, f in model.fixed.items() if f}
if fixed_nondefaults:
node['fixed'] = fixed_nondefaults
bounds_nondefaults = {k: b for k, b in model.bounds.items() if any(b)}
if bounds_nondefaults:
node['bounds'] = bounds_nondefaults
return node
@classmethod
def to_tree_transform(cls, model, ctx):
raise NotImplementedError("Must be implemented in TransformType subclasses")
@classmethod
def to_tree(cls, model, ctx):
node = cls.to_tree_transform(model, ctx)
return cls._to_tree_base_transform_members(model, node, ctx)
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
assert a.name == b.name
# TODO: Assert inverses are the same
class IdentityType(TransformType):
name = "transform/identity"
types = ['astropy.modeling.mappings.Identity']
@classmethod
def from_tree_transform(cls, node, ctx):
return mappings.Identity(node.get('n_dims', 1))
@classmethod
def to_tree_transform(cls, data, ctx):
node = {}
if data.n_inputs != 1:
node['n_dims'] = data.n_inputs
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, mappings.Identity) and
isinstance(b, mappings.Identity) and
a.n_inputs == b.n_inputs)
class ConstantType(TransformType):
name = "transform/constant"
version = '1.4.0'
supported_versions = ['1.0.0', '1.1.0', '1.2.0', '1.3.0', '1.4.0']
types = ['astropy.modeling.functional_models.Const1D',
'astropy.modeling.functional_models.Const2D']
@classmethod
def from_tree_transform(cls, node, ctx):
if cls.version < AsdfVersion('1.4.0'):
# The 'dimensions' property was added in 1.4.0,
# previously all values were 1D.
return functional_models.Const1D(node['value'])
elif node['dimensions'] == 1:
return functional_models.Const1D(node['value'])
elif node['dimensions'] == 2:
return functional_models.Const2D(node['value'])
@classmethod
def to_tree_transform(cls, data, ctx):
if cls.version < AsdfVersion('1.4.0'):
if not isinstance(data, functional_models.Const1D):
raise ValueError(
f'constant-{cls.version} does not support models with > 1 dimension')
return {
'value': _parameter_to_value(data.amplitude)
}
else:
if isinstance(data, functional_models.Const1D):
dimension = 1
elif isinstance(data, functional_models.Const2D):
dimension = 2
return {
'value': _parameter_to_value(data.amplitude),
'dimensions': dimension
}
class GenericModel(mappings.Mapping):
def __init__(self, n_inputs, n_outputs):
mapping = tuple(range(n_inputs))
super().__init__(mapping)
self._n_outputs = n_outputs
self._outputs = tuple('x' + str(idx) for idx in range(n_outputs))
@property
def inverse(self):
raise NotImplementedError()
class GenericType(TransformType):
name = "transform/generic"
types = [GenericModel]
@classmethod
def from_tree_transform(cls, node, ctx):
return GenericModel(
node['n_inputs'], node['n_outputs'])
@classmethod
def to_tree_transform(cls, data, ctx):
return {
'n_inputs': data.n_inputs,
'n_outputs': data.n_outputs
}
class UnitsMappingType(AstropyType):
name = "transform/units_mapping"
version = "1.0.0"
types = [mappings.UnitsMapping]
@classmethod
def to_tree(cls, node, ctx):
tree = {}
if node.name is not None:
tree["name"] = node.name
inputs = []
outputs = []
for i, o, m in zip(node.inputs, node.outputs, node.mapping):
input = {
"name": i,
"allow_dimensionless": node.input_units_allow_dimensionless[i],
}
if m[0] is not None:
input["unit"] = m[0]
if node.input_units_equivalencies is not None and i in node.input_units_equivalencies:
input["equivalencies"] = node.input_units_equivalencies[i]
inputs.append(input)
output = {
"name": o,
}
if m[-1] is not None:
output["unit"] = m[-1]
outputs.append(output)
tree["inputs"] = inputs
tree["outputs"] = outputs
return tree
@classmethod
def from_tree(cls, tree, ctx):
mapping = tuple((i.get("unit"), o.get("unit"))
for i, o in zip(tree["inputs"], tree["outputs"]))
equivalencies = None
for i in tree["inputs"]:
if "equivalencies" in i:
if equivalencies is None:
equivalencies = {}
equivalencies[i["name"]] = i["equivalencies"]
kwargs = {
"input_units_equivalencies": equivalencies,
"input_units_allow_dimensionless": {
i["name"]: i.get("allow_dimensionless", False) for i in tree["inputs"]},
}
if "name" in tree:
kwargs["name"] = tree["name"]
return mappings.UnitsMapping(mapping, **kwargs)
| bsd-3-clause |
collex100/odoo | addons/account_anglo_saxon/invoice.py | 50 | 13086 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C)
# 2004-2010 Tiny SPRL (<http://tiny.be>).
# 2009-2010 Veritos (http://veritos.nl).
# All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
from openerp.tools.float_utils import float_round as round
class account_invoice_line(osv.osv):
_inherit = "account.invoice.line"
_columns = {
'move_id': fields.many2one('stock.move', string="Move line", help="If the invoice was generated from a stock.picking, reference to the related move line."),
}
def move_line_get(self, cr, uid, invoice_id, context=None):
res = super(account_invoice_line,self).move_line_get(cr, uid, invoice_id, context=context)
inv = self.pool.get('account.invoice').browse(cr, uid, invoice_id, context=context)
if inv.type in ('out_invoice','out_refund'):
for i_line in inv.invoice_line:
res.extend(self._anglo_saxon_sale_move_lines(cr, uid, i_line, res, context=context))
elif inv.type in ('in_invoice','in_refund'):
for i_line in inv.invoice_line:
res.extend(self._anglo_saxon_purchase_move_lines(cr, uid, i_line, res, context=context))
return res
def product_id_change(self, cr, uid, ids, product, uom_id, qty=0, name='', type='out_invoice', partner_id=False, fposition_id=False, price_unit=False, currency_id=False, company_id=None, context=None):
fiscal_pool = self.pool.get('account.fiscal.position')
res = super(account_invoice_line, self).product_id_change(cr, uid, ids, product, uom_id, qty, name, type, partner_id, fposition_id, price_unit, currency_id, company_id, context)
if not product:
return res
if type in ('in_invoice','in_refund'):
product_obj = self.pool.get('product.product').browse(cr, uid, product, context=context)
oa = product_obj.property_stock_account_input and product_obj.property_stock_account_input.id
if not oa:
oa = product_obj.categ_id.property_stock_account_input_categ and product_obj.categ_id.property_stock_account_input_categ.id
if oa:
fpos = fposition_id and fiscal_pool.browse(cr, uid, fposition_id, context=context) or False
a = fiscal_pool.map_account(cr, uid, fpos, oa)
res['value'].update({'account_id':a})
return res
def _get_price(self, cr, uid, inv, company_currency, i_line, price_unit):
cur_obj = self.pool.get('res.currency')
decimal_precision = self.pool.get('decimal.precision')
if inv.currency_id.id != company_currency:
price = cur_obj.compute(cr, uid, company_currency, inv.currency_id.id, price_unit * i_line.quantity, context={'date': inv.date_invoice})
else:
price = price_unit * i_line.quantity
return round(price, decimal_precision.precision_get(cr, uid, 'Account'))
def _anglo_saxon_sale_move_lines(self, cr, uid, i_line, res, context=None):
"""Return the additional move lines for sales invoices and refunds.
i_line: An account.invoice.line object.
res: The move line entries produced so far by the parent move_line_get.
"""
inv = i_line.invoice_id
fiscal_pool = self.pool.get('account.fiscal.position')
fpos = inv.fiscal_position or False
company_currency = inv.company_id.currency_id.id
if i_line.product_id.type != 'service' and i_line.product_id.valuation == 'real_time':
# debit account dacc will be the output account
# first check the product, if empty check the category
dacc = i_line.product_id.property_stock_account_output and i_line.product_id.property_stock_account_output.id
if not dacc:
dacc = i_line.product_id.categ_id.property_stock_account_output_categ and i_line.product_id.categ_id.property_stock_account_output_categ.id
# in both cases the credit account cacc will be the expense account
# first check the product, if empty check the category
cacc = i_line.product_id.property_account_expense and i_line.product_id.property_account_expense.id
if not cacc:
cacc = i_line.product_id.categ_id.property_account_expense_categ and i_line.product_id.categ_id.property_account_expense_categ.id
if dacc and cacc:
price_unit = i_line.move_id and i_line.move_id.price_unit or i_line.product_id.standard_price
return [
{
'type':'src',
'name': i_line.name[:64],
'price_unit':price_unit,
'quantity':i_line.quantity,
'price':self._get_price(cr, uid, inv, company_currency, i_line, price_unit),
'account_id':dacc,
'product_id':i_line.product_id.id,
'uos_id':i_line.uos_id.id,
'account_analytic_id': False,
'taxes':i_line.invoice_line_tax_id,
},
{
'type':'src',
'name': i_line.name[:64],
'price_unit':price_unit,
'quantity':i_line.quantity,
'price': -1 * self._get_price(cr, uid, inv, company_currency, i_line, price_unit),
'account_id':fiscal_pool.map_account(cr, uid, fpos, cacc),
'product_id':i_line.product_id.id,
'uos_id':i_line.uos_id.id,
'account_analytic_id': False,
'taxes':i_line.invoice_line_tax_id,
},
]
return []
def _anglo_saxon_purchase_move_lines(self, cr, uid, i_line, res, context=None):
"""Return the additional move lines for purchase invoices and refunds.
i_line: An account.invoice.line object.
res: The move line entries produced so far by the parent move_line_get.
"""
inv = i_line.invoice_id
company_currency = inv.company_id.currency_id.id
if i_line.product_id and i_line.product_id.valuation == 'real_time':
if i_line.product_id.type != 'service':
# get the price difference account at the product
acc = i_line.product_id.property_account_creditor_price_difference and i_line.product_id.property_account_creditor_price_difference.id
if not acc:
# if not found on the product get the price difference account at the category
acc = i_line.product_id.categ_id.property_account_creditor_price_difference_categ and i_line.product_id.categ_id.property_account_creditor_price_difference_categ.id
a = None
# oa will be the stock input account
# first check the product, if empty check the category
oa = i_line.product_id.property_stock_account_input and i_line.product_id.property_stock_account_input.id
if not oa:
oa = i_line.product_id.categ_id.property_stock_account_input_categ and i_line.product_id.categ_id.property_stock_account_input_categ.id
if oa:
# get the fiscal position
fpos = i_line.invoice_id.fiscal_position or False
a = self.pool.get('account.fiscal.position').map_account(cr, uid, fpos, oa)
diff_res = []
decimal_precision = self.pool.get('decimal.precision')
account_prec = decimal_precision.precision_get(cr, uid, 'Account')
# calculate and write down the possible price difference between invoice price and product price
for line in res:
if line.get('invl_id', 0) == i_line.id and a == line['account_id']:
uom = i_line.product_id.uos_id or i_line.product_id.uom_id
valuation_price_unit = self.pool.get('product.uom')._compute_price(cr, uid, uom.id, i_line.product_id.standard_price, i_line.uos_id.id)
if i_line.product_id.cost_method != 'standard' and i_line.purchase_line_id:
                        # for average/FIFO/LIFO costing methods, fetch the real cost price from incoming moves
stock_move_obj = self.pool.get('stock.move')
valuation_stock_move = stock_move_obj.search(cr, uid, [('purchase_line_id', '=', i_line.purchase_line_id.id)], limit=1, context=context)
if valuation_stock_move:
valuation_price_unit = stock_move_obj.browse(cr, uid, valuation_stock_move[0], context=context).price_unit
if inv.currency_id.id != company_currency:
valuation_price_unit = self.pool.get('res.currency').compute(cr, uid, company_currency, inv.currency_id.id, valuation_price_unit, context={'date': inv.date_invoice})
if valuation_price_unit != i_line.price_unit and line['price_unit'] == i_line.price_unit and acc:
# price with discount and without tax included
price_unit = self.pool['account.tax'].compute_all(cr, uid, line['taxes'],
i_line.price_unit * (1-(i_line.discount or 0.0)/100.0), line['quantity'])['total']
price_line = round(valuation_price_unit * line['quantity'], account_prec)
price_diff = round(price_unit - price_line, account_prec)
line.update({'price': price_line})
diff_res.append({
'type': 'src',
'name': i_line.name[:64],
'price_unit': round(price_diff / line['quantity'], account_prec),
'quantity': line['quantity'],
'price': price_diff,
'account_id': acc,
'product_id': line['product_id'],
'uos_id': line['uos_id'],
'account_analytic_id': line['account_analytic_id'],
'taxes': line.get('taxes', []),
})
return diff_res
return []
class account_invoice(osv.osv):
_inherit = "account.invoice"
def _prepare_refund(self, cr, uid, invoice, date=None, period_id=None, description=None, journal_id=None, context=None):
invoice_data = super(account_invoice, self)._prepare_refund(cr, uid, invoice, date, period_id,
description, journal_id, context=context)
if invoice.type == 'in_invoice':
fiscal_position = self.pool.get('account.fiscal.position')
for _, _, line_dict in invoice_data['invoice_line']:
if line_dict.get('product_id'):
product = self.pool.get('product.product').browse(cr, uid, line_dict['product_id'], context=context)
counterpart_acct_id = product.property_stock_account_output and \
product.property_stock_account_output.id
if not counterpart_acct_id:
counterpart_acct_id = product.categ_id.property_stock_account_output_categ and \
product.categ_id.property_stock_account_output_categ.id
if counterpart_acct_id:
fpos = invoice.fiscal_position or False
line_dict['account_id'] = fiscal_position.map_account(cr, uid,
fpos,
counterpart_acct_id)
return invoice_data
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
orion1024/Sick-Beard | lib/imdb/parser/mobile/__init__.py | 50 | 37466 | """
parser.mobile package (imdb package).
This package provides the IMDbMobileAccessSystem class used to access
IMDb's data for mobile systems.
the imdb.IMDb function will return an instance of this class when
called with the 'accessSystem' argument set to "mobile".
Copyright 2005-2011 Davide Alberani <[email protected]>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import re
import logging
from urllib import unquote
from imdb.Movie import Movie
from imdb.utils import analyze_title, analyze_name, canonicalName, \
date_and_notes
from imdb._exceptions import IMDbDataAccessError
from imdb.parser.http import IMDbHTTPAccessSystem
from imdb.parser.http.utils import subXMLRefs, subSGMLRefs, build_person, \
build_movie, re_spaces
# XXX NOTE: the first version of this module was heavily based on
# regular expressions. This new version replace regexps with
# find() strings' method calls; despite being less flexible, it
# seems to be at least as fast and, hopefully, much more
# lightweight. Yes: the regexp-based version was too heavyweight
# for systems with very limited CPU power and memory footprint.
re_spacessub = re_spaces.sub
# Strip html.
re_unhtml = re.compile(r'<.+?>')
re_unhtmlsub = re_unhtml.sub
# imdb person or movie ids.
re_imdbID = re.compile(r'(?<=nm|tt|ch)([0-9]{7})\b')
# movie AKAs.
re_makas = re.compile('(<p class="find-aka">.*?</p>)')
# Remove episode numbers.
re_filmo_episodes = re.compile('<div class="filmo-episodes">.*?</div>',
re.M | re.I)
def _unHtml(s):
"""Return a string without tags and no multiple spaces."""
return subSGMLRefs(re_spacessub(' ', re_unhtmlsub('', s)).strip())
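# Illustrative example (not in the original source):
#   _unHtml('<b>The  Movie</b>  (1999)') -> 'The Movie (1999)'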
_inttype = type(0)
def _getTagsWith(s, cont, toClosure=False, maxRes=None):
"""Return the html tags in the 's' string containing the 'cont'
string; if toClosure is True, everything between the opening
tag and the closing tag is returned."""
lres = []
bi = s.find(cont)
if bi != -1:
btag = s[:bi].rfind('<')
if btag != -1:
if not toClosure:
etag = s[bi+1:].find('>')
if etag != -1:
endidx = bi+2+etag
lres.append(s[btag:endidx])
if maxRes is not None and len(lres) >= maxRes: return lres
lres += _getTagsWith(s[endidx:], cont,
toClosure=toClosure)
else:
spaceidx = s[btag:].find(' ')
if spaceidx != -1:
ctag = '</%s>' % s[btag+1:btag+spaceidx]
closeidx = s[bi:].find(ctag)
if closeidx != -1:
endidx = bi+closeidx+len(ctag)
lres.append(s[btag:endidx])
if maxRes is not None and len(lres) >= maxRes:
return lres
lres += _getTagsWith(s[endidx:], cont,
toClosure=toClosure)
return lres
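# Illustrative examples (not in the original source):
#   _getTagsWith('<a href="x" class="t">text</a>', 'class="t"')
#       -> ['<a href="x" class="t">']
#   _getTagsWith('<a href="x" class="t">text</a>', 'class="t"', toClosure=True)
#       -> ['<a href="x" class="t">text</a>']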
def _findBetween(s, begins, ends, beginindx=0, maxRes=None, lres=None):
"""Return the list of strings from the 's' string which are included
between the 'begins' and 'ends' strings."""
if lres is None:
lres = []
bi = s.find(begins, beginindx)
if bi != -1:
lbegins = len(begins)
if isinstance(ends, (list, tuple)):
eset = [s.find(end, bi+lbegins) for end in ends]
eset[:] = [x for x in eset if x != -1]
if not eset: ei = -1
else: ei = min(eset)
else:
ei = s.find(ends, bi+lbegins)
if ei != -1:
match = s[bi+lbegins:ei]
lres.append(match)
if maxRes is not None and len(lres) >= maxRes: return lres
_findBetween(s, begins, ends, beginindx=ei, maxRes=maxRes,
lres=lres)
return lres
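# Illustrative example (not in the original source):
#   _findBetween('<b>one</b> <b>two</b>', '<b>', '</b>') -> ['one', 'two']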
class IMDbMobileAccessSystem(IMDbHTTPAccessSystem):
"""The class used to access IMDb's data through the web for
mobile terminals."""
accessSystem = 'mobile'
_mobile_logger = logging.getLogger('imdbpy.parser.mobile')
def __init__(self, isThin=0, *arguments, **keywords):
self.accessSystem = 'mobile'
IMDbHTTPAccessSystem.__init__(self, isThin, *arguments, **keywords)
def _clean_html(self, html):
"""Normalize the retrieve html."""
html = re_spaces.sub(' ', html)
# Remove silly » chars.
html = html.replace(' »', '')
return subXMLRefs(html)
def _mretrieve(self, url, size=-1):
"""Retrieve an html page and normalize it."""
cont = self._retrieve(url, size=size)
return self._clean_html(cont)
def _getPersons(self, s, sep='<br/>'):
"""Return a list of Person objects, from the string s; items
are assumed to be separated by the sep string."""
names = s.split(sep)
pl = []
plappend = pl.append
counter = 1
for name in names:
pid = re_imdbID.findall(name)
if not pid: continue
characters = _getTagsWith(name, 'class="char"',
toClosure=True, maxRes=1)
chpids = []
if characters:
for ch in characters[0].split(' / '):
chid = re_imdbID.findall(ch)
if not chid:
chpids.append(None)
else:
chpids.append(chid[-1])
if not chpids:
chpids = None
elif len(chpids) == 1:
chpids = chpids[0]
name = _unHtml(name)
# Catch unclosed tags.
gt_indx = name.find('>')
if gt_indx != -1:
name = name[gt_indx+1:].lstrip()
if not name: continue
if name.endswith('...'):
name = name[:-3]
p = build_person(name, personID=str(pid[0]), billingPos=counter,
modFunct=self._defModFunct, roleID=chpids,
accessSystem=self.accessSystem)
plappend(p)
counter += 1
return pl
def _search_movie(self, title, results):
##params = urllib.urlencode({'tt': 'on','mx': str(results),'q': title})
##params = 'q=%s&tt=on&mx=%s' % (urllib.quote_plus(title), str(results))
##cont = self._mretrieve(imdbURL_search % params)
cont = subXMLRefs(self._get_search_content('tt', title, results))
title = _findBetween(cont, '<title>', '</title>', maxRes=1)
res = []
if not title:
self._mobile_logger.error('no title tag searching for movie %s',
title)
return res
tl = title[0].lower()
if not tl.startswith('imdb title'):
# a direct hit!
title = _unHtml(title[0])
mid = None
midtag = _getTagsWith(cont, 'rel="canonical"', maxRes=1)
if midtag:
mid = _findBetween(midtag[0], '/title/tt', '/', maxRes=1)
if not (mid and title):
self._mobile_logger.error('no direct hit title/movieID for' \
' title %s', title)
return res
if cont.find('<span class="tv-extra">TV mini-series</span>') != -1:
title += ' (mini)'
res[:] = [(str(mid[0]), analyze_title(title))]
else:
# XXX: this results*3 prevents some recursion errors, but...
# it's not exactly understandable (i.e.: why 'results' is
# not enough to get all the results?)
lis = _findBetween(cont, 'td valign="top">', '</td>',
maxRes=results*3)
for li in lis:
akas = re_makas.findall(li)
for idx, aka in enumerate(akas):
aka = aka.replace('" - ', '::', 1)
aka = _unHtml(aka)
if aka.startswith('aka "'):
aka = aka[5:].strip()
if aka[-1] == '"':
aka = aka[:-1]
akas[idx] = aka
imdbid = re_imdbID.findall(li)
li = re_makas.sub('', li)
mtitle = _unHtml(li)
if not (imdbid and mtitle):
self._mobile_logger.debug('no title/movieID parsing' \
' %s searching for title %s', li,
title)
continue
mtitle = mtitle.replace('(TV mini-series)', '(mini)')
resd = analyze_title(mtitle)
if akas:
resd['akas'] = akas
res.append((str(imdbid[0]), resd))
return res
def get_movie_main(self, movieID):
cont = self._mretrieve(self.urls['movie_main'] % movieID + 'maindetails')
title = _findBetween(cont, '<title>', '</title>', maxRes=1)
if not title:
raise IMDbDataAccessError('unable to get movieID "%s"' % movieID)
title = _unHtml(title[0])
if title.endswith(' - IMDb'):
title = title[:-7]
if cont.find('<span class="tv-extra">TV mini-series</span>') != -1:
title += ' (mini)'
d = analyze_title(title)
kind = d.get('kind')
tv_series = _findBetween(cont, 'TV Series:</h5>', '</a>', maxRes=1)
if tv_series: mid = re_imdbID.findall(tv_series[0])
else: mid = None
if tv_series and mid:
s_title = _unHtml(tv_series[0])
s_data = analyze_title(s_title)
m = Movie(movieID=str(mid[0]), data=s_data,
accessSystem=self.accessSystem,
modFunct=self._defModFunct)
d['kind'] = kind = u'episode'
d['episode of'] = m
if kind in ('tv series', 'tv mini series'):
years = _findBetween(cont, '<h1>', '</h1>', maxRes=1)
if years:
years[:] = _findBetween(years[0], 'TV series', '</span>',
maxRes=1)
if years:
d['series years'] = years[0].strip()
air_date = _findBetween(cont, 'Original Air Date:</h5>', '</div>',
maxRes=1)
if air_date:
air_date = air_date[0]
vi = air_date.find('(')
if vi != -1:
date = _unHtml(air_date[:vi]).strip()
if date != '????':
d['original air date'] = date
air_date = air_date[vi:]
season = _findBetween(air_date, 'Season', ',', maxRes=1)
if season:
season = season[0].strip()
try: season = int(season)
except: pass
if season or type(season) is _inttype:
d['season'] = season
episode = _findBetween(air_date, 'Episode', ')', maxRes=1)
if episode:
episode = episode[0].strip()
try: episode = int(episode)
except: pass
            if episode or type(episode) is _inttype:
d['episode'] = episode
direct = _findBetween(cont, '<h5>Director', ('</div>', '<br/> <br/>'),
maxRes=1)
if direct:
direct = direct[0]
h5idx = direct.find('/h5>')
if h5idx != -1:
direct = direct[h5idx+4:]
direct = self._getPersons(direct)
if direct: d['director'] = direct
if kind in ('tv series', 'tv mini series', 'episode'):
if kind != 'episode':
seasons = _findBetween(cont, 'Seasons:</h5>', '</div>',
maxRes=1)
if seasons:
d['number of seasons'] = seasons[0].count('|') + 1
creator = _findBetween(cont, 'Created by</h5>', ('class="tn15more"',
'</div>',
'<br/> <br/>'),
maxRes=1)
if not creator:
            # They change 'Created by' to 'Creator' and vice versa
# from time to time...
# XXX: is 'Creators' also used?
creator = _findBetween(cont, 'Creator:</h5>',
('class="tn15more"', '</div>',
'<br/> <br/>'), maxRes=1)
if creator:
creator = creator[0]
if creator.find('tn15more'): creator = '%s>' % creator
creator = self._getPersons(creator)
if creator: d['creator'] = creator
writers = _findBetween(cont, '<h5>Writer', ('</div>', '<br/> <br/>'),
maxRes=1)
if writers:
writers = writers[0]
h5idx = writers.find('/h5>')
if h5idx != -1:
writers = writers[h5idx+4:]
writers = self._getPersons(writers)
if writers: d['writer'] = writers
cvurl = _getTagsWith(cont, 'name="poster"', toClosure=True, maxRes=1)
if cvurl:
cvurl = _findBetween(cvurl[0], 'src="', '"', maxRes=1)
if cvurl: d['cover url'] = cvurl[0]
genres = _findBetween(cont, 'href="/genre/', '"')
if genres:
d['genres'] = list(set(genres))
ur = _findBetween(cont, 'id="star-bar-user-rate">', '</div>',
maxRes=1)
if ur:
rat = _findBetween(ur[0], '<b>', '</b>', maxRes=1)
            if rat:
                d['rating'] = rat[0].strip()
            else:
                self._mobile_logger.warn('wrong rating: %s', rat)
vi = ur[0].rfind('href="ratings"')
if vi != -1 and ur[0][vi+10:].find('await') == -1:
try:
votes = _findBetween(ur[0][vi:], "title='",
" IMDb", maxRes=1)
votes = int(votes[0].replace(',', ''))
d['votes'] = votes
except (ValueError, IndexError):
self._mobile_logger.warn('wrong votes: %s', ur)
top250 = _findBetween(cont, 'href="/chart/top?', '</a>', maxRes=1)
if top250:
fn = top250[0].rfind('#')
if fn != -1:
try:
td = int(top250[0][fn+1:])
d['top 250 rank'] = td
except ValueError:
self._mobile_logger.warn('wrong top250: %s', top250)
castdata = _findBetween(cont, 'Cast overview', '</table>', maxRes=1)
if not castdata:
castdata = _findBetween(cont, 'Credited cast', '</table>', maxRes=1)
if not castdata:
castdata = _findBetween(cont, 'Complete credited cast', '</table>',
maxRes=1)
if not castdata:
castdata = _findBetween(cont, 'Series Cast Summary', '</table>',
maxRes=1)
if not castdata:
castdata = _findBetween(cont, 'Episode Credited cast', '</table>',
maxRes=1)
if castdata:
castdata = castdata[0]
            # Reintegrate the first tag.
fl = castdata.find('href=')
if fl != -1: castdata = '<a ' + castdata[fl:]
# Exclude the 'rest of cast listed alphabetically' row.
smib = castdata.find('<tr><td align="center" colspan="4"><small>')
if smib != -1:
smie = castdata.rfind('</small></td></tr>')
if smie != -1:
castdata = castdata[:smib].strip() + \
castdata[smie+18:].strip()
castdata = castdata.replace('/tr> <tr', '/tr><tr')
cast = self._getPersons(castdata, sep='</tr><tr')
if cast: d['cast'] = cast
akas = _findBetween(cont, 'Also Known As:</h5>', '</div>', maxRes=1)
if akas:
# For some reason, here <br> is still used in place of <br/>.
akas[:] = [x for x in akas[0].split('<br>') if x.strip()]
akas = [_unHtml(x).replace('" - ','::', 1).lstrip('"').strip()
for x in akas]
if 'See more' in akas: akas.remove('See more')
akas[:] = [x for x in akas if x]
if akas:
d['akas'] = akas
mpaa = _findBetween(cont, 'MPAA</a>:', '</div>', maxRes=1)
if mpaa: d['mpaa'] = _unHtml(mpaa[0])
runtimes = _findBetween(cont, 'Runtime:</h5>', '</div>', maxRes=1)
if runtimes:
runtimes = runtimes[0]
runtimes = [x.strip().replace(' min', '').replace(' (', '::(', 1)
for x in runtimes.split('|')]
d['runtimes'] = [_unHtml(x).strip() for x in runtimes]
if kind == 'episode':
# number of episodes.
epsn = _findBetween(cont, 'title="Full Episode List">', '</a>',
maxRes=1)
if epsn:
epsn = epsn[0].replace(' Episodes', '').strip()
if epsn:
try:
epsn = int(epsn)
except:
self._mobile_logger.warn('wrong episodes #: %s', epsn)
d['number of episodes'] = epsn
country = _findBetween(cont, 'Country:</h5>', '</div>', maxRes=1)
if country:
country[:] = country[0].split(' | ')
country[:] = ['<a %s' % x for x in country if x]
country[:] = [_unHtml(x.replace(' <i>', '::')) for x in country]
if country: d['countries'] = country
lang = _findBetween(cont, 'Language:</h5>', '</div>', maxRes=1)
if lang:
lang[:] = lang[0].split(' | ')
lang[:] = ['<a %s' % x for x in lang if x]
lang[:] = [_unHtml(x.replace(' <i>', '::')) for x in lang]
if lang: d['languages'] = lang
col = _findBetween(cont, '"/search/title?colors=', '</div>')
if col:
col[:] = col[0].split(' | ')
col[:] = ['<a %s' % x for x in col if x]
col[:] = [_unHtml(x.replace(' <i>', '::')) for x in col]
if col: d['color info'] = col
sm = _findBetween(cont, '/search/title?sound_mixes=', '</div>',
maxRes=1)
if sm:
sm[:] = sm[0].split(' | ')
sm[:] = ['<a %s' % x for x in sm if x]
sm[:] = [_unHtml(x.replace(' <i>', '::')) for x in sm]
if sm: d['sound mix'] = sm
cert = _findBetween(cont, 'Certification:</h5>', '</div>', maxRes=1)
if cert:
cert[:] = cert[0].split(' | ')
cert[:] = [_unHtml(x.replace(' <i>', '::')) for x in cert]
if cert: d['certificates'] = cert
plotoutline = _findBetween(cont, 'Plot:</h5>', ['<a ', '</div>'],
maxRes=1)
if plotoutline:
plotoutline = plotoutline[0].strip()
plotoutline = plotoutline.rstrip('|').rstrip()
if plotoutline: d['plot outline'] = _unHtml(plotoutline)
aratio = _findBetween(cont, 'Aspect Ratio:</h5>', ['<a ', '</div>'],
maxRes=1)
if aratio:
aratio = aratio[0].strip().replace(' (', '::(', 1)
if aratio:
d['aspect ratio'] = _unHtml(aratio)
return {'data': d}
def get_movie_plot(self, movieID):
cont = self._mretrieve(self.urls['movie_main'] % movieID + 'plotsummary')
plot = _findBetween(cont, '<p class="plotpar">', '</p>')
plot[:] = [_unHtml(x) for x in plot]
for i in xrange(len(plot)):
p = plot[i]
wbyidx = p.rfind(' Written by ')
if wbyidx != -1:
plot[i] = '%s::%s' % \
(p[:wbyidx].rstrip(),
p[wbyidx+12:].rstrip().replace('{','<').replace('}','>'))
if plot: return {'data': {'plot': plot}}
return {'data': {}}
def _search_person(self, name, results):
##params = urllib.urlencode({'nm': 'on', 'mx': str(results), 'q': name})
##params = 'q=%s&nm=on&mx=%s' % (urllib.quote_plus(name), str(results))
##cont = self._mretrieve(imdbURL_search % params)
cont = subXMLRefs(self._get_search_content('nm', name, results))
name = _findBetween(cont, '<title>', '</title>', maxRes=1)
res = []
if not name:
self._mobile_logger.warn('no title tag searching for name %s', name)
return res
nl = name[0].lower()
if not nl.startswith('imdb name'):
# a direct hit!
name = _unHtml(name[0])
name = name.replace('- Filmography by type' , '').strip()
pid = None
pidtag = _getTagsWith(cont, 'rel="canonical"', maxRes=1)
if pidtag:
pid = _findBetween(pidtag[0], '/name/nm', '/', maxRes=1)
if not (pid and name):
self._mobile_logger.error('no direct hit name/personID for' \
' name %s', name)
return res
res[:] = [(str(pid[0]), analyze_name(name, canonical=1))]
else:
lis = _findBetween(cont, 'td valign="top">', '</td>',
maxRes=results*3)
for li in lis:
akas = _findBetween(li, '<em>"', '"</em>')
for sep in ['<small', '<br> aka', '<br> birth name']:
sepIdx = li.find(sep)
if sepIdx != -1:
li = li[:sepIdx]
pid = re_imdbID.findall(li)
pname = _unHtml(li)
if not (pid and pname):
self._mobile_logger.debug('no name/personID parsing' \
' %s searching for name %s', li,
name)
continue
resd = analyze_name(pname, canonical=1)
if akas:
resd['akas'] = akas
res.append((str(pid[0]), resd))
return res
def get_person_main(self, personID, _parseChr=False):
if not _parseChr:
url = self.urls['person_main'] % personID + 'maindetails'
else:
url = self.urls['character_main'] % personID
s = self._mretrieve(url)
r = {}
name = _findBetween(s, '<title>', '</title>', maxRes=1)
if not name:
if _parseChr: w = 'characterID'
else: w = 'personID'
raise IMDbDataAccessError('unable to get %s "%s"' % (w, personID))
name = _unHtml(name[0].replace(' - IMDb', ''))
if _parseChr:
name = name.replace('(Character)', '').strip()
name = name.replace('- Filmography by type', '').strip()
else:
name = name.replace('- Filmography by', '').strip()
r = analyze_name(name, canonical=not _parseChr)
for dKind in ('Born', 'Died'):
date = _findBetween(s, '%s:</h4>' % dKind.capitalize(),
('<div class', '</div>', '<br/><br/>'), maxRes=1)
if date:
date = _unHtml(date[0])
if date:
#date, notes = date_and_notes(date)
# TODO: fix to handle real names.
date_notes = date.split(' in ', 1)
notes = u''
date = date_notes[0]
if len(date_notes) == 2:
notes = date_notes[1]
dtitle = 'birth'
if dKind == 'Died':
dtitle = 'death'
if date:
r['%s date' % dtitle] = date
if notes:
r['%s notes' % dtitle] = notes
akas = _findBetween(s, 'Alternate Names:</h4>', ('</div>',
'<br/><br/>'), maxRes=1)
if akas:
akas = akas[0]
if akas:
akas = _unHtml(akas)
if akas.find(' | ') != -1:
akas = akas.split(' | ')
else:
akas = akas.split(' / ')
if akas: r['akas'] = filter(None, [x.strip() for x in akas])
hs = _findBetween(s, "rel='image_src'", '>', maxRes=1)
if not hs:
hs = _findBetween(s, 'rel="image_src"', '>', maxRes=1)
if not hs:
hs = _findBetween(s, '<a name="headshot"', '</a>', maxRes=1)
if hs:
hsl = _findBetween(hs[0], "href='", "'", maxRes=1)
if not hsl:
hsl = _findBetween(hs[0], 'href="', '"', maxRes=1)
if hsl and 'imdb-share-logo' not in hsl[0]:
r['headshot'] = hsl[0]
# Build a list of tuples such [('hrefLink', 'section name')]
workkind = _findBetween(s, 'id="jumpto_', '</a>')
ws = []
for work in workkind:
sep = '" >'
if '">' in work:
sep = '">'
wsplit = work.split(sep, 1)
if len(wsplit) == 2:
sect = wsplit[0]
if '"' in sect:
sect = sect[:sect.find('"')]
ws.append((sect, wsplit[1].lower()))
# XXX: I think "guest appearances" are gone.
if s.find('<a href="#guest-appearances"') != -1:
ws.append(('guest-appearances', 'notable tv guest appearances'))
#if _parseChr:
# ws.append(('filmography', 'filmography'))
for sect, sectName in ws:
raws = u''
if sectName == 'self':
sect = 'Self'
# Everything between the current section link and the end
# of the <ol> tag.
if _parseChr and sect == 'filmography':
inisect = s.find('<div class="filmo">')
else:
inisect = s.find('<a name="%s' % sect)
if inisect != -1:
endsect = s[inisect:].find('<div id="filmo-head-')
if endsect == -1:
endsect = s[inisect:].find('<div class="article"')
if endsect != -1: raws = s[inisect:inisect+endsect]
#if not raws: continue
mlist = _findBetween(raws, '<div class="filmo-row',
('<div class="clear"/>',))
for m in mlist:
fCB = m.find('>')
if fCB != -1:
m = m[fCB+1:].lstrip()
m = re_filmo_episodes.sub('', m)
# For every movie in the current section.
movieID = re_imdbID.findall(m)
if not movieID:
self._mobile_logger.debug('no movieID in %s', m)
continue
m = m.replace('<br/>', ' .... ', 1)
if not _parseChr:
chrIndx = m.find(' .... ')
else:
chrIndx = m.find(' Played by ')
chids = []
if chrIndx != -1:
chrtxt = m[chrIndx+6:]
if _parseChr:
chrtxt = chrtxt[5:]
for ch in chrtxt.split(' / '):
chid = re_imdbID.findall(ch)
if not chid:
chids.append(None)
else:
chids.append(chid[-1])
if not chids:
chids = None
elif len(chids) == 1:
chids = chids[0]
movieID = str(movieID[0])
# Search the status.
stidx = m.find('<i>')
status = u''
if stidx != -1:
stendidx = m.rfind('</i>')
if stendidx != -1:
status = _unHtml(m[stidx+3:stendidx])
m = m.replace(m[stidx+3:stendidx], '')
year = _findBetween(m, 'year_column">', '</span>', maxRes=1)
if year:
year = year[0]
m = m.replace('<span class="year_column">%s</span>' % year,
'')
else:
year = None
m = _unHtml(m)
if not m:
self._mobile_logger.warn('no title for movieID %s', movieID)
continue
movie = build_movie(m, movieID=movieID, status=status,
roleID=chids, modFunct=self._defModFunct,
accessSystem=self.accessSystem,
_parsingCharacter=_parseChr, year=year)
sectName = sectName.split(':')[0]
r.setdefault(sectName, []).append(movie)
# If available, take the always correct name from a form.
itag = _getTagsWith(s, 'NAME="primary"', maxRes=1)
if not itag:
itag = _getTagsWith(s, 'name="primary"', maxRes=1)
if itag:
vtag = _findBetween(itag[0], 'VALUE="', ('"', '>'), maxRes=1)
if not vtag:
vtag = _findBetween(itag[0], 'value="', ('"', '>'), maxRes=1)
if vtag:
try:
vtag = unquote(str(vtag[0]))
vtag = unicode(vtag, 'latin_1')
r.update(analyze_name(vtag))
except UnicodeEncodeError:
pass
return {'data': r, 'info sets': ('main', 'filmography')}
def get_person_biography(self, personID):
cont = self._mretrieve(self.urls['person_main'] % personID + 'bio')
d = {}
spouses = _findBetween(cont, 'Spouse</h5>', ('</table>', '</dd>'),
maxRes=1)
if spouses:
sl = []
for spouse in spouses[0].split('</tr>'):
if spouse.count('</td>') > 1:
spouse = spouse.replace('</td>', '::</td>', 1)
spouse = _unHtml(spouse)
spouse = spouse.replace(':: ', '::').strip()
if spouse: sl.append(spouse)
if sl: d['spouse'] = sl
nnames = _findBetween(cont, '<h5>Nickname</h5>', ('<br/> <br/>','<h5>'),
maxRes=1)
if nnames:
nnames = nnames[0]
if nnames:
nnames = [x.strip().replace(' (', '::(', 1)
for x in nnames.split('<br/>')]
if nnames:
d['nick names'] = nnames
misc_sects = _findBetween(cont, '<h5>', '<br/>')
misc_sects[:] = [x.split('</h5>') for x in misc_sects]
misc_sects[:] = [x for x in misc_sects if len(x) == 2]
for sect, data in misc_sects:
sect = sect.lower().replace(':', '').strip()
if d.has_key(sect) and sect != 'mini biography': continue
elif sect in ('spouse', 'nickname'): continue
if sect == 'salary': sect = 'salary history'
elif sect == 'where are they now': sect = 'where now'
elif sect == 'personal quotes': sect = 'quotes'
data = data.replace('</p><p>', '::')
            data = data.replace('<br><br>', ' ')  # for multi-paragraph 'bio'
data = data.replace('</td> <td valign="top">', '@@@@')
data = data.replace('</td> </tr>', '::')
data = _unHtml(data)
data = [x.strip() for x in data.split('::')]
data[:] = [x.replace('@@@@', '::') for x in data if x]
if sect == 'height' and data: data = data[0]
elif sect == 'birth name': data = canonicalName(data[0])
elif sect == 'date of birth':
date, notes = date_and_notes(data[0])
if date:
d['birth date'] = date
if notes:
d['birth notes'] = notes
continue
elif sect == 'date of death':
date, notes = date_and_notes(data[0])
if date:
d['death date'] = date
if notes:
d['death notes'] = notes
continue
elif sect == 'mini biography':
ndata = []
for bio in data:
byidx = bio.rfind('IMDb Mini Biography By')
if byidx != -1:
bioAuth = bio[:byidx].rstrip()
else:
bioAuth = 'Anonymous'
bio = u'%s::%s' % (bioAuth, bio[byidx+23:].lstrip())
ndata.append(bio)
data[:] = ndata
if 'mini biography' in d:
d['mini biography'].append(ndata[0])
continue
d[sect] = data
return {'data': d}
def _search_character(self, name, results):
cont = subXMLRefs(self._get_search_content('char', name, results))
name = _findBetween(cont, '<title>', '</title>', maxRes=1)
res = []
if not name:
self._mobile_logger.error('no title tag searching character %s',
name)
return res
nl = name[0].lower()
        if not (nl.startswith('imdb search') or
                nl.startswith('imdb character')):
# a direct hit!
name = _unHtml(name[0]).replace('(Character)', '').strip()
pid = None
pidtag = _getTagsWith(cont, 'rel="canonical"', maxRes=1)
if pidtag:
pid = _findBetween(pidtag[0], '/character/ch', '/', maxRes=1)
if not (pid and name):
self._mobile_logger.error('no direct hit name/characterID for' \
' character %s', name)
return res
res[:] = [(str(pid[0]), analyze_name(name))]
else:
sects = _findBetween(cont, '<b>Popular Characters</b>', '</table>',
maxRes=results*3)
sects += _findBetween(cont, '<b>Characters', '</table>',
maxRes=results*3)
for sect in sects:
lis = _findBetween(sect, '<a href="/character/',
['<small', '</td>', '<br'])
for li in lis:
li = '<%s' % li
pid = re_imdbID.findall(li)
pname = _unHtml(li)
if not (pid and pname):
self._mobile_logger.debug('no name/characterID' \
' parsing %s searching for' \
' character %s', li, name)
continue
res.append((str(pid[0]), analyze_name(pname)))
return res
def get_character_main(self, characterID):
return self.get_person_main(characterID, _parseChr=True)
def get_character_biography(self, characterID):
cont = self._mretrieve(self.urls['character_main'] % characterID + 'bio')
d = {}
intro = _findBetween(cont, '<div class="display">',
('<span>', '<h4>'), maxRes=1)
if intro:
intro = _unHtml(intro[0]).strip()
if intro:
d['introduction'] = intro
tocidx = cont.find('<table id="toc..')
if tocidx != -1:
cont = cont[tocidx:]
bios = _findBetween(cont, '<h4>', ('<h4>', '</div>'))
if bios:
for bio in bios:
bio = bio.replace('</h4>', '::')
bio = bio.replace('\n', ' ')
bio = bio.replace('<br>', '\n')
bio = bio.replace('<br/>', '\n')
bio = subSGMLRefs(re_unhtmlsub('', bio).strip())
bio = bio.replace(' ::', '::').replace(':: ', '::')
bio = bio.replace('::', ': ', 1)
if bio:
d.setdefault('biography', []).append(bio)
return {'data': d}
| gpl-3.0 |
sephii/django | django/template/defaulttags.py | 2 | 53552 | """Default tags used by the template system, available to all templates."""
from __future__ import unicode_literals
import os
import sys
import re
from datetime import datetime
from itertools import groupby, cycle as itertools_cycle
import warnings
from django.conf import settings
from django.template.base import (Node, NodeList, Template, Context, Library,
TemplateSyntaxError, VariableDoesNotExist, InvalidTemplateLibrary,
BLOCK_TAG_START, BLOCK_TAG_END, VARIABLE_TAG_START, VARIABLE_TAG_END,
SINGLE_BRACE_START, SINGLE_BRACE_END, COMMENT_TAG_START, COMMENT_TAG_END,
VARIABLE_ATTRIBUTE_SEPARATOR, get_library, token_kwargs, kwarg_re,
render_value_in_context)
from django.template.smartif import IfParser, Literal
from django.template.defaultfilters import date
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_text, smart_text
from django.utils.lorem_ipsum import words, paragraphs
from django.utils.safestring import mark_safe
from django.utils.html import format_html
from django.utils import six
from django.utils import timezone
register = Library()
class AutoEscapeControlNode(Node):
"""Implements the actions of the autoescape tag."""
def __init__(self, setting, nodelist):
self.setting, self.nodelist = setting, nodelist
def render(self, context):
old_setting = context.autoescape
context.autoescape = self.setting
output = self.nodelist.render(context)
context.autoescape = old_setting
if self.setting:
return mark_safe(output)
else:
return output
class CommentNode(Node):
def render(self, context):
return ''
class CsrfTokenNode(Node):
def render(self, context):
csrf_token = context.get('csrf_token', None)
if csrf_token:
if csrf_token == 'NOTPROVIDED':
return format_html("")
else:
return format_html("<input type='hidden' name='csrfmiddlewaretoken' value='{}' />", csrf_token)
else:
# It's very probable that the token is missing because of
# misconfiguration, so we raise a warning
if settings.DEBUG:
warnings.warn(
"A {% csrf_token %} was used in a template, but the context "
"did not provide the value. This is usually caused by not "
"using RequestContext."
)
return ''
class CycleNode(Node):
def __init__(self, cyclevars, variable_name=None, silent=False):
self.cyclevars = cyclevars
self.variable_name = variable_name
self.silent = silent
def render(self, context):
if self not in context.render_context:
# First time the node is rendered in template
context.render_context[self] = itertools_cycle(self.cyclevars)
cycle_iter = context.render_context[self]
value = next(cycle_iter).resolve(context)
if self.variable_name:
context[self.variable_name] = value
if self.silent:
return ''
return render_value_in_context(value, context)
class DebugNode(Node):
def render(self, context):
from pprint import pformat
output = [force_text(pformat(val)) for val in context]
output.append('\n\n')
output.append(pformat(sys.modules))
return ''.join(output)
class FilterNode(Node):
def __init__(self, filter_expr, nodelist):
self.filter_expr, self.nodelist = filter_expr, nodelist
def render(self, context):
output = self.nodelist.render(context)
# Apply filters.
with context.push(var=output):
return self.filter_expr.resolve(context)
class FirstOfNode(Node):
def __init__(self, variables):
self.vars = variables
def render(self, context):
for var in self.vars:
value = var.resolve(context, True)
if value:
return render_value_in_context(value, context)
return ''
class ForNode(Node):
child_nodelists = ('nodelist_loop', 'nodelist_empty')
def __init__(self, loopvars, sequence, is_reversed, nodelist_loop, nodelist_empty=None):
self.loopvars, self.sequence = loopvars, sequence
self.is_reversed = is_reversed
self.nodelist_loop = nodelist_loop
if nodelist_empty is None:
self.nodelist_empty = NodeList()
else:
self.nodelist_empty = nodelist_empty
def __repr__(self):
reversed_text = ' reversed' if self.is_reversed else ''
return "<For Node: for %s in %s, tail_len: %d%s>" % \
(', '.join(self.loopvars), self.sequence, len(self.nodelist_loop),
reversed_text)
def __iter__(self):
for node in self.nodelist_loop:
yield node
for node in self.nodelist_empty:
yield node
def render(self, context):
if 'forloop' in context:
parentloop = context['forloop']
else:
parentloop = {}
with context.push():
try:
values = self.sequence.resolve(context, True)
except VariableDoesNotExist:
values = []
if values is None:
values = []
if not hasattr(values, '__len__'):
values = list(values)
len_values = len(values)
if len_values < 1:
return self.nodelist_empty.render(context)
nodelist = []
if self.is_reversed:
values = reversed(values)
num_loopvars = len(self.loopvars)
unpack = num_loopvars > 1
# Create a forloop value in the context. We'll update counters on each
# iteration just below.
loop_dict = context['forloop'] = {'parentloop': parentloop}
for i, item in enumerate(values):
# Shortcuts for current loop iteration number.
loop_dict['counter0'] = i
loop_dict['counter'] = i + 1
# Reverse counter iteration numbers.
loop_dict['revcounter'] = len_values - i
loop_dict['revcounter0'] = len_values - i - 1
# Boolean values designating first and last times through loop.
loop_dict['first'] = (i == 0)
loop_dict['last'] = (i == len_values - 1)
pop_context = False
if unpack:
# If there are multiple loop variables, unpack the item into
# them.
# To complete this deprecation, remove from here to the
# try/except block as well as the try/except itself,
# leaving `unpacked_vars = ...` and the "else" statements.
if not isinstance(item, (list, tuple)):
len_item = 1
else:
len_item = len(item)
# Check loop variable count before unpacking
if num_loopvars != len_item:
warnings.warn(
"Need {} values to unpack in for loop; got {}. "
"This will raise an exception in Django 2.0."
.format(num_loopvars, len_item),
RemovedInDjango20Warning)
try:
unpacked_vars = dict(zip(self.loopvars, item))
except TypeError:
pass
else:
pop_context = True
context.update(unpacked_vars)
else:
context[self.loopvars[0]] = item
# In TEMPLATE_DEBUG mode provide source of the node which
# actually raised the exception
if context.engine.debug:
for node in self.nodelist_loop:
try:
nodelist.append(node.render(context))
except Exception as e:
if not hasattr(e, 'django_template_source'):
e.django_template_source = node.source
raise
else:
for node in self.nodelist_loop:
nodelist.append(node.render(context))
if pop_context:
# The loop variables were pushed on to the context so pop them
# off again. This is necessary because the tag lets the length
# of loopvars differ to the length of each set of items and we
# don't want to leave any vars from the previous loop on the
# context.
context.pop()
return mark_safe(''.join(force_text(n) for n in nodelist))
class IfChangedNode(Node):
child_nodelists = ('nodelist_true', 'nodelist_false')
def __init__(self, nodelist_true, nodelist_false, *varlist):
self.nodelist_true, self.nodelist_false = nodelist_true, nodelist_false
self._varlist = varlist
def render(self, context):
# Init state storage
state_frame = self._get_context_stack_frame(context)
if self not in state_frame:
state_frame[self] = None
nodelist_true_output = None
try:
if self._varlist:
# Consider multiple parameters. This automatically behaves
# like an OR evaluation of the multiple variables.
compare_to = [var.resolve(context, True) for var in self._varlist]
else:
# The "{% ifchanged %}" syntax (without any variables) compares the rendered output.
compare_to = nodelist_true_output = self.nodelist_true.render(context)
except VariableDoesNotExist:
compare_to = None
if compare_to != state_frame[self]:
state_frame[self] = compare_to
# render true block if not already rendered
return nodelist_true_output or self.nodelist_true.render(context)
elif self.nodelist_false:
return self.nodelist_false.render(context)
return ''
def _get_context_stack_frame(self, context):
# The Context object behaves like a stack where each template tag can create a new scope.
# Find the place where to store the state to detect changes.
if 'forloop' in context:
# Ifchanged is bound to the local for loop.
# When there is a loop-in-loop, the state is bound to the inner loop,
# so it resets when the outer loop continues.
return context['forloop']
else:
# Using ifchanged outside loops. Effectively this is a no-op because the state is associated with 'self'.
return context.render_context
class IfEqualNode(Node):
child_nodelists = ('nodelist_true', 'nodelist_false')
def __init__(self, var1, var2, nodelist_true, nodelist_false, negate):
self.var1, self.var2 = var1, var2
self.nodelist_true, self.nodelist_false = nodelist_true, nodelist_false
self.negate = negate
def __repr__(self):
return "<IfEqualNode>"
def render(self, context):
val1 = self.var1.resolve(context, True)
val2 = self.var2.resolve(context, True)
if (self.negate and val1 != val2) or (not self.negate and val1 == val2):
return self.nodelist_true.render(context)
return self.nodelist_false.render(context)
class IfNode(Node):
def __init__(self, conditions_nodelists):
self.conditions_nodelists = conditions_nodelists
def __repr__(self):
return "<IfNode>"
def __iter__(self):
for _, nodelist in self.conditions_nodelists:
for node in nodelist:
yield node
@property
def nodelist(self):
return NodeList(node for _, nodelist in self.conditions_nodelists for node in nodelist)
def render(self, context):
for condition, nodelist in self.conditions_nodelists:
if condition is not None: # if / elif clause
try:
match = condition.eval(context)
except VariableDoesNotExist:
match = None
else: # else clause
match = True
if match:
return nodelist.render(context)
return ''
class LoremNode(Node):
def __init__(self, count, method, common):
self.count, self.method, self.common = count, method, common
def render(self, context):
try:
count = int(self.count.resolve(context))
except (ValueError, TypeError):
count = 1
if self.method == 'w':
return words(count, common=self.common)
else:
paras = paragraphs(count, common=self.common)
if self.method == 'p':
paras = ['<p>%s</p>' % p for p in paras]
return '\n\n'.join(paras)
class RegroupNode(Node):
def __init__(self, target, expression, var_name):
self.target, self.expression = target, expression
self.var_name = var_name
def resolve_expression(self, obj, context):
# This method is called for each object in self.target. See regroup()
# for the reason why we temporarily put the object in the context.
context[self.var_name] = obj
return self.expression.resolve(context, True)
def render(self, context):
obj_list = self.target.resolve(context, True)
if obj_list is None:
# target variable wasn't found in context; fail silently.
context[self.var_name] = []
return ''
# List of dictionaries in the format:
# {'grouper': 'key', 'list': [list of contents]}.
context[self.var_name] = [
{'grouper': key, 'list': list(val)}
for key, val in
groupby(obj_list, lambda obj: self.resolve_expression(obj, context))
]
return ''
def include_is_allowed(filepath, allowed_include_roots):
filepath = os.path.abspath(filepath)
for root in allowed_include_roots:
if filepath.startswith(root):
return True
return False
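# Illustrative examples (not in the original source):
#   include_is_allowed('/var/www/includes/nav.html', ['/var/www'])  -> True
#   include_is_allowed('/etc/passwd', ['/var/www'])                 -> False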
class SsiNode(Node):
def __init__(self, filepath, parsed):
self.filepath = filepath
self.parsed = parsed
def render(self, context):
filepath = self.filepath.resolve(context)
if not include_is_allowed(filepath, context.engine.allowed_include_roots):
if settings.DEBUG:
return "[Didn't have permission to include file]"
else:
return '' # Fail silently for invalid includes.
try:
with open(filepath, 'r') as fp:
output = fp.read()
except IOError:
output = ''
if self.parsed:
try:
t = Template(output, name=filepath, engine=context.engine)
return t.render(context)
except TemplateSyntaxError as e:
if settings.DEBUG:
return "[Included template had syntax error: %s]" % e
else:
return '' # Fail silently for invalid included templates.
return output
class LoadNode(Node):
def render(self, context):
return ''
class NowNode(Node):
def __init__(self, format_string, asvar=None):
self.format_string = format_string
self.asvar = asvar
def render(self, context):
tzinfo = timezone.get_current_timezone() if settings.USE_TZ else None
formatted = date(datetime.now(tz=tzinfo), self.format_string)
if self.asvar:
context[self.asvar] = formatted
return ''
else:
return formatted
class SpacelessNode(Node):
def __init__(self, nodelist):
self.nodelist = nodelist
def render(self, context):
from django.utils.html import strip_spaces_between_tags
return strip_spaces_between_tags(self.nodelist.render(context).strip())
class TemplateTagNode(Node):
mapping = {'openblock': BLOCK_TAG_START,
'closeblock': BLOCK_TAG_END,
'openvariable': VARIABLE_TAG_START,
'closevariable': VARIABLE_TAG_END,
'openbrace': SINGLE_BRACE_START,
'closebrace': SINGLE_BRACE_END,
'opencomment': COMMENT_TAG_START,
'closecomment': COMMENT_TAG_END,
}
def __init__(self, tagtype):
self.tagtype = tagtype
def render(self, context):
return self.mapping.get(self.tagtype, '')
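# Illustrative template usage (not in the original source): the {% templatetag %}
# tag built on this node outputs the literal template syntax characters, e.g.
#   {% templatetag openblock %}  renders as  {%
#   {% templatetag closevariable %}  renders as  }}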
class URLNode(Node):
def __init__(self, view_name, args, kwargs, asvar):
self.view_name = view_name
self.args = args
self.kwargs = kwargs
self.asvar = asvar
def render(self, context):
from django.core.urlresolvers import reverse, NoReverseMatch
args = [arg.resolve(context) for arg in self.args]
kwargs = dict((smart_text(k, 'ascii'), v.resolve(context))
for k, v in self.kwargs.items())
view_name = self.view_name.resolve(context)
try:
current_app = context.request.current_app
except AttributeError:
# Change the fallback value to None when the deprecation path for
# Context.current_app completes in Django 2.0.
current_app = context.current_app
# Try to look up the URL twice: once given the view name, and again
# relative to what we guess is the "main" app. If they both fail,
# re-raise the NoReverseMatch unless we're using the
# {% url ... as var %} construct in which case return nothing.
url = ''
try:
url = reverse(view_name, args=args, kwargs=kwargs, current_app=current_app)
except NoReverseMatch:
exc_info = sys.exc_info()
if settings.SETTINGS_MODULE:
project_name = settings.SETTINGS_MODULE.split('.')[0]
try:
url = reverse(project_name + '.' + view_name,
args=args, kwargs=kwargs,
current_app=current_app)
except NoReverseMatch:
if self.asvar is None:
# Re-raise the original exception, not the one with
# the path relative to the project. This makes a
# better error message.
six.reraise(*exc_info)
else:
if self.asvar is None:
raise
if self.asvar:
context[self.asvar] = url
return ''
else:
return url
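# Illustrative template usage (not in the original source); 'client-detail'
# is a hypothetical URL pattern name:
#   {% url 'client-detail' client.id %}
#   {% url 'client-detail' client.id as client_url %} ... {{ client_url }}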
class VerbatimNode(Node):
def __init__(self, content):
self.content = content
def render(self, context):
return self.content
class WidthRatioNode(Node):
def __init__(self, val_expr, max_expr, max_width, asvar=None):
self.val_expr = val_expr
self.max_expr = max_expr
self.max_width = max_width
self.asvar = asvar
def render(self, context):
try:
value = self.val_expr.resolve(context)
max_value = self.max_expr.resolve(context)
max_width = int(self.max_width.resolve(context))
except VariableDoesNotExist:
return ''
except (ValueError, TypeError):
raise TemplateSyntaxError("widthratio final argument must be a number")
try:
value = float(value)
max_value = float(max_value)
ratio = (value / max_value) * max_width
result = str(int(round(ratio)))
except ZeroDivisionError:
return '0'
except (ValueError, TypeError, OverflowError):
return ''
if self.asvar:
context[self.asvar] = result
return ''
else:
return result
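# Illustrative template usage (not in the original source): scale a value
# against a maximum, e.g. for bar charts. With this_value=175 and
# max_value=200 the image gets width="88" (round(175 / 200 * 100)):
#   <img src="bar.png" alt="Bar" height="10"
#        width="{% widthratio this_value max_value 100 %}" />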
class WithNode(Node):
def __init__(self, var, name, nodelist, extra_context=None):
self.nodelist = nodelist
# var and name are legacy attributes, being left in case they are used
# by third-party subclasses of this Node.
self.extra_context = extra_context or {}
if name:
self.extra_context[name] = var
def __repr__(self):
return "<WithNode>"
def render(self, context):
values = {key: val.resolve(context) for key, val in
six.iteritems(self.extra_context)}
with context.push(**values):
return self.nodelist.render(context)
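# Illustrative template usage (not in the original source): bind a complex
# expression to a simpler name for the scope of the block:
#   {% with total=business.employees.count %}
#       {{ total }} employee{{ total|pluralize }}
#   {% endwith %}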
@register.tag
def autoescape(parser, token):
"""
Force autoescape behavior for this block.
"""
    # token.split_contents() isn't useful here because this tag doesn't accept variables as arguments
args = token.contents.split()
if len(args) != 2:
raise TemplateSyntaxError("'autoescape' tag requires exactly one argument.")
arg = args[1]
if arg not in ('on', 'off'):
raise TemplateSyntaxError("'autoescape' argument should be 'on' or 'off'")
nodelist = parser.parse(('endautoescape',))
parser.delete_first_token()
return AutoEscapeControlNode((arg == 'on'), nodelist)
@register.tag
def comment(parser, token):
"""
Ignores everything between ``{% comment %}`` and ``{% endcomment %}``.
"""
parser.skip_past('endcomment')
return CommentNode()
@register.tag
def cycle(parser, token):
"""
Cycles among the given strings each time this tag is encountered.
Within a loop, cycles among the given strings each time through
the loop::
{% for o in some_list %}
<tr class="{% cycle 'row1' 'row2' %}">
...
</tr>
{% endfor %}
Outside of a loop, give the values a unique name the first time you call
it, then use that name each successive time through::
<tr class="{% cycle 'row1' 'row2' 'row3' as rowcolors %}">...</tr>
<tr class="{% cycle rowcolors %}">...</tr>
<tr class="{% cycle rowcolors %}">...</tr>
You can use any number of values, separated by spaces. Commas can also
be used to separate values; if a comma is used, the cycle values are
interpreted as literal strings.
The optional flag "silent" can be used to prevent the cycle declaration
from returning any value::
{% for o in some_list %}
{% cycle 'row1' 'row2' as rowcolors silent %}
            <tr class="{{ rowcolors }}">{% include "subtemplate.html" %}</tr>
{% endfor %}
"""
# Note: This returns the exact same node on each {% cycle name %} call;
# that is, the node object returned from {% cycle a b c as name %} and the
# one returned from {% cycle name %} are the exact same object. This
# shouldn't cause problems (heh), but if it does, now you know.
#
# Ugly hack warning: This stuffs the named template dict into parser so
# that names are only unique within each template (as opposed to using
# a global variable, which would make cycle names have to be unique across
    # *all* templates).
args = token.split_contents()
if len(args) < 2:
raise TemplateSyntaxError("'cycle' tag requires at least two arguments")
if ',' in args[1]:
# Backwards compatibility: {% cycle a,b %} or {% cycle a,b as foo %}
# case.
args[1:2] = ['"%s"' % arg for arg in args[1].split(",")]
if len(args) == 2:
# {% cycle foo %} case.
name = args[1]
if not hasattr(parser, '_namedCycleNodes'):
raise TemplateSyntaxError("No named cycles in template. '%s' is not defined" % name)
if name not in parser._namedCycleNodes:
raise TemplateSyntaxError("Named cycle '%s' does not exist" % name)
return parser._namedCycleNodes[name]
as_form = False
if len(args) > 4:
# {% cycle ... as foo [silent] %} case.
if args[-3] == "as":
if args[-1] != "silent":
raise TemplateSyntaxError("Only 'silent' flag is allowed after cycle's name, not '%s'." % args[-1])
as_form = True
silent = True
args = args[:-1]
elif args[-2] == "as":
as_form = True
silent = False
if as_form:
name = args[-1]
values = [parser.compile_filter(arg) for arg in args[1:-2]]
node = CycleNode(values, name, silent=silent)
if not hasattr(parser, '_namedCycleNodes'):
parser._namedCycleNodes = {}
parser._namedCycleNodes[name] = node
else:
values = [parser.compile_filter(arg) for arg in args[1:]]
node = CycleNode(values)
return node
@register.tag
def csrf_token(parser, token):
return CsrfTokenNode()
@register.tag
def debug(parser, token):
"""
Outputs a whole load of debugging information, including the current
context and imported modules.
Sample usage::
<pre>
{% debug %}
</pre>
"""
return DebugNode()
@register.tag('filter')
def do_filter(parser, token):
"""
Filters the contents of the block through variable filters.
Filters can also be piped through each other, and they can have
arguments -- just like in variable syntax.
Sample usage::
{% filter force_escape|lower %}
This text will be HTML-escaped, and will appear in lowercase.
{% endfilter %}
Note that the ``escape`` and ``safe`` filters are not acceptable arguments.
Instead, use the ``autoescape`` tag to manage autoescaping for blocks of
template code.
"""
    # token.split_contents() isn't useful here because this tag doesn't accept variables as arguments
_, rest = token.contents.split(None, 1)
filter_expr = parser.compile_filter("var|%s" % (rest))
for func, unused in filter_expr.filters:
filter_name = getattr(func, '_filter_name', None)
if filter_name in ('escape', 'safe'):
raise TemplateSyntaxError('"filter %s" is not permitted. Use the "autoescape" tag instead.' % filter_name)
nodelist = parser.parse(('endfilter',))
parser.delete_first_token()
return FilterNode(filter_expr, nodelist)
@register.tag
def firstof(parser, token):
"""
Outputs the first variable passed that is not False, without escaping.
Outputs nothing if all the passed variables are False.
Sample usage::
{% firstof var1 var2 var3 %}
This is equivalent to::
{% if var1 %}
{{ var1|safe }}
{% elif var2 %}
{{ var2|safe }}
{% elif var3 %}
{{ var3|safe }}
{% endif %}
but obviously much cleaner!
You can also use a literal string as a fallback value in case all
passed variables are False::
{% firstof var1 var2 var3 "fallback value" %}
If you want to escape the output, use a filter tag::
{% filter force_escape %}
{% firstof var1 var2 var3 "fallback value" %}
{% endfilter %}
"""
bits = token.split_contents()[1:]
if len(bits) < 1:
raise TemplateSyntaxError("'firstof' statement requires at least one argument")
return FirstOfNode([parser.compile_filter(bit) for bit in bits])
@register.tag('for')
def do_for(parser, token):
"""
Loops over each item in an array.
For example, to display a list of athletes given ``athlete_list``::
<ul>
{% for athlete in athlete_list %}
<li>{{ athlete.name }}</li>
{% endfor %}
</ul>
You can loop over a list in reverse by using
``{% for obj in list reversed %}``.
You can also unpack multiple values from a two-dimensional array::
{% for key,value in dict.items %}
{{ key }}: {{ value }}
{% endfor %}
The ``for`` tag can take an optional ``{% empty %}`` clause that will
be displayed if the given array is empty or could not be found::
<ul>
{% for athlete in athlete_list %}
<li>{{ athlete.name }}</li>
{% empty %}
<li>Sorry, no athletes in this list.</li>
{% endfor %}
        </ul>
The above is equivalent to -- but shorter, cleaner, and possibly faster
than -- the following::
<ul>
          {% if athlete_list %}
{% for athlete in athlete_list %}
<li>{{ athlete.name }}</li>
{% endfor %}
{% else %}
<li>Sorry, no athletes in this list.</li>
{% endif %}
</ul>
The for loop sets a number of variables available within the loop:
========================== ================================================
Variable Description
========================== ================================================
``forloop.counter`` The current iteration of the loop (1-indexed)
``forloop.counter0`` The current iteration of the loop (0-indexed)
``forloop.revcounter`` The number of iterations from the end of the
loop (1-indexed)
``forloop.revcounter0`` The number of iterations from the end of the
loop (0-indexed)
``forloop.first`` True if this is the first time through the loop
``forloop.last`` True if this is the last time through the loop
``forloop.parentloop`` For nested loops, this is the loop "above" the
current one
========================== ================================================
"""
bits = token.split_contents()
if len(bits) < 4:
raise TemplateSyntaxError("'for' statements should have at least four"
" words: %s" % token.contents)
is_reversed = bits[-1] == 'reversed'
in_index = -3 if is_reversed else -2
if bits[in_index] != 'in':
raise TemplateSyntaxError("'for' statements should use the format"
" 'for x in y': %s" % token.contents)
loopvars = re.split(r' *, *', ' '.join(bits[1:in_index]))
for var in loopvars:
if not var or ' ' in var:
raise TemplateSyntaxError("'for' tag received an invalid argument:"
" %s" % token.contents)
sequence = parser.compile_filter(bits[in_index + 1])
nodelist_loop = parser.parse(('empty', 'endfor',))
token = parser.next_token()
if token.contents == 'empty':
nodelist_empty = parser.parse(('endfor',))
parser.delete_first_token()
else:
nodelist_empty = None
return ForNode(loopvars, sequence, is_reversed, nodelist_loop, nodelist_empty)
def do_ifequal(parser, token, negate):
bits = list(token.split_contents())
if len(bits) != 3:
raise TemplateSyntaxError("%r takes two arguments" % bits[0])
end_tag = 'end' + bits[0]
nodelist_true = parser.parse(('else', end_tag))
token = parser.next_token()
if token.contents == 'else':
nodelist_false = parser.parse((end_tag,))
parser.delete_first_token()
else:
nodelist_false = NodeList()
val1 = parser.compile_filter(bits[1])
val2 = parser.compile_filter(bits[2])
return IfEqualNode(val1, val2, nodelist_true, nodelist_false, negate)
@register.tag
def ifequal(parser, token):
"""
Outputs the contents of the block if the two arguments equal each other.
Examples::
{% ifequal user.id comment.user_id %}
...
{% endifequal %}
{% ifnotequal user.id comment.user_id %}
...
{% else %}
...
{% endifnotequal %}
"""
return do_ifequal(parser, token, False)
@register.tag
def ifnotequal(parser, token):
"""
Outputs the contents of the block if the two arguments are not equal.
See ifequal.
"""
return do_ifequal(parser, token, True)
class TemplateLiteral(Literal):
def __init__(self, value, text):
self.value = value
self.text = text # for better error messages
def display(self):
return self.text
def eval(self, context):
return self.value.resolve(context, ignore_failures=True)
class TemplateIfParser(IfParser):
error_class = TemplateSyntaxError
def __init__(self, parser, *args, **kwargs):
self.template_parser = parser
super(TemplateIfParser, self).__init__(*args, **kwargs)
def create_var(self, value):
return TemplateLiteral(self.template_parser.compile_filter(value), value)
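# A minimal sketch of how a condition is compiled (the bits value is
# hypothetical): each bit becomes a TemplateLiteral, so variables resolve
# lazily against the context and unresolvable ones evaluate to None.
#   bits = ['articles|length', '>=', '5']
#   condition = TemplateIfParser(parser, bits).parse()
#   condition.eval(context)  # -> True or False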
@register.tag('if')
def do_if(parser, token):
"""
The ``{% if %}`` tag evaluates a variable, and if that variable is "true"
(i.e., exists, is not empty, and is not a false boolean value), the
contents of the block are output:
::
{% if athlete_list %}
Number of athletes: {{ athlete_list|count }}
{% elif athlete_in_locker_room_list %}
Athletes should be out of the locker room soon!
{% else %}
No athletes.
{% endif %}
In the above, if ``athlete_list`` is not empty, the number of athletes will
be displayed by the ``{{ athlete_list|count }}`` variable.
    As you can see, the ``if`` tag may take one or several ``{% elif %}``
clauses, as well as an ``{% else %}`` clause that will be displayed if all
previous conditions fail. These clauses are optional.
``if`` tags may use ``or``, ``and`` or ``not`` to test a number of
variables or to negate a given variable::
{% if not athlete_list %}
There are no athletes.
{% endif %}
{% if athlete_list or coach_list %}
There are some athletes or some coaches.
{% endif %}
{% if athlete_list and coach_list %}
Both athletes and coaches are available.
{% endif %}
{% if not athlete_list or coach_list %}
There are no athletes, or there are some coaches.
{% endif %}
{% if athlete_list and not coach_list %}
There are some athletes and absolutely no coaches.
{% endif %}
Comparison operators are also available, and the use of filters is also
allowed, for example::
{% if articles|length >= 5 %}...{% endif %}
Arguments and operators _must_ have a space between them, so
``{% if 1>2 %}`` is not a valid if tag.
    All supported operators are: ``or``, ``and``, ``in``, ``not in``,
    ``==`` (or ``=``), ``!=``, ``>``, ``>=``, ``<`` and ``<=``.
Operator precedence follows Python.
"""
# {% if ... %}
bits = token.split_contents()[1:]
condition = TemplateIfParser(parser, bits).parse()
nodelist = parser.parse(('elif', 'else', 'endif'))
conditions_nodelists = [(condition, nodelist)]
token = parser.next_token()
# {% elif ... %} (repeatable)
while token.contents.startswith('elif'):
bits = token.split_contents()[1:]
condition = TemplateIfParser(parser, bits).parse()
nodelist = parser.parse(('elif', 'else', 'endif'))
conditions_nodelists.append((condition, nodelist))
token = parser.next_token()
# {% else %} (optional)
if token.contents == 'else':
nodelist = parser.parse(('endif',))
conditions_nodelists.append((None, nodelist))
token = parser.next_token()
# {% endif %}
assert token.contents == 'endif'
return IfNode(conditions_nodelists)
@register.tag
def ifchanged(parser, token):
"""
Checks if a value has changed from the last iteration of a loop.
The ``{% ifchanged %}`` block tag is used within a loop. It has two
possible uses.
1. Checks its own rendered contents against its previous state and only
displays the content if it has changed. For example, this displays a
list of days, only displaying the month if it changes::
<h1>Archive for {{ year }}</h1>
{% for date in days %}
{% ifchanged %}<h3>{{ date|date:"F" }}</h3>{% endifchanged %}
<a href="{{ date|date:"M/d"|lower }}/">{{ date|date:"j" }}</a>
{% endfor %}
2. If given one or more variables, check whether any variable has changed.
For example, the following shows the date every time it changes, while
showing the hour if either the hour or the date has changed::
{% for date in days %}
{% ifchanged date.date %} {{ date.date }} {% endifchanged %}
{% ifchanged date.hour date.date %}
{{ date.hour }}
{% endifchanged %}
{% endfor %}
"""
bits = token.split_contents()
nodelist_true = parser.parse(('else', 'endifchanged'))
token = parser.next_token()
if token.contents == 'else':
nodelist_false = parser.parse(('endifchanged',))
parser.delete_first_token()
else:
nodelist_false = NodeList()
values = [parser.compile_filter(bit) for bit in bits[1:]]
return IfChangedNode(nodelist_true, nodelist_false, *values)
@register.tag
def ssi(parser, token):
"""
Outputs the contents of a given file into the page.
Like a simple "include" tag, the ``ssi`` tag includes the contents
of another file -- which must be specified using an absolute path --
in the current page::
{% ssi "/home/html/ljworld.com/includes/right_generic.html" %}
If the optional "parsed" parameter is given, the contents of the included
file are evaluated as template code, with the current context::
{% ssi "/home/html/ljworld.com/includes/right_generic.html" parsed %}
"""
warnings.warn(
"The {% ssi %} tag is deprecated. Use the {% include %} tag instead.",
RemovedInDjango20Warning,
)
bits = token.split_contents()
parsed = False
if len(bits) not in (2, 3):
raise TemplateSyntaxError("'ssi' tag takes one argument: the path to"
" the file to be included")
if len(bits) == 3:
if bits[2] == 'parsed':
parsed = True
else:
raise TemplateSyntaxError("Second (optional) argument to %s tag"
" must be 'parsed'" % bits[0])
filepath = parser.compile_filter(bits[1])
return SsiNode(filepath, parsed)
@register.tag
def load(parser, token):
"""
Loads a custom template tag set.
For example, to load the template tags in
``django/templatetags/news/photos.py``::
{% load news.photos %}
Can also be used to load an individual tag/filter from
a library::
{% load byline from news %}
"""
    # token.split_contents() isn't useful here because this tag doesn't accept variables as arguments
bits = token.contents.split()
if len(bits) >= 4 and bits[-2] == "from":
try:
taglib = bits[-1]
lib = get_library(taglib)
except InvalidTemplateLibrary as e:
raise TemplateSyntaxError("'%s' is not a valid tag library: %s" %
(taglib, e))
else:
temp_lib = Library()
for name in bits[1:-2]:
if name in lib.tags:
temp_lib.tags[name] = lib.tags[name]
# a name could be a tag *and* a filter, so check for both
if name in lib.filters:
temp_lib.filters[name] = lib.filters[name]
elif name in lib.filters:
temp_lib.filters[name] = lib.filters[name]
else:
raise TemplateSyntaxError("'%s' is not a valid tag or filter in tag library '%s'" %
(name, taglib))
parser.add_library(temp_lib)
else:
for taglib in bits[1:]:
# add the library to the parser
try:
lib = get_library(taglib)
parser.add_library(lib)
except InvalidTemplateLibrary as e:
raise TemplateSyntaxError("'%s' is not a valid tag library: %s" %
(taglib, e))
return LoadNode()
@register.tag
def lorem(parser, token):
"""
Creates random Latin text useful for providing test data in templates.
Usage format::
{% lorem [count] [method] [random] %}
``count`` is a number (or variable) containing the number of paragraphs or
words to generate (default is 1).
``method`` is either ``w`` for words, ``p`` for HTML paragraphs, ``b`` for
plain-text paragraph blocks (default is ``b``).
``random`` is the word ``random``, which if given, does not use the common
paragraph (starting "Lorem ipsum dolor sit amet, consectetuer...").
Examples:
* ``{% lorem %}`` will output the common "lorem ipsum" paragraph
* ``{% lorem 3 p %}`` will output the common "lorem ipsum" paragraph
and two random paragraphs each wrapped in HTML ``<p>`` tags
* ``{% lorem 2 w random %}`` will output two random latin words
"""
bits = list(token.split_contents())
tagname = bits[0]
# Random bit
common = bits[-1] != 'random'
if not common:
bits.pop()
# Method bit
if bits[-1] in ('w', 'p', 'b'):
method = bits.pop()
else:
method = 'b'
# Count bit
if len(bits) > 1:
count = bits.pop()
else:
count = '1'
count = parser.compile_filter(count)
if len(bits) != 1:
raise TemplateSyntaxError("Incorrect format for %r tag" % tagname)
return LoremNode(count, method, common)
@register.tag
def now(parser, token):
"""
Displays the date, formatted according to the given string.
Uses the same format as PHP's ``date()`` function; see http://php.net/date
for all the possible values.
Sample usage::
It is {% now "jS F Y H:i" %}
"""
bits = token.split_contents()
asvar = None
if len(bits) == 4 and bits[-2] == 'as':
asvar = bits[-1]
bits = bits[:-2]
if len(bits) != 2:
raise TemplateSyntaxError("'now' statement takes one argument")
format_string = bits[1][1:-1]
return NowNode(format_string, asvar)
@register.tag
def regroup(parser, token):
"""
Regroups a list of alike objects by a common attribute.
This complex tag is best illustrated by use of an example: say that
``people`` is a list of ``Person`` objects that have ``first_name``,
``last_name``, and ``gender`` attributes, and you'd like to display a list
that looks like:
* Male:
* George Bush
* Bill Clinton
* Female:
* Margaret Thatcher
            * Condoleezza Rice
* Unknown:
* Pat Smith
The following snippet of template code would accomplish this dubious task::
{% regroup people by gender as grouped %}
<ul>
{% for group in grouped %}
<li>{{ group.grouper }}
<ul>
{% for item in group.list %}
<li>{{ item }}</li>
{% endfor %}
</ul>
{% endfor %}
</ul>
As you can see, ``{% regroup %}`` populates a variable with a list of
objects with ``grouper`` and ``list`` attributes. ``grouper`` contains the
item that was grouped by; ``list`` contains the list of objects that share
that ``grouper``. In this case, ``grouper`` would be ``Male``, ``Female``
and ``Unknown``, and ``list`` is the list of people with those genders.
Note that ``{% regroup %}`` does not work when the list to be grouped is not
sorted by the key you are grouping by! This means that if your list of
people was not sorted by gender, you'd need to make sure it is sorted
before using it, i.e.::
{% regroup people|dictsort:"gender" by gender as grouped %}
"""
bits = token.split_contents()
if len(bits) != 6:
raise TemplateSyntaxError("'regroup' tag takes five arguments")
target = parser.compile_filter(bits[1])
if bits[2] != 'by':
raise TemplateSyntaxError("second argument to 'regroup' tag must be 'by'")
if bits[4] != 'as':
raise TemplateSyntaxError("next-to-last argument to 'regroup' tag must"
" be 'as'")
var_name = bits[5]
# RegroupNode will take each item in 'target', put it in the context under
# 'var_name', evaluate 'var_name'.'expression' in the current context, and
# group by the resulting value. After all items are processed, it will
# save the final result in the context under 'var_name', thus clearing the
# temporary values. This hack is necessary because the template engine
# doesn't provide a context-aware equivalent of Python's getattr.
expression = parser.compile_filter(var_name +
VARIABLE_ATTRIBUTE_SEPARATOR +
bits[3])
return RegroupNode(target, expression, var_name)
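# For example (hypothetical template line), {% regroup people by gender as
# grouped %} gives var_name = 'grouped' and compiles the expression
# 'grouped.gender'; RegroupNode resolves that expression against each item it
# temporarily places in the context under 'grouped'.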
@register.tag
def spaceless(parser, token):
"""
Removes whitespace between HTML tags, including tab and newline characters.
Example usage::
{% spaceless %}
<p>
<a href="foo/">Foo</a>
</p>
{% endspaceless %}
This example would return this HTML::
<p><a href="foo/">Foo</a></p>
Only space between *tags* is normalized -- not space between tags and text.
In this example, the space around ``Hello`` won't be stripped::
{% spaceless %}
<strong>
Hello
</strong>
{% endspaceless %}
"""
nodelist = parser.parse(('endspaceless',))
parser.delete_first_token()
return SpacelessNode(nodelist)
@register.tag
def templatetag(parser, token):
"""
Outputs one of the bits used to compose template tags.
Since the template system has no concept of "escaping", to display one of
the bits used in template tags, you must use the ``{% templatetag %}`` tag.
The argument tells which template bit to output:
================== =======
Argument Outputs
================== =======
``openblock`` ``{%``
``closeblock`` ``%}``
``openvariable`` ``{{``
``closevariable`` ``}}``
``openbrace`` ``{``
``closebrace`` ``}``
``opencomment`` ``{#``
``closecomment`` ``#}``
================== =======
"""
    # token.split_contents() isn't useful here because this tag doesn't accept variables as arguments
bits = token.contents.split()
if len(bits) != 2:
raise TemplateSyntaxError("'templatetag' statement takes one argument")
tag = bits[1]
if tag not in TemplateTagNode.mapping:
raise TemplateSyntaxError("Invalid templatetag argument: '%s'."
" Must be one of: %s" %
(tag, list(TemplateTagNode.mapping)))
return TemplateTagNode(tag)
@register.tag
def url(parser, token):
"""
Returns an absolute URL matching given view with its parameters.
This is a way to define links that aren't tied to a particular URL
configuration::
{% url "path.to.some_view" arg1 arg2 %}
or
{% url "path.to.some_view" name1=value1 name2=value2 %}
The first argument is a path to a view. It can be an absolute Python path
or just ``app_name.view_name`` without the project name if the view is
located inside the project.
Other arguments are space-separated values that will be filled in place of
positional and keyword arguments in the URL. Don't mix positional and
keyword arguments.
All arguments for the URL should be present.
    For example, if you have a view ``app_name.client`` taking a client's id,
    and the corresponding line in a URLconf looks like this::
('^client/(\d+)/$', 'app_name.client')
and this app's URLconf is included into the project's URLconf under some
path::
('^clients/', include('project_name.app_name.urls'))
then in a template you can create a link for a certain client like this::
{% url "app_name.client" client.id %}
The URL will look like ``/clients/client/123/``.
The first argument can also be a named URL instead of the Python path to
the view callable. For example if the URLconf entry looks like this::
url('^client/(\d+)/$', name='client-detail-view')
then in the template you can use::
{% url "client-detail-view" client.id %}
There is even another possible value type for the first argument. It can be
the name of a template variable that will be evaluated to obtain the view
name or the URL name, e.g.::
{% with view_path="app_name.client" %}
{% url view_path client.id %}
{% endwith %}
or,
{% with url_name="client-detail-view" %}
{% url url_name client.id %}
{% endwith %}
"""
bits = token.split_contents()
if len(bits) < 2:
raise TemplateSyntaxError("'%s' takes at least one argument"
" (path to a view)" % bits[0])
viewname = parser.compile_filter(bits[1])
args = []
kwargs = {}
asvar = None
bits = bits[2:]
if len(bits) >= 2 and bits[-2] == 'as':
asvar = bits[-1]
bits = bits[:-2]
if len(bits):
for bit in bits:
match = kwarg_re.match(bit)
if not match:
raise TemplateSyntaxError("Malformed arguments to url tag")
name, value = match.groups()
if name:
kwargs[name] = parser.compile_filter(value)
else:
args.append(parser.compile_filter(value))
return URLNode(viewname, args, kwargs, asvar)
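# A minimal parsing sketch (the template line is hypothetical):
#   {% url "client-detail-view" pk=client.id as the_url %}
# yields viewname = "client-detail-view", args = [], kwargs = {'pk': ...} and
# asvar = 'the_url'; kwarg_re decides per bit whether it is a keyword argument.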
@register.tag
def verbatim(parser, token):
"""
Stops the template engine from rendering the contents of this block tag.
Usage::
{% verbatim %}
{% don't process this %}
{% endverbatim %}
You can also designate a specific closing tag block (allowing the
unrendered use of ``{% endverbatim %}``)::
{% verbatim myblock %}
...
{% endverbatim myblock %}
"""
nodelist = parser.parse(('endverbatim',))
parser.delete_first_token()
return VerbatimNode(nodelist.render(Context()))
@register.tag
def widthratio(parser, token):
"""
For creating bar charts and such, this tag calculates the ratio of a given
value to a maximum value, and then applies that ratio to a constant.
For example::
<img src="bar.png" alt="Bar"
height="10" width="{% widthratio this_value max_value max_width %}" />
If ``this_value`` is 175, ``max_value`` is 200, and ``max_width`` is 100,
the image in the above example will be 88 pixels wide
(because 175/200 = .875; .875 * 100 = 87.5 which is rounded up to 88).
In some cases you might want to capture the result of widthratio in a
variable. It can be useful for instance in a blocktrans like this::
{% widthratio this_value max_value max_width as width %}
{% blocktrans %}The width is: {{ width }}{% endblocktrans %}
"""
bits = token.split_contents()
if len(bits) == 4:
tag, this_value_expr, max_value_expr, max_width = bits
asvar = None
elif len(bits) == 6:
tag, this_value_expr, max_value_expr, max_width, as_, asvar = bits
if as_ != 'as':
raise TemplateSyntaxError("Invalid syntax in widthratio tag. Expecting 'as' keyword")
else:
raise TemplateSyntaxError("widthratio takes at least three arguments")
return WidthRatioNode(parser.compile_filter(this_value_expr),
parser.compile_filter(max_value_expr),
parser.compile_filter(max_width),
asvar=asvar)
@register.tag('with')
def do_with(parser, token):
"""
Adds one or more values to the context (inside of this block) for caching
and easy access.
For example::
{% with total=person.some_sql_method %}
{{ total }} object{{ total|pluralize }}
{% endwith %}
Multiple values can be added to the context::
{% with foo=1 bar=2 %}
...
{% endwith %}
The legacy format of ``{% with person.some_sql_method as total %}`` is
still accepted.
"""
bits = token.split_contents()
remaining_bits = bits[1:]
extra_context = token_kwargs(remaining_bits, parser, support_legacy=True)
if not extra_context:
raise TemplateSyntaxError("%r expected at least one variable "
"assignment" % bits[0])
if remaining_bits:
raise TemplateSyntaxError("%r received an invalid token: %r" %
(bits[0], remaining_bits[0]))
nodelist = parser.parse(('endwith',))
parser.delete_first_token()
return WithNode(None, None, nodelist, extra_context=extra_context)
| bsd-3-clause |
GENIEMC/GENIE_2_9_0 | data/validation/hA/fsiv.py | 5 | 22493 | #!/usr/bin/env python
"""
Author: Tomasz Golan
For help, run one of the following commands:
'./fsiv.py --help' or './fsiv.py -h'
'python fsiv.py --help' or 'python fsiv.py -h'
"""
import os, re, sys, getopt, time
from subprocess import call
#GLOBAL VARIABLES
DAT_DIR = os.environ.get('FSIV_DAT_DIR') or 'data_files' #set a directory with data files (use 'pwd/data' if $FSIV_DAT_DIR is not defined)
SIM_DIR = os.environ.get('FSIV_SIM_DIR') or 'sim_files' #set a directory for root/xsec files (use 'pwd/sim_files' if $FSIV_SIM_DIR is not defined)
PNG_DIR = os.environ.get('FSIV_PNG_DIR') or 'png_files' #set a directory for plots (use 'pwd/png_files' if $FSIV_PNG_DIR is not defined)
LOG_DIR = os.environ.get('FSIV_LOG_DIR') or 'log_files' #set a directory for log files (use 'pwd/log_files' if $FSIV_LOG_DIR is not defined)
BSH_DIR = os.environ.get('FSIV_BSH_DIR') or 'bash_scripts' #set a directory for bash scripts (use 'pwd/bash_scripts' if $FSIV_BSH_DIR is not defined)
authors = [] #storage for the list of available authors
tot_proc_list = [] #storage for the list of available processes for total cross section
date_ = time.strftime("%b") + "_" + time.strftime("%d") + "_" + time.strftime("%y")
#CLASSES
class Author:
#store available processes for given author
def __init__ (self, name):
self.name = name #author's name
        self.pion = self.nucl = self.kaon = self.tot = self.nrg = self.ang = False #author's switchers for simulations/plots to be done
        self.tot_proc = [] #storage for processes for total cross section data ([particle, [tk list], xsec_max, target, fate])
def __cmp__(self, name): return cmp(self.name, name) #compare authors by name
def __repr__(self): return repr(self.name)
def print_ (self, dest = None): #print switchers for given author
print >> dest, '-'*22
print >> dest, self.name,':'
print >> dest, '-'*22
print >> dest, 'Pion mode:\t',self.pion
print >> dest, 'Nucleon mode:\t',self.nucl
print >> dest, 'Kaon mode:\t',self.kaon
print >> dest, 'Xsec data:\t',self.tot
print >> dest, 'Energy data:\t',self.nrg
print >> dest, 'Angle data:\t',self.ang
def check (self, particle): #return particle switcher for given particle
if todo_authors and self.name not in todo_authors: return False
if (particle == 'pion'): return self.pion
elif (particle == 'nucleon'): return self.nucl
elif (particle == 'kaon'): return self.kaon
class TotalProcess:
#store information about particle, target and energy range for total cross section
    def __init__ (self, proc): #create process from proc = [particle, [tk list], xsec_max, target, fate]
self.particle = proc[0]
if self.particle in ['p','n']: self.particle_kind = 'nucleon'
elif self.particle in ['kp', 'km']: self.particle_kind = 'kaon'
else: self.particle_kind = 'pion'
self.tk_data = set(proc[1])
self.tk_max = [max(proc[1])]
self.xsec_max = [proc[2]]
self.target = proc[3]
self.fate = [proc[4]]
self.name = self.particle + '_' + self.target
def __cmp__(self, x): return cmp(self.name, x) #compare processes by name = particle_target
def __repr__(self): return repr(self.name)
def update (self,x): #update process information
self.tk_data.update(x.tk_data)
if x.fate[0] not in self.fate: #add new fate (and new xsec max) if necessary
self.fate.append(x.fate[0])
self.xsec_max.append(x.xsec_max[0])
self.tk_max.append(x.tk_max[0])
else: #if fate exists -> update xsec max and energy max [for plot range]
index = self.fate.index(x.fate[0])
if x.xsec_max[0] > self.xsec_max[index]: self.xsec_max[index] = x.xsec_max[0]
if x.tk_max[0] > self.tk_max[index]: self.tk_max[index] = x.tk_max[0]
def print_ (self, dest = None):
print >> dest, ' * ', self.particle, '+', self.target
print >> dest, '\tTk found in data = ', str(sorted(self.tk_data))
print >> dest, '\tTk prepared for simulation = ', str(sorted(self.tk_sim))
print >> dest, '\tFates:'
for f in self.fate:
index = self.fate.index(f)
print >> dest, '\t\t', f, '\t-> ', 'Tk max = ', self.tk_max[index], ', xsec max = ', self.xsec_max[index]
#FUNCTIONS
def help():
print '\n','*'*110
print """
fsiv.py runs 'runfast.pl' and 'intranukeplotter.pl' in a loop (assuming they are in the same directory)
the data are taken from the directory defined in $FSIV_DAT_DIR environmental variable
if $FSIV_DAT_DIR is not defined 'pwd/data_files' directory is taken instead
the output root/xsec files are saved into the directory defined in $FSIV_SIM_DIR environmental variable
if $FSIV_SIM_DIR is not defined 'pwd/sim_files' is used
the output png files are saved into the directory defined in $FSIV_PNG_DIR environmental variable
if $FSIV_PNG_DIR is not defined 'pwd/png_files' is used
the bash scripts are saved into the directory defined in $FSIV_BSH_DIR environmental variable
if $FSIV_BSH_DIR is not defined 'pwd/bash_scripts' is used
the log files are saved into the directory defined in $FSIV_LOG_DIR environmental variable
if $FSIV_LOG_DIR is not defined 'pwd/log_files' is used
by default all available data sets are done unless otherwise is specified
available options are:
-p, --pion \t -> turn on pion validation
-n, --nucleon \t -> turn on nucleon validation
-k, --kaon \t -> turn on kaon validation
    *** if none of the above is chosen, they are all turned on ***
-t, --total \t -> turn on total cross section validation
-e, --energy \t -> turn on dsigma / denergy validation
-a, --angle \t -> turn on dsigma / dangle validation
    *** if none of the above is chosen, they are all turned on ***
-s, --simulation \t -> turn on 'runfast.pl' to generate root files
    -f, --plot \t -> turn on 'intranukeplotter.pl' to generate plots
    *** if none of the above is chosen, they are all turned on ***
--nof_events_diff= -> number of events for differential cross section (default=1,000,000)
--nof_events_total= -> number of events for total cross section (default=100,000)
    --command= \t -> custom command prepended when running the generated bash scripts
    --FNAL \t -> set command for FNAL grid (i.e. jobsub -e GENIE -e LD_LIBRARY_PATH -e PATH -e HOME)
--authors= \t -> list of authors to be done (only for differential xsec)
--isospins= \t -> list of particles to be done (only for total xsec): pip,pim,p,n,kp,km
Examples:
1. run simulations and create plots for all available pion data sets for total cross section
./fsiv.py --pion --total
2. create only plots for author1 and author2
    ./fsiv.py -n --plot --authors='author1 author2'
3. run simulations and create plots for all particles for dsigma/denergy data sets with custom command
./fsiv.py --energy --command='my_command --option'
4. run only simulations for total cross section for pip and kp with 1000 events
    ./fsiv.py -t --isospins='pip kp' --nof_events_total=1000
"""
print '*'*110,'\n'
def init ():
#set global switchers respect to the command line arguments (see help for details)
global do_pion, do_nucl, do_kaon, do_tot, do_nrg, do_ang, do_sims, do_plot, nof_events_diff, nof_events_total, command, log_file, todo_authors, todo_isospins
command = None #command to run bash scripts (e.g. jobsub -q script_name.sh)
nof_events_diff = '1000000' #number of events for runfast.pl for differential cross section
nof_events_total = '100000' #number of events for runfast.pl for total cross section
do_pion = do_nucl = do_kaon = False #switchers for particles
do_tot = do_nrg = do_ang = False #switchers for types of plots
do_sims = do_plot = False #switchers for simulations and plots
todo_authors = None #the list of authors to be done for diff xsec (if not defined -> all are done)
todo_isospins = None #the list of targets to be done (if not defined -> all are done)
try:
opts, args = getopt.getopt(sys.argv[1:], "hpnkteasf", ["help", "pion", "nucleon","kaon","total","energy","angle","simulation","plot","nof_events_diff=","nof_events_total=","command=","authors=","isospins=","FNAL"])
except getopt.GetoptError as err:
help()
print 'INPUT ERROR: ', str(err), '\n'
sys.exit(2)
for o, a in opts:
if o in ("-h", "--help"):
help()
sys.exit(0)
elif o in ("-p", "--pion"): do_pion = True
elif o in ("-n", "--nucleon"): do_nucl = True
elif o in ("-k", "--kaon"): do_kaon = True
elif o in ("-t", "--total"): do_tot = True
elif o in ("-e", "--energy"): do_nrg = True
elif o in ("-a", "--angle"): do_ang = True
elif o in ("-s", "--simulation"): do_sims = True
elif o in ("-f", "--plot"): do_plot = True
elif o in ("--nof_events_diff"): nof_events_diff = a
elif o in ("--nof_events_total"): nof_events_total = a
elif o in ("--command"): command = a
elif o in ("--authors"): todo_authors = a.split(' ')
elif o in ("--isospins"): todo_isospins = a.split(' ')
elif o in ("--FNAL"): command = "jobsub -e GENIE -e LD_LIBRARY_PATH -e PATH -e HOME"
if not do_pion + do_nucl + do_kaon: do_pion = do_nucl = do_kaon = True #if no particle is chosen all are set on
if not do_tot + do_nrg + do_ang: do_tot = do_nrg = do_ang = True #if no xsec type is chosen all are set on
if not do_sims + do_plot: do_sims = do_plot = True #if no task is selected do both: run simulations and prepare plots
print_options()
if not opts: print "\nYou did not choose any option. Default settings will be used. Run ./fsiv.py -h to see available options."
while True:
c = raw_input("\nDo you want to proceed ['Y' or 'N']? ")
if c.lower() == 'n': sys.exit()
if c.lower() == 'y': break;
call(["mkdir", "-p", LOG_DIR])
log_file = open(LOG_DIR + "/fsiv_" + date_ + ".log", "w")
def print_options (dest=None):
print >> dest, '\nThe following options were chosen:\n'
print >> dest, 'Pion mode:\t',do_pion
print >> dest, 'Nucleon mode:\t',do_nucl
print >> dest, 'Kaon mode:\t',do_kaon
print >> dest, '\nXsec data:\t',do_tot
print >> dest, 'Energy data:\t',do_nrg
print >> dest, 'Angle data:\t', do_ang
print >> dest, '\nSimulations:\t', do_sims
print >> dest, 'Plots:\t\t', do_plot
print >> dest, '\nNof events:\t', nof_events_diff, '(differential), ', nof_events_total, '(total)'
print >> dest, 'Command:\t', command
print >> dest, 'Author cut:\t', todo_authors
print >> dest, 'Isospin cut:\t', todo_isospins
print >> dest, '\nData directory:\t', DAT_DIR
print >> dest, 'ROOT/xsec dir:\t', SIM_DIR
print >> dest, 'PNG directory:\t', PNG_DIR
print >> dest, 'Logs directory:\t', LOG_DIR
print >> dest, 'Scripts dir:\t', BSH_DIR
def read_authors(dir_):
#read authors list from given directory
global wrongfiles
wrongfiles=[]
fates = ['total', 'tot', 'reac', 'elas', 'inel', 'abs', 'cex']
pions = ['pip', 'pim']
nucls = ['p', 'n', 'pp', 'np']
kaons = ['kp', 'km']
for root, dirs, files in os.walk(dir_):
if '.svn' in dirs:
dirs.remove('.svn') #skip .svn directory
for file in files:
if file.endswith(".dat"): #look only at files with 'dat' extension
                filename = re.split('[- .]',file) #extract process info assuming the following convention: author-particle-target-fate.dat (total) or author-particle-*.dat (differential)
                if len(filename) < 5: #skip files whose names split into fewer than 5 parts
wrongfiles.append(file)
continue
author = filename[0]
particle = filename[1]
if author not in authors: authors.append(Author(author)) #add author to the list (if not exist)
index = authors.index(author) #set index for given author
if particle in pions: authors[index].pion = True #add particle mode corresponding to filename
elif particle in nucls: authors[index].nucl = True
elif particle in kaons: authors[index].kaon = True
else:
wrongfiles.append(file)
continue
if filename[3] in fates: #is total cross section data
authors[index].tot = True #add total xsec distribution for this author
target = filename[2]
fate = filename[3]
tk = map(float,list(line.split()[0] for line in open(root+'/'+file, 'r') \
if line.strip() and not line.strip().startswith('#'))) #take first column of any non-empty and non-commented line (kinetic energy)
xs = map(float,list(line.split()[1] for line in open(root+'/'+file, 'r') \
if line.strip() and not line.strip().startswith('#'))) #take second column of any non-empty and non-commented line (xsec)
authors[index].tot_proc.append( \
[particle, \
tk,
max(xs), \
target, \
fate]) #add proccess information in the following format: [particle, [tk list], xsec_max, target, fate]
elif file.endswith("angdist.dat"): authors[index].ang = True #add dsigma/dangle distribution for this author if the file ends with 'angdist.dat'
                else: authors[index].nrg = True #add dsigma/denergy distribution if not 'total' nor 'angdist'
def auto_set_tk ():
#remove tk points where to dense and add where to rare
for proc in tot_proc_list: #for each available process for total xsec
res = [] #storage for result
temp = set([int(x) for x in proc.tk_data]) #take data tk points (and round them to int)
temp = filter(lambda x: x > 10, temp) #remove all points less than 10
if len(temp) < 3: res = temp #do nothing if there are less than 3 points
else:
temp = sorted(temp, reverse=True) #sort points (so pop() takes the lowest value)
res.append(temp.pop()) #save first tk point
first_tk = sum_tk = temp.pop() #take the lowest
counter = 1 #and set counter to 1
while True: #start loop
next_tk = temp.pop() #take next one
if not temp: #if it is the last one
res.append(sum_tk/counter) #save the current average tk
res.append(next_tk) #save the last tk value
break #and stop the loop
if float(next_tk)/float(first_tk) < 1.25: #if difference between first_tk and next_tk is smaller than 25%
sum_tk += next_tk #add next_tk to the sum
counter += 1 #and increase counter
else: #if not
res.append(sum_tk/counter) #save current average tk
counter = 1 #set counter to 1
first_tk = sum_tk = next_tk #next_tk is new first_tk
res.sort()
todo = True
while todo: #start loop
for x in res: #go through all tk points
index = res.index(x)
if not index == len(res)-1: #if it is not last point
                    if float(res[index+1]) / float(x) > 2.0: #if the next point is more than double the current one
res.append(int((x+res[index+1])/2)) #add the average of them
res.sort()
                        break #and restart the for loop from the beginning
                else: #stop the main loop once the last point is reached
todo = False
break
res.sort()
proc.tk_sim = set(res)
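#a worked sketch of auto_set_tk on hypothetical data: tk_data = {5, 80, 100, 105, 300}
# -> 5 is dropped (<= 10); 100 and 105 merge into their average 102 (105/100 < 1.25);
# -> 300/102 > 2.0, so the midpoint 201 is inserted, giving tk_sim = {80, 102, 201, 300}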
def prepare_total():
#create a list of particle-nuclei processes to be simulated
for author in filter(lambda a: a.tot, authors): #for all authors with total xsec data
for tp in author.tot_proc: #take each type of process
temp = TotalProcess(tp) #add or update if already exists
if temp not in tot_proc_list: tot_proc_list.append(temp)
else: tot_proc_list[tot_proc_list.index(temp)].update(temp)
auto_set_tk()
def log(text = None):
#print 'text' into a log file
if text: print >> log_file, text
#if 'text' is not given, print the list of chosen options, found authors and list of processes to be simulated for total xsec
else:
print_options(log_file)
print >> log_file, '\nThe following non-data files were found in ' + DAT_DIR + ': ', wrongfiles
print >> log_file, '\nThe following authors were found in ' + DAT_DIR + ':\n'
for author in authors: author.print_(log_file)
print >> log_file, '-'*22, '\n'
print >> log_file, 'The following processes/fates are available for total cross section:\n'
for x in tot_proc_list: x.print_(log_file)
log_file.flush()
def make_dirs(xsec,particle):
#create folders for logs, bash scripts, root/xsec files and plots
call(["mkdir", "-p", LOG_DIR+"/runfast/"+xsec+"/"+particle])
call(["mkdir", "-p", LOG_DIR+"/intranukeplotter/"+xsec+"/"+particle])
call(["mkdir", "-p", PNG_DIR+"/"+xsec+"/"+particle])
call(["mkdir", "-p", BSH_DIR+"/"+xsec+"/"+particle])
call(["mkdir", "-p", SIM_DIR+"/"+xsec+"/"+particle])
def create_dirs():
#call make_dirs for turn on processes
if do_tot:
if do_pion: make_dirs("total", "pion")
if do_nucl: make_dirs("total", "nucleon")
if do_kaon: make_dirs("total", "kaon")
if do_nrg :
if do_pion: make_dirs("dbldiff_dist", "pion")
if do_nucl: make_dirs("dbldiff_dist", "nucleon")
if do_kaon: make_dirs("dbldiff_dist", "kaon")
if do_ang:
if do_pion: make_dirs("ang_dist", "pion")
if do_nucl: make_dirs("ang_dist", "nucleon")
if do_kaon: make_dirs("ang_dist", "kaon")
def run_bs (bs):
#run bash script
print "\n\trunning " + bs + "\n"
log("\trunning " + bs)
if command: call(command + " 'bash " + bs + "'", shell=True) #if custom command is defined add it in front of 'bash bash_script.sh'
else: call("bash " + bs, shell=True)
def run_total (particle):
log("\nTotal xsec validation for " + particle + ":\n")
for proc in filter(lambda p: p.particle_kind == particle,tot_proc_list): #go through all available processes for given particle
if not todo_isospins or proc.particle in todo_isospins: #check isospin filter
bash_scrt = BSH_DIR + "/total/" + particle + "/" + proc.particle + "_" + proc.target + "_" + date_ + ".sh" #create bash script file
bash_file = open(bash_scrt, "w")
print >> bash_file, "#!/bin/bash"
if do_sims: #if simulations are on
runfast = "perl runfast.pl --type totxs --rm yes" \
+ " --p " + proc.particle \
+ " --t " + proc.target \
+ " --el '" + str(sorted(proc.tk_sim))[1:-1] + "'" \
+ " --n " + nof_events_total \
+ " --rootdir " + SIM_DIR + "/total/" + particle \
+ " 1>/dev/null 2>" + LOG_DIR + "/runfast/total/" + particle + "/" + proc.particle + "_" + proc.target + "_" + date_ + ".log" #prepare runfast.pl command
print >> bash_file, runfast #put the command to bash sctipt
if do_plot: #if plots are on
print >> bash_file, "shopt -s nocaseglob" #turn off case sensivity
temp = "f=$(ls " + SIM_DIR + "/total/" + particle + "/*_" + proc.particle + "_" + proc.target + "_*.txt -Art | tail -n 1)" #bash script will find the recent *_particle_target_*.txt file
print >> bash_file, temp
print >> bash_file, "m=$(echo ${f##*/} | cut -d'_' -f 1)" #and extract its date for intranukeplotter
print >> bash_file, "d=$(echo ${f##*/} | cut -d'_' -f 2)"
print >> bash_file, "y=$(echo ${f##*/} | cut -d'_' -f 3)"
for fate in proc.fate: #for each available fate for given process
index = proc.fate.index(fate)
plotter = "perl intranukeplotter.pl --type totxs --rm yes" \
+ " --stype " + fate \
+ " --p " + proc.particle \
+ " --t " + proc.target \
+ " --vmax " + str(1.3*proc.xsec_max[index]) \
+ " --hmax " + str(1.1*proc.tk_max[index]) \
+ " --dorf $m'_'$d'_'$y" \
+ " --rootdir " + SIM_DIR + "/total/" + particle \
+ " --datadir " + DAT_DIR + "/total/" + proc.particle_kind \
+ " --pngdir " + PNG_DIR + "/total/" + proc.particle_kind \
+ " > " + LOG_DIR + "/intranukeplotter/total/" + particle + "/" + proc.particle + "_" + proc.target + "_" + date_ + ".log" #prepare intranukeplotter.pl command
print >> bash_file, plotter #put the command to bash script
bash_file.close()
run_bs(bash_scrt) #run bash script
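#a sketch of one script generated above (particle, target, energies and paths are hypothetical):
# #!/bin/bash
# perl runfast.pl --type totxs --rm yes --p pip --t c --el '80, 102, 201' --n 100000 --rootdir sim_files/total/pion ...
# f=$(ls sim_files/total/pion/*_pip_c_*.txt -Art | tail -n 1)
# perl intranukeplotter.pl --type totxs --stype reac --p pip --t c --dorf $m'_'$d'_'$y ...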
def run_diff (particle,ang=False):
if ang: log("\ndsigma/dtheta validation for " + particle + ":\n")
else: log("\ndsigma/dE validation for " + particle + ":\n")
dir_ = "dbldiff_dist"
type_ = "nrg"
if ang:
dir_ = "ang_dist"
type_ = "ang"
for author in filter(lambda a: a.check(particle),authors): #go through all available authors for given particle
if (not ang and author.nrg) or (ang and author.ang):
bash_scrt = BSH_DIR + "/" + dir_ + "/" + particle + "/" + author.name + "_" + date_ + ".sh" #create bash script file
bash_file = open(bash_scrt, "w")
print >> bash_file, "#!/bin/bash"
if do_sims: #if simulations are on
runfast = "perl runfast.pl --type root --rm yes --name yes" \
+ " --n " + nof_events_diff \
+ " --a " + author.name \
+ " --rootdir " + SIM_DIR + "/" + dir_ + "/" + particle \
+ " 1>/dev/null 2>" + LOG_DIR + "/runfast/" + dir_ + "/" + particle + "/" + author.name + "_" + date_ + ".log" #prepare runfast.pl command
print >> bash_file, runfast #put the command to bash script
if do_plot: #if plots are on
print >> bash_file, "shopt -s nocaseglob" #turn off case sensivity
temp = "f=$(ls " + SIM_DIR + "/" + dir_ + "/" + particle + "/" + author.name + "_*.root -Art | tail -n 1)" #bash script will find the recent author_*.root file
print >> bash_file, temp
print >> bash_file, "m=$(echo ${f##*/} | cut -d'_' -f 2)" #and extract its date for intranukeplotter
print >> bash_file, "d=$(echo ${f##*/} | cut -d'_' -f 3)"
print >> bash_file, "y=$(echo ${f##*/} | cut -d'_' -f 4)"
plotter = "perl intranukeplotter.pl --type " + type_ + " --name yes --rm yes" \
+ " --rootdir " + SIM_DIR + "/" + dir_ + "/" + particle \
+ " --datadir " + DAT_DIR + "/" + dir_ + "/" + particle \
+ " --pngdir " + PNG_DIR + "/" + dir_ + "/" + particle \
+ " --dorf $m'_'$d'_'$y" \
+ " --a " + author.name \
+ " >" + LOG_DIR + "/intranukeplotter/" + dir_ + "/" + particle + "/" + author.name + "_" + date_ + ".log" #prepare intrnukeplotter.pl command
print >> bash_file, plotter #put the command to bash script
bash_file.close()
run_bs(bash_scrt) #run bash script
#MAIN PROGRAM
if __name__ == "__main__":
init()
read_authors(DAT_DIR)
prepare_total()
log()
create_dirs()
if do_tot:
if do_pion: run_total ('pion')
if do_nucl: run_total ('nucleon')
if do_kaon: run_total ('kaon')
if do_nrg:
if do_pion: run_diff ('pion')
if do_nucl: run_diff ('nucleon')
if do_kaon: run_diff ('kaon')
if do_ang:
if do_pion: run_diff ('pion',True)
if do_nucl: run_diff ('nucleon',True)
if do_kaon: run_diff ('kaon',True)
log('\nDONE')
| gpl-3.0 |
bottompawn/kbengine | kbe/src/lib/python/Lib/encodings/iso8859_9.py | 272 | 13156 | """ Python Character Mapping Codec iso8859_9 generated from 'MAPPINGS/ISO8859/8859-9.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='iso8859-9',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\x80' # 0x80 -> <control>
'\x81' # 0x81 -> <control>
'\x82' # 0x82 -> <control>
'\x83' # 0x83 -> <control>
'\x84' # 0x84 -> <control>
'\x85' # 0x85 -> <control>
'\x86' # 0x86 -> <control>
'\x87' # 0x87 -> <control>
'\x88' # 0x88 -> <control>
'\x89' # 0x89 -> <control>
'\x8a' # 0x8A -> <control>
'\x8b' # 0x8B -> <control>
'\x8c' # 0x8C -> <control>
'\x8d' # 0x8D -> <control>
'\x8e' # 0x8E -> <control>
'\x8f' # 0x8F -> <control>
'\x90' # 0x90 -> <control>
'\x91' # 0x91 -> <control>
'\x92' # 0x92 -> <control>
'\x93' # 0x93 -> <control>
'\x94' # 0x94 -> <control>
'\x95' # 0x95 -> <control>
'\x96' # 0x96 -> <control>
'\x97' # 0x97 -> <control>
'\x98' # 0x98 -> <control>
'\x99' # 0x99 -> <control>
'\x9a' # 0x9A -> <control>
'\x9b' # 0x9B -> <control>
'\x9c' # 0x9C -> <control>
'\x9d' # 0x9D -> <control>
'\x9e' # 0x9E -> <control>
'\x9f' # 0x9F -> <control>
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK
'\xa2' # 0xA2 -> CENT SIGN
'\xa3' # 0xA3 -> POUND SIGN
'\xa4' # 0xA4 -> CURRENCY SIGN
'\xa5' # 0xA5 -> YEN SIGN
'\xa6' # 0xA6 -> BROKEN BAR
'\xa7' # 0xA7 -> SECTION SIGN
'\xa8' # 0xA8 -> DIAERESIS
'\xa9' # 0xA9 -> COPYRIGHT SIGN
'\xaa' # 0xAA -> FEMININE ORDINAL INDICATOR
'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xac' # 0xAC -> NOT SIGN
'\xad' # 0xAD -> SOFT HYPHEN
'\xae' # 0xAE -> REGISTERED SIGN
'\xaf' # 0xAF -> MACRON
'\xb0' # 0xB0 -> DEGREE SIGN
'\xb1' # 0xB1 -> PLUS-MINUS SIGN
'\xb2' # 0xB2 -> SUPERSCRIPT TWO
'\xb3' # 0xB3 -> SUPERSCRIPT THREE
'\xb4' # 0xB4 -> ACUTE ACCENT
'\xb5' # 0xB5 -> MICRO SIGN
'\xb6' # 0xB6 -> PILCROW SIGN
'\xb7' # 0xB7 -> MIDDLE DOT
'\xb8' # 0xB8 -> CEDILLA
'\xb9' # 0xB9 -> SUPERSCRIPT ONE
'\xba' # 0xBA -> MASCULINE ORDINAL INDICATOR
'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
'\xbf' # 0xBF -> INVERTED QUESTION MARK
'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
'\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE
'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
'\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE
'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
'\u011e' # 0xD0 -> LATIN CAPITAL LETTER G WITH BREVE
'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE
'\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE
'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xd7' # 0xD7 -> MULTIPLICATION SIGN
'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\u0130' # 0xDD -> LATIN CAPITAL LETTER I WITH DOT ABOVE
'\u015e' # 0xDE -> LATIN CAPITAL LETTER S WITH CEDILLA
'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE
'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
'\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE
'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
'\u011f' # 0xF0 -> LATIN SMALL LETTER G WITH BREVE
'\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE
'\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE
'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
'\xf7' # 0xF7 -> DIVISION SIGN
'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
'\u0131' # 0xFD -> LATIN SMALL LETTER DOTLESS I
'\u015f' # 0xFE -> LATIN SMALL LETTER S WITH CEDILLA
'\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| lgpl-3.0 |
xylsxyls/xueyelingshuang | src/BigNumberBase/scripts/rebuild_BigNumberBase.py | 1 | 14102 | #!python3
# -*- coding:utf-8 -*-
import os
import sys
import time
import ctypes
import shutil
import subprocess
IsPy3 = sys.version_info[0] >= 3
if IsPy3:
import winreg
else:
import codecs
import _winreg as winreg
BuildType = 'Release'
IsRebuild = True
Build = 'Rebuild'
Update = False
Copy = False
CleanAll = False
BuildTimeout = 30*60
Bit = 'Win32'
Dlllib = 'dll'
MSBuild = None
IncrediBuild = None
UseMSBuild = True # build with MSBuild by default; if False, build with IncrediBuild instead
# For a different project, only the following 5 variables need to be changed
SlnFile = '../BigNumberBase.sln' # path relative to this py script
UpdateDir = [] # paths relative to this py script; leave empty to skip updating
ExecBatList = [] # scripts run before building, relative to this py script; may be empty; each bat is executed after cd'ing into its directory
MSBuildFirstProjects = [r'BigNumberBase'] # for MSBuild, use the project file's path inside the solution (.sln)
# projects MSBuild builds first; leave empty to not specify an order
IncrediBuildFirstProjects = ['BigNumberBase'] # IncrediBuild only needs the project name
# projects IncrediBuild builds first; leave empty to not specify an order
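# Example invocations (illustrative only, based on the argv flags parsed in __main__ below):
#   python rebuild_BigNumberBase.py                         -> Rebuild Win32 Release dll with MSBuild
#   python rebuild_BigNumberBase.py debug 64                -> Rebuild x64 Debug
#   python rebuild_BigNumberBase.py lib build incredibuild  -> incremental lib build via IncrediBuild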
class ConsoleColor():
'''This class defines the values of color for printing on console window'''
Black = 0
DarkBlue = 1
DarkGreen = 2
DarkCyan = 3
DarkRed = 4
DarkMagenta = 5
DarkYellow = 6
Gray = 7
DarkGray = 8
Blue = 9
Green = 10
Cyan = 11
Red = 12
Magenta = 13
Yellow = 14
White = 15
class Coord(ctypes.Structure):
_fields_ = [('X', ctypes.c_short), ('Y', ctypes.c_short)]
class SmallRect(ctypes.Structure):
_fields_ = [('Left', ctypes.c_short),
('Top', ctypes.c_short),
('Right', ctypes.c_short),
('Bottom', ctypes.c_short),
]
class ConsoleScreenBufferInfo(ctypes.Structure):
_fields_ = [('dwSize', Coord),
('dwCursorPosition', Coord),
('wAttributes', ctypes.c_uint),
('srWindow', SmallRect),
('dwMaximumWindowSize', Coord),
]
class Win32API():
'''Some native methods for python calling'''
StdOutputHandle = -11
ConsoleOutputHandle = None
DefaultColor = None
@staticmethod
def SetConsoleColor(color):
'''Change the text color on console window'''
if not Win32API.DefaultColor:
if not Win32API.ConsoleOutputHandle:
Win32API.ConsoleOutputHandle = ctypes.windll.kernel32.GetStdHandle(Win32API.StdOutputHandle)
bufferInfo = ConsoleScreenBufferInfo()
ctypes.windll.kernel32.GetConsoleScreenBufferInfo(Win32API.ConsoleOutputHandle, ctypes.byref(bufferInfo))
Win32API.DefaultColor = int(bufferInfo.wAttributes & 0xFF)
if IsPy3:
sys.stdout.flush() # need flush stdout in python 3
ctypes.windll.kernel32.SetConsoleTextAttribute(Win32API.ConsoleOutputHandle, color)
@staticmethod
def ResetConsoleColor():
'''Reset the default text color on console window'''
if IsPy3:
sys.stdout.flush() # need flush stdout in python 3
ctypes.windll.kernel32.SetConsoleTextAttribute(Win32API.ConsoleOutputHandle, Win32API.DefaultColor)
class Logger():
LogFile = '@AutomationLog.txt'
LineSep = '\n'
@staticmethod
def Write(log, consoleColor = -1, writeToFile = True, printToStdout = True):
'''
consoleColor: value in class ConsoleColor, such as ConsoleColor.DarkGreen
if consoleColor == -1, use default color
'''
if printToStdout:
isValidColor = (consoleColor >= ConsoleColor.Black and consoleColor <= ConsoleColor.White)
if isValidColor:
Win32API.SetConsoleColor(consoleColor)
try:
sys.stdout.write(log)
except UnicodeError as e:
Win32API.SetConsoleColor(ConsoleColor.Red)
isValidColor = True
sys.stdout.write(str(type(e)) + ' can\'t print the log!\n')
if isValidColor:
Win32API.ResetConsoleColor()
if not writeToFile:
return
if IsPy3:
logFile = open(Logger.LogFile, 'a+', encoding = 'utf-8')
else:
logFile = codecs.open(Logger.LogFile, 'a+', 'utf-8')
try:
logFile.write(log)
except Exception as ex:
sys.stdout.write('can not write log with exception: {0} {1}'.format(type(ex), ex))
finally:
logFile.close() # closing also flushes, so the log is saved in python 3 as well
@staticmethod
def WriteLine(log, consoleColor = -1, writeToFile = True, printToStdout = True):
'''
consoleColor: value in class ConsoleColor, such as ConsoleColor.DarkGreen
if consoleColor == -1, use default color
'''
Logger.Write(log + Logger.LineSep, consoleColor, writeToFile, printToStdout)
@staticmethod
def Log(log, consoleColor = -1, writeToFile = True, printToStdout = True):
'''
consoleColor: value in class ConsoleColor, such as ConsoleColor.DarkGreen
if consoleColor == -1, use default color
'''
t = time.localtime()
log = '{0}-{1:02}-{2:02} {3:02}:{4:02}:{5:02} - {6}{7}'.format(t.tm_year, t.tm_mon, t.tm_mday,
t.tm_hour, t.tm_min, t.tm_sec, log, Logger.LineSep)
Logger.Write(log, consoleColor, writeToFile, printToStdout)
@staticmethod
def DeleteLog():
if os.path.exists(Logger.LogFile):
os.remove(Logger.LogFile)
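# Minimal Logger usage sketch (illustrative only):
#   Logger.Log('build started', ConsoleColor.Green)  # timestamped line, also appended to @AutomationLog.txt
#   Logger.WriteLine('plain line')                   # same, but without the timestamp prefix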
def GetMSBuildPath():
if Bit == 'Win32':
cmd = 'call "%VS120COMNTOOLS%..\\..\\VC\\vcvarsall.bat" x86\nwhere msbuild'
elif Bit == 'x64':
cmd = 'call "%VS120COMNTOOLS%..\\..\\VC\\vcvarsall.bat" amd64\nwhere msbuild'
ftemp = open('GetMSBuildPath.bat', 'wt')
ftemp.write(cmd)
ftemp.close()
p = subprocess.Popen('GetMSBuildPath.bat', stdout = subprocess.PIPE)
p.wait()
lines = p.stdout.read().decode().splitlines()
os.remove('GetMSBuildPath.bat')
for line in lines:
if 'MSBuild.exe' in line:
return line
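# GetMSBuildPath returns the first 'where msbuild' hit, e.g. a path like
# r'C:\Program Files (x86)\MSBuild\12.0\Bin\MSBuild.exe' (hypothetical value;
# VS120COMNTOOLS implies VS2013 / MSBuild 12).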
def GetIncrediBuildPath():
try:
key=winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r'SOFTWARE\Classes\IncrediBuild.MonitorFile\shell\open\command')
value, typeId = winreg.QueryValueEx(key, '')
if value:
start = value.find('"')
end = value.find('"', start + 1)
path = value[start+1:end]
buildConsole = os.path.join(os.path.dirname(path), 'BuildConsole.exe')
return buildConsole
except OSError: # FileNotFoundError on py3, WindowsError on py2; both subclass OSError
Logger.WriteLine('can not find IncrediBuild', ConsoleColor.Red)
def UpdateCode():
# put git to path first
if not shutil.which('git.exe'):
Logger.Log('Cannot find git.exe. Please make sure the git\\bin directory was added to the PATH environment variable when installing git!!!\nSkipping code update!!!', ConsoleColor.Yellow)
return False
oldDir = os.getcwd()
for dir in UpdateDir:
os.chdir(dir)
ret = os.system('git pull')
os.chdir(oldDir)
if ret != 0:
Logger.Log('update {0} failed'.format(dir), ConsoleColor.Yellow)
return False
return True
def BuildProject(cmd):
for i in range(6):
Logger.WriteLine(cmd, ConsoleColor.Cyan)
buildFailed = True
startTime = time.time()
p = subprocess.Popen(cmd) # IncrediBuild must not use stdout=subprocess.PIPE, otherwise p.wait() never returns; probably an IncrediBuild bug
if IsPy3:
try:
buildFailed = p.wait(BuildTimeout)
except subprocess.TimeoutExpired as e:
Logger.Log('{0}'.format(e), ConsoleColor.Yellow)
p.kill()
else:
buildFailed = p.wait()
if not UseMSBuild:
# IncrediBuild's exit code does not indicate whether the build succeeded; parse its output instead
fin = open('IncrediBuild.log')
for line in fin:
if line.startswith('=========='):
Logger.Write(line, ConsoleColor.Cyan, writeToFile = True if IsPy3 else False)
if IsPy3:
start = line.find('失败') + 3 # summary line looks like "========== 生成: 成功 1 个,失败 0 个,最新 0 个,跳过 0 个 ==========" ('失败' means 'failed'; the literal must stay Chinese to match localized build output)
else: # special handling for py2 compatibility; ugly
start = 0
n2 = 0
while 1:
if line[start].isdigit():
n2 += 1
if n2 == 2:
break
start = line.find(' ', start)
start += 1
end = line.find(' ', start)
failCount = int(line[start:end])
buildFailed = failCount > 0
else:
Logger.Write(line, ConsoleColor.Red, writeToFile = True if IsPy3 else False, printToStdout = True if ' error ' in line else False)
fin.close()
costTime = time.time() - startTime
Logger.WriteLine('build cost time: {0:.1f}s\n'.format(costTime), ConsoleColor.Green)
if not buildFailed:
return True
return False
def BuildAllProjects():
buildSuccess = False
cmds = []
if UseMSBuild:
if IsRebuild:
if CleanAll:
cmds.append('{0} {1} /t:Clean /p:Configuration={2} /nologo /maxcpucount /filelogger /consoleloggerparameters:ErrorsOnly'.format(MSBuild, SlnFile, 'Debug'))
cmds.append('{0} {1} /t:Clean /p:Configuration={2} /nologo /maxcpucount /filelogger /consoleloggerparameters:ErrorsOnly'.format(MSBuild, SlnFile, 'Release'))
else:
cmds.append('{0} {1} /t:Clean /p:Configuration={2} /nologo /maxcpucount /filelogger /consoleloggerparameters:ErrorsOnly'.format(MSBuild, SlnFile, BuildType))
for project in MSBuildFirstProjects:
cmds.append('{0} {1} /t:{2} /p:Configuration={3};platform={4} /nologo /maxcpucount /filelogger /consoleloggerparameters:ErrorsOnly'.format(MSBuild, SlnFile, project, BuildType, Bit))
cmds.append('{0} {1} /p:Configuration={2};platform={3} /nologo /maxcpucount /filelogger /consoleloggerparameters:ErrorsOnly'.format(MSBuild, SlnFile, BuildType, Bit))
else: #IncrediBuild
if IsRebuild:
if CleanAll:
cmds.append('"{0}" {1} /clean /cfg="{2}|{3}" /nologo /out=IncrediBuild.log'.format(IncrediBuild, SlnFile, 'Debug', Bit))
cmds.append('"{0}" {1} /clean /cfg="{2}|{3}" /nologo /out=IncrediBuild.log'.format(IncrediBuild, SlnFile, 'Release', Bit))
else:
cmds.append('"{0}" {1} /clean /cfg="{2}|{3}" /nologo /out=IncrediBuild.log'.format(IncrediBuild, SlnFile, BuildType, Bit))
for project in IncrediBuildFirstProjects:
cmds.append('"{0}" {1} /build /prj={2} /cfg="{3}|{4}" /nologo /out=IncrediBuild.log'.format(IncrediBuild, SlnFile, project, BuildType, Bit))
cmds.append('"{0}" {1} /build /cfg="{2}|{3}" /nologo /out=IncrediBuild.log'.format(IncrediBuild, SlnFile, BuildType, Bit))
for cmd in cmds:
buildSuccess = BuildProject(cmd)
if not buildSuccess:
break
return buildSuccess
def main():
if UseMSBuild:
if not os.path.exists(MSBuild):
Logger.Log('can not find msbuild.exe', ConsoleColor.Red)
return 1
else:
if not os.path.exists(IncrediBuild):
Logger.Log('can not find BuildConsole.exe', ConsoleColor.Red)
return 1
dir = os.path.dirname(__file__)
if dir:
oldDir = os.getcwd()
os.chdir(dir)
if Update:
if not UpdateCode():
return 1
Logger.Log('git update succeed', ConsoleColor.Green)
if Copy:
for bat in ExecBatList:
oldBatDir = os.getcwd()
batDir = os.path.dirname(bat)
batName = os.path.basename(bat)
if batDir:
os.chdir(batDir)
start = time.time() # time.clock() was removed in Python 3.8; time.time() works on both py2 and py3
os.system(batName)
Logger.Log('run "{}" cost {:.1f} seconds'.format(batName, time.time() - start), ConsoleColor.Green)
if batDir:
os.chdir(oldBatDir)
buildSuccess = BuildAllProjects()
if buildSuccess:
Logger.Log('build succeed', ConsoleColor.Green)
else:
Logger.Log('build failed', ConsoleColor.Red)
if dir:
os.chdir(oldDir)
return 0 if buildSuccess else 1
if __name__ == '__main__':
Logger.Log('run with argv ' + str(sys.argv), ConsoleColor.Green)
sys.argv = [x.lower() for x in sys.argv]
start_time = time.time()
if 'debug' in sys.argv:
BuildType = 'Debug'
if 'lib' in sys.argv:
Dlllib = 'lib'
SlnFile = '../BigNumberBase_lib.sln'
MSBuildFirstProjects = [r'BigNumberBase_lib']
IncrediBuildFirstProjects = ['BigNumberBase_lib']
if '64' in sys.argv:
Bit = 'x64'
if 'build' in sys.argv:
IsRebuild = False
Build = 'Build'
if 'update' in sys.argv:
Update = True
if 'copy' in sys.argv:
Copy = True
if 'clean' in sys.argv:
CleanAll = True
if 'incredibuild' in sys.argv:
UseMSBuild = False
if UseMSBuild:
MSBuild = GetMSBuildPath()
if not MSBuild:
Logger.Log('can not find MSBuild.exe', ConsoleColor.Red)
exit(1)
else:
IncrediBuild = GetIncrediBuildPath()
if not IncrediBuild:
Logger.Log('can not find BuildConsole.exe', ConsoleColor.Red)
exit(1)
cwd = os.getcwd()
Logger.WriteLine('current dir is: {0}, {1}: {2}'.format(cwd, Build, BuildType))
ret = main()
end_time = time.time()
cost_time = end_time-start_time
Logger.WriteLine('all build cost time: {0:.2f} seconds'.format(cost_time), ConsoleColor.Green)
exit(ret) | mit |
ChanChiChoi/scikit-learn | sklearn/cluster/tests/test_affinity_propagation.py | 341 | 2620 | """
Testing for Clustering methods
"""
import numpy as np
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.cluster.affinity_propagation_ import AffinityPropagation
from sklearn.cluster.affinity_propagation_ import affinity_propagation
from sklearn.datasets.samples_generator import make_blobs
from sklearn.metrics import euclidean_distances
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=60, n_features=2, centers=centers,
cluster_std=0.4, shuffle=True, random_state=0)
def test_affinity_propagation():
# Affinity Propagation algorithm
# Compute similarities
S = -euclidean_distances(X, squared=True)
preference = np.median(S) * 10
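# Note: the preference value controls how many exemplars emerge; larger (less
# negative) preferences generally yield more clusters. Since S holds negative
# squared distances, median(S) * 10 is more negative than the default median(S),
# biasing toward fewer clusters here.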
# Compute Affinity Propagation
cluster_centers_indices, labels = affinity_propagation(
S, preference=preference)
n_clusters_ = len(cluster_centers_indices)
assert_equal(n_clusters, n_clusters_)
af = AffinityPropagation(preference=preference, affinity="precomputed")
labels_precomputed = af.fit(S).labels_
af = AffinityPropagation(preference=preference, verbose=True)
labels = af.fit(X).labels_
assert_array_equal(labels, labels_precomputed)
cluster_centers_indices = af.cluster_centers_indices_
n_clusters_ = len(cluster_centers_indices)
assert_equal(np.unique(labels).size, n_clusters_)
assert_equal(n_clusters, n_clusters_)
# Test also with no copy
_, labels_no_copy = affinity_propagation(S, preference=preference,
copy=False)
assert_array_equal(labels, labels_no_copy)
# Test input validation
assert_raises(ValueError, affinity_propagation, S[:, :-1])
assert_raises(ValueError, affinity_propagation, S, damping=0)
af = AffinityPropagation(affinity="unknown")
assert_raises(ValueError, af.fit, X)
def test_affinity_propagation_predict():
# Test AffinityPropagation.predict
af = AffinityPropagation(affinity="euclidean")
labels = af.fit_predict(X)
labels2 = af.predict(X)
assert_array_equal(labels, labels2)
def test_affinity_propagation_predict_error():
# Test exception in AffinityPropagation.predict
# Not fitted.
af = AffinityPropagation(affinity="euclidean")
assert_raises(ValueError, af.predict, X)
# Predict not supported when affinity="precomputed".
S = np.dot(X, X.T)
af = AffinityPropagation(affinity="precomputed")
af.fit(S)
assert_raises(ValueError, af.predict, X)
| bsd-3-clause |
hugs/django | django/contrib/gis/geos/prototypes/__init__.py | 8 | 1585 | """
This module contains all of the GEOS ctypes function prototypes. Each
prototype handles the interaction between the GEOS library and Python
via ctypes.
"""
# Coordinate sequence routines.
from django.contrib.gis.geos.prototypes.coordseq import create_cs, get_cs, \
cs_clone, cs_getordinate, cs_setordinate, cs_getx, cs_gety, cs_getz, \
cs_setx, cs_sety, cs_setz, cs_getsize, cs_getdims
# Geometry routines.
from django.contrib.gis.geos.prototypes.geom import from_hex, from_wkb, from_wkt, \
create_point, create_linestring, create_linearring, create_polygon, create_collection, \
destroy_geom, get_extring, get_intring, get_nrings, get_geomn, geom_clone, \
geos_normalize, geos_type, geos_typeid, geos_get_srid, geos_set_srid, \
get_dims, get_num_coords, get_num_geoms, \
to_hex, to_wkb, to_wkt
# Miscellaneous routines.
from django.contrib.gis.geos.prototypes.misc import geos_area, geos_distance, geos_length
# Predicates
from django.contrib.gis.geos.prototypes.predicates import geos_hasz, geos_isempty, \
geos_isring, geos_issimple, geos_isvalid, geos_contains, geos_crosses, \
    geos_disjoint, geos_equals, geos_equalsexact, geos_intersects, \
    geos_overlaps, geos_relatepattern, geos_touches, geos_within
# Topology routines
from django.contrib.gis.geos.prototypes.topology import \
geos_boundary, geos_buffer, geos_centroid, geos_convexhull, geos_difference, \
geos_envelope, geos_intersection, geos_pointonsurface, geos_preservesimplify, \
geos_simplify, geos_symdifference, geos_union, geos_relate
| bsd-3-clause |
Toshakins/wagtail | wagtail/wagtailcore/tests/test_migrations.py | 7 | 2212 | """
Check that all changes to Wagtail models have had migrations created. If there
are outstanding model changes that need migrations, fail the tests.
"""
from __future__ import absolute_import, unicode_literals
from django.apps import apps
from django.db.migrations.autodetector import MigrationAutodetector
from django.db.migrations.loader import MigrationLoader
from django.db.migrations.questioner import MigrationQuestioner
from django.db.migrations.state import ProjectState
from django.test import TestCase
from django.utils.six import iteritems
class TestForMigrations(TestCase):
def test__migrations(self):
app_labels = set(app.label for app in apps.get_app_configs()
if app.name.startswith('wagtail.'))
for app_label in app_labels:
apps.get_app_config(app_label.split('.')[-1])
loader = MigrationLoader(None, ignore_no_migrations=True)
conflicts = dict(
(app_label, conflict)
for app_label, conflict in iteritems(loader.detect_conflicts())
if app_label in app_labels
)
if conflicts:
name_str = "; ".join("%s in %s" % (", ".join(names), app)
for app, names in conflicts.items())
self.fail("Conflicting migrations detected (%s)." % name_str)
autodetector = MigrationAutodetector(
loader.project_state(),
ProjectState.from_apps(apps),
MigrationQuestioner(specified_apps=app_labels, dry_run=True),
)
changes = autodetector.changes(
graph=loader.graph,
trim_to_apps=app_labels or None,
convert_apps=app_labels or None,
)
if changes:
migrations = '\n'.join((
' {migration}\n{changes}'.format(
migration=migration,
changes='\n'.join(' {0}'.format(operation.describe())
for operation in migration.operations))
for (_, migrations) in changes.items()
for migration in migrations))
self.fail('Model changes with no migrations detected:\n%s' % migrations)
| bsd-3-clause |
shubhdev/edx-platform | common/djangoapps/embargo/admin.py | 154 | 1315 | """
Django admin page for embargo models
"""
from django.contrib import admin
import textwrap
from config_models.admin import ConfigurationModelAdmin
from embargo.models import IPFilter, CountryAccessRule, RestrictedCourse
from embargo.forms import IPFilterForm, RestrictedCourseForm
class IPFilterAdmin(ConfigurationModelAdmin):
"""Admin for blacklisting/whitelisting specific IP addresses"""
form = IPFilterForm
fieldsets = (
(None, {
'fields': ('enabled', 'whitelist', 'blacklist'),
'description': textwrap.dedent("""Enter specific IP addresses to explicitly
whitelist (not block) or blacklist (block) in the appropriate box below.
Separate IP addresses with a comma. Do not surround with quotes.
""")
}),
)
class CountryAccessRuleInline(admin.StackedInline):
"""Inline editor for country access rules. """
model = CountryAccessRule
extra = 1
def has_delete_permission(self, request, obj=None):
return True
class RestrictedCourseAdmin(admin.ModelAdmin):
"""Admin for configuring course restrictions. """
inlines = [CountryAccessRuleInline]
form = RestrictedCourseForm
admin.site.register(IPFilter, IPFilterAdmin)
admin.site.register(RestrictedCourse, RestrictedCourseAdmin)
| agpl-3.0 |
precedenceguo/mxnet | tests/python/unittest/test_module.py | 5 | 36954 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet as mx
import mxnet.ndarray as nd
from mxnet.test_utils import *
import numpy as np
from functools import reduce
from mxnet.module.executor_group import DataParallelExecutorGroup
from common import setup_module, with_seed, assertRaises, teardown
from collections import namedtuple
@with_seed()
def test_module_dtype():
dtype = np.float16
dshape = (3, 8, 7)
sym = mx.sym.Variable('data')
sym = mx.sym.Activation(data=sym, act_type='relu', __layout__='TNC')
mod = mx.mod.Module(sym, ('data',), None, context=[mx.cpu(0), mx.cpu(1)])
mod.bind(data_shapes=[mx.io.DataDesc('data', dshape, dtype, layout='TNC')])
mod.init_params()
mod.forward(mx.io.DataBatch(data=[mx.nd.ones(dshape, dtype=dtype)],
label=None))
mod.backward([mx.nd.ones(dshape, dtype=dtype)])
for x in mod.get_outputs():
assert x.dtype == dtype
@with_seed()
def test_module_input_grads():
a = mx.sym.Variable('a', __layout__='NC')
b = mx.sym.Variable('b', __layout__='NC')
c = mx.sym.Variable('c', __layout__='NC')
c = a + 2 * b + 3 * c
net = mx.mod.Module(c, data_names=['b', 'c', 'a'], label_names=None,
context=[mx.cpu(0), mx.cpu(1)])
net.bind(data_shapes=[['b', (5, 5)], ['c', (5, 5)], ['a', (5, 5)]],
label_shapes=None, inputs_need_grad=True)
net.init_params()
net.forward(data_batch=mx.io.DataBatch(data=[nd.ones((5, 5)),
nd.ones((5, 5)),
nd.ones((5, 5))]))
net.backward(out_grads=[nd.ones((5, 5))])
input_grads = net.get_input_grads()
b_grad = input_grads[0].asnumpy()
c_grad = input_grads[1].asnumpy()
a_grad = input_grads[2].asnumpy()
assert np.all(a_grad == 1), a_grad
assert np.all(b_grad == 2), b_grad
assert np.all(c_grad == 3), c_grad
@with_seed()
def test_module_ctx_group():
def check_module_ctx_group(ctxs, group2ctxs, grad_ctxs=None):
with mx.AttrScope(ctx_group='dev1'):
a = mx.symbol.Variable('a')
a = a * 2
with mx.AttrScope(ctx_group='dev2'):
b = mx.symbol.Variable('b')
c = a + b
shape = (2, 5)
mod1 = mx.mod.Module(c, context=ctxs, data_names=['a', 'b'], label_names=None,
group2ctxs=group2ctxs)
mod1.bind(data_shapes=[['a', shape], ['b', shape]], inputs_need_grad=True)
mod1.init_params()
mod1.forward(data_batch=mx.io.DataBatch(data=[mx.nd.ones(shape), mx.nd.ones(shape)]), is_train=True)
mod1.backward([mx.nd.ones(shape)])
mod1_input_grads = mod1.get_input_grads()
mod2 = mx.mod.Module(c, context=ctxs, data_names=['a', 'b'], label_names=None)
mod2.bind(data_shapes=[['a', shape], ['b', shape]], inputs_need_grad=True)
mod2.init_params()
mod2.forward(data_batch=mx.io.DataBatch(data=[mx.nd.ones(shape), mx.nd.ones(shape)]), is_train=True)
mod2.backward([mx.nd.ones(shape)])
mod2_input_grads = mod2.get_input_grads()
if grad_ctxs is not None:
assert(mod1_input_grads[0].context == grad_ctxs[0])
assert(mod1_input_grads[1].context == grad_ctxs[1])
assert(np.all(mod1_input_grads[0].asnumpy() == mod2_input_grads[0].asnumpy()))
assert(np.all(mod1_input_grads[1].asnumpy() == mod2_input_grads[1].asnumpy()))
check_module_ctx_group([mx.cpu(0)], {'dev1': mx.cpu(1), 'dev2': mx.cpu(2)}, grad_ctxs=[mx.cpu(1), mx.cpu(2)])
check_module_ctx_group([mx.cpu(0), mx.cpu(1)],
[{'dev1': mx.cpu(2), 'dev2': mx.cpu(3)}, {'dev1': mx.cpu(4), 'dev2': mx.cpu(5)}])
check_module_ctx_group([mx.cpu(0), mx.cpu(1)], {'dev1': mx.cpu(2), 'dev2': mx.cpu(3)})
check_module_ctx_group([mx.cpu(0), mx.cpu(1)], {'dev1': mx.cpu(2), 'dev2': [mx.cpu(3)]})
check_module_ctx_group([mx.cpu(0), mx.cpu(1)], {'dev1':mx.cpu(2), 'dev2':[mx.cpu(3), mx.cpu(3)]})
check_module_ctx_group([mx.cpu(0), mx.cpu(1)],
{'dev1':[mx.cpu(2), mx.cpu(2)], 'dev2':[mx.cpu(3), mx.cpu(3)]})
@with_seed()
def test_bucket_module_ctx_group():
num_hidden = 10
batch_size = 5
def sym_gen(seq_len):
with mx.AttrScope(ctx_group='dev1'):
data = mx.symbol.Variable('data')
weight = mx.symbol.Variable('dev1_weight')
bias = mx.symbol.Variable('dev1_bias')
fc = data
for i in range(seq_len):
fc = mx.symbol.FullyConnected(data=fc, weight=weight, bias=bias,
name='dev1_fc_%d' % i, num_hidden=num_hidden)
with mx.AttrScope(ctx_group='dev2'):
label = mx.symbol.Variable('label')
weight = mx.symbol.Variable('dev2_weight')
bias = mx.symbol.Variable('dev2_bias')
for i in range(seq_len):
fc = mx.symbol.FullyConnected(data=fc, weight=weight, bias=bias,
name='dev2_fc_%d' % i, num_hidden=num_hidden)
sym = mx.symbol.SoftmaxOutput(fc, label, name='softmax')
return sym, ('data',), ('label',)
mod = mx.mod.BucketingModule(sym_gen=sym_gen, default_bucket_key=10, context=[mx.cpu(0)],
group2ctxs=[{'dev1': mx.cpu(1), 'dev2': mx.cpu(2)}])
mod.bind(data_shapes=[['data', (batch_size, num_hidden)]],
label_shapes=[['label', (batch_size,)]],
for_training=True, inputs_need_grad=True)
assert(mod.binded)
@with_seed()
def test_module_layout():
sym = mx.sym.Variable('data')
sym = mx.sym.Activation(data=sym, act_type='relu', __layout__='TNC')
dshape = (3, 8, 7)
mod = mx.mod.Module(sym, ('data',), None, context=[mx.cpu(0), mx.cpu(1)])
mod.bind(data_shapes=[mx.io.DataDesc('data', dshape, layout='TNC')])
mod.init_params()
mod.forward(mx.io.DataBatch(data=[mx.nd.ones(dshape)],
label=None))
mod.backward([mx.nd.ones(dshape)])
assert mod.get_outputs()[0].shape == dshape
hdshape = (3, 4, 7)
for x in mod.get_outputs(merge_multi_context=False)[0]:
assert x.shape == hdshape
@with_seed()
def test_save_load():
def dict_equ(a, b):
assert set(a) == set(b)
for k in a:
assert (a[k].asnumpy() == b[k].asnumpy()).all()
sym = mx.sym.Variable('data')
sym = mx.sym.FullyConnected(sym, num_hidden=100)
# single device
mod = mx.mod.Module(sym, ('data',))
mod.bind(data_shapes=[('data', (10, 10))])
mod.init_params()
mod.init_optimizer(optimizer_params={'learning_rate':0.1, 'momentum':0.9})
mod.update()
mod.save_checkpoint('test', 0, save_optimizer_states=True)
mod2 = mx.mod.Module.load('test', 0, load_optimizer_states=True, data_names=('data',))
mod2.bind(data_shapes=[('data', (10, 10))])
mod2.init_optimizer(optimizer_params={'learning_rate':0.1, 'momentum':0.9})
assert mod._symbol.tojson() == mod2._symbol.tojson()
dict_equ(mod.get_params()[0], mod2.get_params()[0])
dict_equ(mod._updater.states, mod2._updater.states)
# multi device
mod = mx.mod.Module(sym, ('data',), context=[mx.cpu(0), mx.cpu(1)])
mod.bind(data_shapes=[('data', (10, 10))])
mod.init_params()
mod.init_optimizer(optimizer_params={'learning_rate':0.1, 'momentum':0.9})
mod.update()
mod.save_checkpoint('test', 0, save_optimizer_states=True)
mod2 = mx.mod.Module.load('test', 0, load_optimizer_states=True, data_names=('data',))
mod2.bind(data_shapes=[('data', (10, 10))])
mod2.init_optimizer(optimizer_params={'learning_rate':0.1, 'momentum':0.9})
assert mod._symbol.tojson() == mod2._symbol.tojson()
dict_equ(mod.get_params()[0], mod2.get_params()[0])
dict_equ(mod._kvstore._updater.states, mod2._updater.states)
@with_seed()
def test_module_reshape():
data = mx.sym.Variable('data')
sym = mx.sym.FullyConnected(data, num_hidden=20, name='fc')
dshape = (7, 20)
mod = mx.mod.Module(sym, ('data',), None, context=[mx.cpu(0), mx.cpu(1)])
mod.bind(data_shapes=[('data', dshape)])
mod.init_params()
mod.init_optimizer(optimizer_params={'learning_rate': 1})
mod.forward(mx.io.DataBatch(data=[mx.nd.ones(dshape)],
label=None))
mod.backward([mx.nd.ones(dshape)])
mod.update()
assert mod.get_outputs()[0].shape == dshape
assert (mod.get_params()[0]['fc_bias'].asnumpy() == -1).all()
dshape = (14, 20)
mod.reshape(data_shapes=[('data', dshape)])
mod.forward(mx.io.DataBatch(data=[mx.nd.ones(dshape)],
label=None))
mod.backward([mx.nd.ones(dshape)])
mod.update()
assert mod.get_outputs()[0].shape == dshape
assert (mod.get_params()[0]['fc_bias'].asnumpy() == -3).all()
@with_seed()
def test_module_states():
stack = mx.rnn.SequentialRNNCell()
for i in range(2):
stack.add(mx.rnn.LSTMCell(num_hidden=20, prefix='lstm_l%d_'%i))
begin_state = stack.begin_state(func=mx.sym.Variable)
_, states = stack.unroll(10, begin_state=begin_state, inputs=mx.sym.Variable('data'))
state_names = [i.name for i in begin_state]
mod = mx.mod.Module(mx.sym.Group(states), context=[mx.cpu(0), mx.cpu(1)],
label_names=None, state_names=state_names)
mod.bind(data_shapes=[('data', (5, 10))], label_shapes=None, for_training=False)
mod.init_params()
batch = mx.io.DataBatch(data=[mx.nd.zeros((5, 10))], label=[])
mod.set_states(value=1)
mod.forward(batch)
out = mod.get_outputs(merge_multi_context=False)
out1 = mod.get_outputs(merge_multi_context=True)
mod.set_states(states=out)
mod.forward(batch)
out2 = mod.get_outputs(merge_multi_context=True)
for x1, x2 in zip(out1, out2):
assert not mx.test_utils.almost_equal(x1.asnumpy(), x2.asnumpy(), rtol=1e-3)
@with_seed()
def test_module_switch_bucket():
vocab_dim = 5000
num_hidden = 100
num_embedding = 100
num_layer = 2
default_key = 10
test_key = 5
batch_size = 32
contexts = [mx.cpu(0)]
initializer = mx.init.Xavier(factor_type="in", magnitude=2.34)
#generate symbols for an LSTM network
def sym_gen(seq_len):
data = mx.sym.Variable('data')
label = mx.sym.Variable('softmax_label')
embed = mx.sym.Embedding(data=data, input_dim=vocab_dim,
output_dim=num_embedding)
stack = mx.rnn.SequentialRNNCell()
for i in range(num_layer):
stack.add(mx.rnn.LSTMCell(num_hidden=num_hidden, prefix='lstm_l%d_'%i))
outputs, states = stack.unroll(seq_len, inputs=embed, merge_outputs=True)
pred = mx.sym.Reshape(outputs, shape=(-1, num_hidden))
pred = mx.sym.FullyConnected(data=pred, num_hidden=vocab_dim, name='pred')
label = mx.sym.Reshape(label, shape=(-1,))
pred = mx.sym.SoftmaxOutput(data=pred, label=label, name='softmax')
return pred, ('data',), ('softmax_label',)
def create_bucketing_module(key):
model = mx.mod.BucketingModule(
sym_gen = sym_gen,
default_bucket_key = key,
context = contexts)
model.bind([('data', (batch_size, key))],
[('softmax_label', (batch_size, key))], True, False)
model.init_params(initializer=initializer)
return model
#initialize the bucketing module with the default bucket key
bucketing_model = create_bucketing_module(default_key)
#check name
assert bucketing_model.symbol.list_arguments()[1] == "embedding0_weight",\
"Error in assigning names for args in BucketingModule"
#switch to test_key
bucketing_model.switch_bucket(test_key, [('data', (batch_size, test_key))],
[('softmax_label', (batch_size, test_key))])
total_bytes_before = bucketing_model._buckets[default_key]._total_exec_bytes
#remove test_key and switch again
del bucketing_model._buckets[test_key]
bucketing_model.switch_bucket(test_key, [('data', (batch_size, test_key))],
[('softmax_label', (batch_size, test_key))])
total_bytes_after = bucketing_model._buckets[default_key]._total_exec_bytes
#the default bucket is expected to reuse the bytes allocated
assert total_bytes_after == total_bytes_before
@with_seed(11)
def test_module_set_params():
# data iter
data = mx.nd.array([[0.05, .10]])
label = mx.nd.array([[.01, 0.99]])
train_data = mx.io.NDArrayIter(data, label, batch_size=1)
# symbols
x = mx.symbol.Variable('data')
x = mx.symbol.FullyConnected(name='fc_0', data=x, num_hidden=2)
x = mx.symbol.Activation(name="act_0", data=x, act_type='sigmoid')
x = mx.symbol.FullyConnected(name='fc_1', data=x, num_hidden=2)
x = mx.symbol.Activation(name="act_1", data=x, act_type='sigmoid')
x = mx.symbol.LinearRegressionOutput(data=x, name='softmax', grad_scale=2)
# create module
mod = mx.mod.Module(x, context=[mx.cpu()])
mod.bind(train_data.provide_data, label_shapes=train_data.provide_label,
for_training=True)
arg_params_correct = {'fc_0_weight': mx.nd.array([[.15, .20], [.25, .30]]),
'fc_0_bias' : mx.nd.array([.35, .35]),
'fc_1_weight': mx.nd.array([[.40, .45], [.50, .55]]),
'fc_1_bias' : mx.nd.array([.60, .60])}
arg_params_missing = {'fc_0_weight': mx.nd.array([[.15, .20], [.25, .30]]),
'fc_0_bias' : mx.nd.array([.35, .35]),
'fc_1_weight': mx.nd.array([[.40, .45], [.50, .55]])}
arg_params_extra = {'fc_0_weight': mx.nd.array([[.15, .20], [.25, .30]]),
'fc_0_bias' : mx.nd.array([.35, .35]),
'fc_1_weight': mx.nd.array([[.40, .45], [.50, .55]]),
'fc_1_bias' : mx.nd.array([.60, .60]),
'fc_2_weight': mx.nd.array([.60, .60])}
arg_params_missing_extra = {'fc_2_weight': mx.nd.array([.60, .60])}
# test regular set_params
mod.set_params(force_init=True, arg_params=arg_params_correct, aux_params={})
# test allow missing
mod.set_params(force_init=True, arg_params=arg_params_missing, aux_params={}, allow_missing=True)
assertRaises(RuntimeError, mod.set_params,
force_init=True, arg_params=arg_params_missing,
aux_params={}, allow_missing=False)
# test allow extra
mod.set_params(force_init=True, arg_params=arg_params_extra, aux_params={}, allow_missing=True, allow_extra=True)
assertRaises(ValueError, mod.set_params,
force_init=True, arg_params=arg_params_extra,
aux_params={}, allow_missing=True, allow_extra=False)
# test params that are both missing and extra: missing not allowed, raises RuntimeError
assertRaises(RuntimeError, mod.set_params,
force_init=True, arg_params=arg_params_missing_extra,
aux_params={}, allow_missing=False, allow_extra=False)
# test params that are both missing and extra: extra not allowed, raises ValueError
assertRaises(ValueError, mod.set_params,
force_init=True, arg_params=arg_params_missing_extra,
aux_params={}, allow_missing=True, allow_extra=False)
@with_seed(11)
def test_monitor():
# data iter
data = mx.nd.array([[0.05, .10]])
label = mx.nd.array([[.01, 0.99]])
train_data = mx.io.NDArrayIter(data, label, batch_size=1)
# symbols
x = mx.symbol.Variable('data')
x = mx.symbol.FullyConnected(name='fc_0', data=x, num_hidden=2)
x = mx.symbol.Activation(name="act_0", data=x, act_type='sigmoid')
x = mx.symbol.FullyConnected(name='fc_1', data=x, num_hidden=2)
x = mx.symbol.Activation(name="act_1", data=x, act_type='sigmoid')
x = mx.symbol.LinearRegressionOutput(data=x, name='softmax', grad_scale=2)
# create monitor
def mean_abs(x):
sum_abs = mx.ndarray.sum(mx.ndarray.abs(x))
return mx.ndarray.divide(sum_abs, reduce(lambda x, y: x * y, x.shape))
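# mean_abs(x) == mean(|x|): the reduce(...) product over x.shape is the total
# element count, so sum(|x|) divided by that count is the mean absolute value.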
mon = mx.mon.Monitor(1, stat_func=mean_abs, pattern='.*', sort=True)
# create module
mod = mx.mod.Module(x, context=[mx.cpu()])
mod.bind(train_data.provide_data, label_shapes=train_data.provide_label,
for_training=True)
mod.install_monitor(mon)
arg_params = {'fc_0_weight': mx.nd.array([[.15, .20], [.25, .30]]),
'fc_0_bias' : mx.nd.array([.35, .35]),
'fc_1_weight': mx.nd.array([[.40, .45], [.50, .55]]),
'fc_1_bias' : mx.nd.array([.60, .60])}
mod.init_params(arg_params=arg_params)
data_iter = iter(train_data)
data_batch = next(data_iter)
mon.tic()
mod.forward_backward(data_batch)
res = mon.toc()
keys = ['act_0', 'act_1', 'data', 'fc_0', 'fc_1', 'softmax']
mon_result_counts = [0, 0, 0, 0, 0, 0]
assert(len(res) == 21)
for n, k, v in res:
for idx, key in enumerate(keys):
if k.startswith(key):
mon_result_counts[idx] += 1
break
assert(mon_result_counts == [2, 2, 1, 6, 6, 4])
@with_seed()
def test_executor_group():
def get_rnn_sym(num_layers, num_words, num_hidden, num_embed, seq_len, sparse_embedding):
stack = mx.rnn.SequentialRNNCell()
for i in range(num_layers):
stack.add(mx.rnn.LSTMCell(num_hidden=num_hidden, prefix='lstm_l%d_' % i))
data = mx.sym.Variable('data')
label = mx.sym.Variable('softmax_label')
if sparse_embedding:
embed_weight = mx.sym.Variable('embed_weight', stype='row_sparse')
embed = mx.sym.contrib.SparseEmbedding(data=data, input_dim=num_words,
weight=embed_weight, output_dim=num_embed,
name='embed')
else:
embed = mx.sym.Embedding(data=data, input_dim=num_words,
output_dim=num_embed, name='embed')
stack.reset()
outputs, states = stack.unroll(seq_len, inputs=embed, merge_outputs=True)
pred = mx.sym.Reshape(outputs, shape=(-1, num_hidden))
pred = mx.sym.FullyConnected(data=pred, num_hidden=num_words, name='pred')
label = mx.sym.Reshape(label, shape=(-1,))
pred = mx.sym.SoftmaxOutput(data=pred, label=label, name='softmax')
return pred
def test_shared_exec_group(exec_grp_shared, exec_grp_created, shared_arg_names=None,
extra_args=None, check_shared_grad=True):
# Test shared data arrays
for i in range(len(exec_grp_shared.execs)):
# test same shared_data_arrays for two exec groups
shared_data_array1 = exec_grp_shared.shared_data_arrays[i]
shared_data_array2 = exec_grp_created.shared_data_arrays[i]
if extra_args is not None:
assert len(shared_data_array1) == len(extra_args),\
"exec_grp_shared.shared_data_arrays[%d] should have same number of args as extra_args" % i
assert len(shared_data_array1) == len(shared_data_array2),\
"length of shared_data_array of the shared executor group not equal to the created executor group"
for k, v in shared_data_array1.items():
if extra_args is not None:
assert k in extra_args, "arg %s is not in extra_args" % k
assert k in shared_data_array2,\
"arg %s of the shared executor group not in the shared_data_array of the created executor group" % k
assert mx.test_utils.same_array(v, shared_data_array2[k])
for data_name, array in exec_grp_shared.shared_data_arrays[i].items():
assert data_name in exec_grp_created.shared_data_arrays[i], \
"Shared input data '%s' is not in " \
"shared_data_arrays of created executor group." % (data_name)
assert mx.test_utils.same_array(array, exec_grp_created.shared_data_arrays[i][data_name]), \
"Shared input data '%s' does not share memory." % (data_name)
# Test shared argument arrays and gradient arrays
exec_shared = exec_grp_shared.execs[i]
exec_created = exec_grp_created.execs[i]
if shared_arg_names is not None:
# test shared arguments
for arg_name in shared_arg_names:
assert arg_name in exec_created.arg_dict, \
"Shared argument '%s' is not in arg_dict of created executor group." % (arg_name)
assert mx.test_utils.same_array(exec_shared.arg_dict[arg_name], exec_created.arg_dict[arg_name]), \
"Shared argument '%s' does not share memory." % (arg_name)
# test shared argument gradients
if check_shared_grad:
for arg_name in shared_arg_names:
assert arg_name in exec_created.grad_dict, \
"Shared argument gradient '%s' is not in " \
"grad_dict of created executor group." % (arg_name)
assert mx.test_utils.same_array(exec_shared.grad_dict[arg_name], \
exec_created.grad_dict[arg_name]), \
"Shared argument gradient '%s' does not share memory." % (arg_name)
for arg_name, grad in exec_grp_shared.grad_req.items():
assert grad == exec_grp_created.grad_req[arg_name], \
"Gradient requirements for shared argument '%s' are inconsistent. " \
"Shared executor group requires '%s' while created executor group requires '%s'" \
%(arg_name, grad, exec_grp_created.grad_req[arg_name])
def check_shared_exec_group(sparse_embedding):
# generate an rnn sym with #layers=5
sym = get_rnn_sym(num_layers=3, num_words=num_words, num_hidden=num_hidden,
num_embed=num_embed, seq_len=max_bucket_size,
sparse_embedding=sparse_embedding)
arg_names1 = sym.list_arguments()
input_names = [name[0] for name in data_shapes] + [name[0] for name in label_shapes]
shared_arg_names = [name for name in arg_names1 if name not in input_names]
exec_group1 = DataParallelExecutorGroup(symbol=sym, contexts=contexts,
workload=workload, data_shapes=data_shapes,
label_shapes=label_shapes, param_names=shared_arg_names,
for_training=True, inputs_need_grad=False)
# shared_data_arrays should only have input "data" and "softmax_label" arrays
for i in range(len(contexts)):
assert len(exec_group1.shared_data_arrays[i]) == len(input_names),\
"exec_group1.shared_data_arrays[%d] should have the same number of names as in input_names" % i
for name in input_names:
assert name in exec_group1.shared_data_arrays[i],\
"arg %s should be in exec_group1.shared_data_arrays[%d]" % (name, i)
# generate an rnn sym with #layers=5
sym = get_rnn_sym(num_layers=5, num_words=num_words, num_hidden=num_hidden,
num_embed=num_embed, seq_len=max_bucket_size,
sparse_embedding=sparse_embedding)
arg_names2 = sym.list_arguments()
exec_group2 = DataParallelExecutorGroup(symbol=sym, contexts=contexts,
workload=workload, data_shapes=data_shapes,
label_shapes=label_shapes, param_names=shared_arg_names,
for_training=True, inputs_need_grad=False,
shared_group=exec_group1)
extra_args = [name for name in arg_names2 if name not in shared_arg_names]
check_shared_grad = not sparse_embedding
test_shared_exec_group(exec_grp_shared=exec_group1, exec_grp_created=exec_group2,
shared_arg_names=shared_arg_names, extra_args=extra_args,
check_shared_grad=check_shared_grad)
contexts = [mx.cpu(0), mx.cpu(1)]
workload = [1] * len(contexts)
batch_size = 32
max_bucket_size = 80
num_words = 1000
num_hidden = 100
num_embed = 200
data_shapes = [('data', (batch_size, max_bucket_size))]
label_shapes = [('softmax_label', (batch_size, max_bucket_size))]
sparse_embedding_opt = [True, False]
for opt in sparse_embedding_opt:
check_shared_exec_group(opt)
@with_seed(11)
def test_factorization_machine_module(verbose=False):
""" Test factorization machine model with sparse operators """
def check_factorization_machine_module(optimizer=None, num_epochs=None):
print("check_factorization_machine_module( {} )".format(optimizer))
def fm(factor_size, feature_dim, init):
x = mx.symbol.Variable("data", stype='csr')
v = mx.symbol.Variable("v", shape=(feature_dim, factor_size),
init=init, stype='row_sparse')
w1_weight = mx.symbol.var('w1_weight', shape=(feature_dim, 1),
init=init, stype='row_sparse')
w1_bias = mx.symbol.var('w1_bias', shape=(1))
w1 = mx.symbol.broadcast_add(mx.symbol.dot(x, w1_weight), w1_bias)
v_s = mx.symbol._internal._square_sum(data=v, axis=1, keepdims=True)
x_s = mx.symbol.square(data=x)
bd_sum = mx.sym.dot(x_s, v_s)
w2 = mx.symbol.dot(x, v)
w2_squared = 0.5 * mx.symbol.square(data=w2)
w_all = mx.symbol.Concat(w1, w2_squared, dim=1)
sum1 = mx.symbol.sum(data=w_all, axis=1, keepdims=True)
sum2 = 0.5 * mx.symbol.negative(bd_sum)
model = mx.sym.elemwise_add(sum1, sum2)
y = mx.symbol.Variable("label")
model = mx.symbol.LinearRegressionOutput(data=model, label=y)
return model
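# The fm graph above implements the standard factorization machine prediction
#   y(x) = w0 + sum_i w1_i * x_i + 0.5 * sum_f [ (sum_i v_{i,f} x_i)^2 - sum_i v_{i,f}^2 x_i^2 ]
# where w0 is folded into w1_bias: w2_squared holds the squared-sum term per
# factor, bd_sum holds the sum-of-squares term, and sum2 negates the latter
# before the final elemwise_add.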
# model
init = mx.initializer.Normal(sigma=0.01)
factor_size = 4
feature_dim = 10000
model = fm(factor_size, feature_dim, init)
# data iter
num_batches = 5
batch_size = 64
num_samples = batch_size * num_batches
# generate some random csr data
csr_nd = rand_ndarray((num_samples, feature_dim), 'csr', 0.1)
label = mx.nd.ones((num_samples,1))
# the alternative is to use LibSVMIter
train_iter = mx.io.NDArrayIter(data=csr_nd,
label={'label':label},
batch_size=batch_size,
last_batch_handle='discard')
# create module
mod = mx.mod.Module(symbol=model, data_names=['data'], label_names=['label'])
# allocate memory by given the input data and lable shapes
mod.bind(data_shapes=train_iter.provide_data, label_shapes=train_iter.provide_label)
# initialize parameters by uniform random numbers
mod.init_params(initializer=init)
if optimizer == 'sgd':
# use Sparse SGD with learning rate 0.1 to train
sgd = mx.optimizer.SGD(momentum=0.1, clip_gradient=5.0, learning_rate=0.01,
rescale_grad=1.0/batch_size)
mod.init_optimizer(optimizer=sgd)
if num_epochs is None:
num_epochs = 10
expected_accuracy = 0.02
elif optimizer == 'adam':
# use Sparse Adam to train
adam = mx.optimizer.Adam(clip_gradient=5.0, learning_rate=0.0005,
rescale_grad=1.0/batch_size)
mod.init_optimizer(optimizer=adam)
if num_epochs is None:
num_epochs = 10
expected_accuracy = 0.05
elif optimizer == 'adagrad':
# use Sparse AdaGrad with learning rate 0.1 to train
adagrad = mx.optimizer.AdaGrad(clip_gradient=5.0, learning_rate=0.01,
rescale_grad=1.0/batch_size)
mod.init_optimizer(optimizer=adagrad)
if num_epochs is None:
num_epochs = 20
expected_accuracy = 0.09
else:
raise AssertionError("Unsupported optimizer type '" + optimizer + "' specified")
# use accuracy as the metric
metric = mx.metric.create('MSE')
# train 'num_epochs' epoch
for epoch in range(num_epochs):
train_iter.reset()
metric.reset()
for batch in train_iter:
mod.forward(batch, is_train=True) # compute predictions
mod.update_metric(metric, batch.label) # accumulate prediction accuracy
mod.backward() # compute gradients
mod.update() # update parameters
print('Epoch %d, Training %s' % (epoch, metric.get()))
if num_epochs > 1:
assert(metric.get()[1] < expected_accuracy)
if verbose is True:
print("============ SGD ==========================")
start = time.time() # time.clock() was removed in Python 3.8; use time.time() instead
check_factorization_machine_module('sgd')
if verbose is True:
print("Duration: {}".format(time.time() - start))
print("============ ADAM ==========================")
start = time.time()
check_factorization_machine_module('adam')
if verbose is True:
print("Duration: {}".format(time.time() - start))
print("============ ADAGRAD ==========================")
start = time.time()
check_factorization_machine_module('adagrad')
if verbose is True:
print("Duration: {}".format(time.time() - start))
@with_seed()
def test_module_initializer():
def regression_model(m):
x = mx.symbol.var("data", stype='csr')
v = mx.symbol.var("v", shape=(m, 1), init=mx.init.Uniform(scale=.1),
stype='row_sparse')
model = mx.symbol.dot(lhs=x, rhs=v)
y = mx.symbol.Variable("label")
model = mx.symbol.LinearRegressionOutput(data=model, label=y, name="out")
return model
n, m = 128, 100
model = regression_model(m)
data = mx.nd.zeros(shape=(n, m), stype='csr')
label = mx.nd.zeros((n, 1))
iterator = mx.io.NDArrayIter(data=data, label={'label':label},
batch_size=n, last_batch_handle='discard')
# create module
mod = mx.mod.Module(symbol=model, data_names=['data'], label_names=['label'])
mod.bind(data_shapes=iterator.provide_data, label_shapes=iterator.provide_label)
mod.init_params()
v = mod._arg_params['v']
assert(v.stype == 'row_sparse')
assert(np.sum(v.asnumpy()) != 0)
@with_seed()
def test_forward_reshape():
num_class=10
data1 = mx.sym.Variable('data1')
data2 = mx.sym.Variable('data2')
conv1 = mx.sym.Convolution(data=data1, kernel=(2, 2), num_filter=2, stride=(2, 2))
conv2 = mx.sym.Convolution(data=data2, kernel=(3, 3), num_filter=3, stride=(1, 1))
pooling1 = mx.sym.Pooling(data=conv1, kernel=(2, 2), stride=(1, 1), pool_type="avg")
pooling2 = mx.sym.Pooling(data=conv2, kernel=(2, 2), stride=(1, 1), pool_type="max")
flatten1 = mx.sym.flatten(data=pooling1)
flatten2 = mx.sym.flatten(data=pooling2)
sum = mx.sym.sum(data=flatten1, axis=1) + mx.sym.sum(data=flatten2, axis=1)
fc = mx.sym.FullyConnected(data=sum, num_hidden=num_class)
sym = mx.sym.SoftmaxOutput(data=fc, name='softmax')
dshape1 = (10, 3, 64, 64)
dshape2 = (10, 3, 32, 32)
lshape = (10,)
mod = mx.mod.Module(symbol=sym, data_names=['data1', 'data2'],
label_names=['softmax_label'])
mod.bind(data_shapes=[('data1', dshape1), ('data2', dshape2)],
label_shapes=[('softmax_label', lshape)])
mod.init_params()
mod.init_optimizer(optimizer_params={'learning_rate': 0.01})
# Train with original data shapes
data_batch = mx.io.DataBatch(data=[mx.nd.random.uniform(0, 9, dshape1),
mx.nd.random.uniform(5, 15, dshape2)],
label=[mx.nd.ones(lshape)])
mod.forward(data_batch)
assert mod.get_outputs()[0].shape == tuple([lshape[0], num_class])
mod.backward()
mod.update()
# Train with different batch size
dshape1 = (3, 3, 64, 64)
dshape2 = (3, 3, 32, 32)
lshape = (3,)
data_batch = mx.io.DataBatch(data=[mx.nd.random.uniform(0, 9, dshape1),
mx.nd.random.uniform(5, 15, dshape2)],
label=[mx.nd.ones(lshape)])
mod.forward(data_batch)
assert mod.get_outputs()[0].shape == tuple([lshape[0], num_class])
mod.backward()
mod.update()
dshape1 = (20, 3, 64, 64)
dshape2 = (20, 3, 32, 32)
lshape = (20,)
data_batch = mx.io.DataBatch(data=[mx.nd.random.uniform(3, 5, dshape1),
mx.nd.random.uniform(10, 25, dshape2)],
label=[mx.nd.ones(lshape)])
mod.forward(data_batch)
assert mod.get_outputs()[0].shape == tuple([lshape[0], num_class])
mod.backward()
mod.update()
#Train with both different batch size and data shapes
dshape1 = (20, 3, 120, 120)
dshape2 = (20, 3, 32, 64)
lshape = (20,)
data_batch = mx.io.DataBatch(data=[mx.nd.random.uniform(0, 9, dshape1),
mx.nd.random.uniform(5, 15, dshape2)],
label=[mx.nd.ones(lshape)])
mod.forward(data_batch)
assert mod.get_outputs()[0].shape == tuple([lshape[0], num_class])
mod.backward()
mod.update()
dshape1 = (5, 3, 28, 40)
dshape2 = (5, 3, 24, 16)
lshape = (5,)
data_batch = mx.io.DataBatch(data=[mx.nd.random.uniform(0, 9, dshape1),
mx.nd.random.uniform(15, 25, dshape2)],
label=[mx.nd.ones(lshape)])
mod.forward(data_batch)
assert mod.get_outputs()[0].shape == tuple([lshape[0], num_class])
mod.backward()
mod.update()
#Test score
dataset_shape1 = (30, 3, 30, 30)
dataset_shape2 = (30, 3, 20, 40)
labelset_shape = (30,)
eval_dataiter = mx.io.NDArrayIter(data=[mx.nd.random.uniform(0, 9, dataset_shape1),
mx.nd.random.uniform(15, 25, dataset_shape2)],
label=[mx.nd.ones(labelset_shape)],
batch_size=5)
assert len(mod.score(eval_data=eval_dataiter, eval_metric='acc')) == 1
#Test prediction
dshape1 = (1, 3, 30, 30)
dshape2 = (1, 3, 20, 40)
dataset_shape1 = (10, 3, 30, 30)
dataset_shape2 = (10, 3, 20, 40)
pred_dataiter = mx.io.NDArrayIter(data=[mx.nd.random.uniform(0, 9, dataset_shape1),
mx.nd.random.uniform(15, 25, dataset_shape2)])
mod.bind(data_shapes=[('data1', dshape1), ('data2', dshape2)],
for_training=False, force_rebind=True)
assert mod.predict(pred_dataiter).shape == tuple([10, num_class])
#Test forward with other data batch API
Batch = namedtuple('Batch', ['data'])
data = mx.sym.Variable('data')
out = data * 2
mod = mx.mod.Module(symbol=out, label_names=None)
mod.bind(data_shapes=[('data', (1, 10))])
mod.init_params()
data1 = [mx.nd.ones((1, 10))]
mod.forward(Batch(data1))
assert mod.get_outputs()[0].shape == (1, 10)
data2 = [mx.nd.ones((3, 5))]
mod.forward(Batch(data2))
assert mod.get_outputs()[0].shape == (3, 5)
if __name__ == '__main__':
import nose
nose.runmodule()
| apache-2.0 |
bgxavier/neutron | neutron/plugins/brocade/db/models.py | 63 | 4551 | # Copyright 2013 Brocade Communications System, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Brocade specific database schema/model."""
import sqlalchemy as sa
from neutron.db import model_base
from neutron.db import models_v2
class BrocadeNetwork(model_base.BASEV2, models_v2.HasId):
"""Schema for brocade network."""
vlan = sa.Column(sa.String(10))
class BrocadePort(model_base.BASEV2):
"""Schema for brocade port."""
port_id = sa.Column(sa.String(36), primary_key=True, default="",
server_default='')
network_id = sa.Column(sa.String(36),
sa.ForeignKey("brocadenetworks.id"),
nullable=False)
admin_state_up = sa.Column(sa.Boolean, nullable=False)
physical_interface = sa.Column(sa.String(36))
vlan_id = sa.Column(sa.String(36))
tenant_id = sa.Column(sa.String(36))
def create_network(context, net_id, vlan):
"""Create a brocade specific network/port-profiles."""
session = context.session
with session.begin(subtransactions=True):
net = BrocadeNetwork(id=net_id, vlan=vlan)
session.add(net)
return net
def delete_network(context, net_id):
"""Delete a brocade specific network/port-profiles."""
session = context.session
with session.begin(subtransactions=True):
net = (session.query(BrocadeNetwork).filter_by(id=net_id).first())
if net is not None:
session.delete(net)
def get_network(context, net_id, fields=None):
"""Get brocade specific network, with vlan extension."""
session = context.session
return (session.query(BrocadeNetwork).filter_by(id=net_id).first())
def get_networks(context, filters=None, fields=None):
"""Get all brocade specific networks."""
session = context.session
try:
nets = session.query(BrocadeNetwork).all()
return nets
except sa.exc.SQLAlchemyError:
return None
def create_port(context, port_id, network_id, physical_interface,
vlan_id, tenant_id, admin_state_up):
"""Create a brocade specific port, has policy like vlan."""
# port_id is truncated: since the linux-bridge tap device names are
# based on truncated port id, this enables port lookups using
# tap devices
port_id = port_id[0:11]
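# e.g. a 36-char UUID such as '0a1b2c3d-4e5f-...' is stored as its first 11
# chars '0a1b2c3d-4e' (illustrative value), matching the tap<id> device name.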
session = context.session
with session.begin(subtransactions=True):
port = BrocadePort(port_id=port_id,
network_id=network_id,
physical_interface=physical_interface,
vlan_id=vlan_id,
admin_state_up=admin_state_up,
tenant_id=tenant_id)
session.add(port)
return port
def get_port(context, port_id):
"""get a brocade specific port."""
port_id = port_id[0:11]
session = context.session
port = (session.query(BrocadePort).filter_by(port_id=port_id).first())
return port
def get_ports(context, network_id=None):
"""get a brocade specific port."""
session = context.session
ports = (session.query(BrocadePort).filter_by(network_id=network_id).all())
return ports
def delete_port(context, port_id):
"""delete brocade specific port."""
port_id = port_id[0:11]
session = context.session
with session.begin(subtransactions=True):
port = (session.query(BrocadePort).filter_by(port_id=port_id).first())
if port is not None:
session.delete(port)
def get_port_from_device(session, port_id):
"""get port from the tap device."""
# device is same as truncated port_id
port = (session.query(BrocadePort).filter_by(port_id=port_id).first())
return port
def update_port_state(context, port_id, admin_state_up):
"""Update port attributes."""
port_id = port_id[0:11]
session = context.session
session.query(BrocadePort).filter_by(
port_id=port_id).update({'admin_state_up': admin_state_up})
| apache-2.0 |
snasoft/QtCreatorPluginsPack | Bin/3rdParty/vera/bin/lib/test/test_isinstance.py | 198 | 9806 | # Tests some corner cases with isinstance() and issubclass(). While these
# tests use new style classes and properties, they actually do whitebox
# testing of error conditions uncovered when using extension types.
import unittest
from test import test_support
import sys
class TestIsInstanceExceptions(unittest.TestCase):
# Test to make sure that an AttributeError when accessing the instance's
# class's bases is masked. This was actually a bug in Python 2.2 and
# 2.2.1 where the exception wasn't caught but it also wasn't being cleared
# (leading to an "undetected error" in the debug build). Set up is,
# isinstance(inst, cls) where:
#
# - inst isn't an InstanceType
# - cls isn't a ClassType, a TypeType, or a TupleType
# - cls has a __bases__ attribute
# - inst has a __class__ attribute
# - inst.__class__ as no __bases__ attribute
#
# Sounds complicated, I know, but this mimics a situation where an
# extension type raises an AttributeError when its __bases__ attribute is
# gotten. In that case, isinstance() should return False.
def test_class_has_no_bases(self):
class I(object):
def getclass(self):
# This must return an object that has no __bases__ attribute
return None
__class__ = property(getclass)
class C(object):
def getbases(self):
return ()
__bases__ = property(getbases)
self.assertEqual(False, isinstance(I(), C()))
# Like above except that inst.__class__.__bases__ raises an exception
# other than AttributeError
def test_bases_raises_other_than_attribute_error(self):
class E(object):
def getbases(self):
raise RuntimeError
__bases__ = property(getbases)
class I(object):
def getclass(self):
return E()
__class__ = property(getclass)
class C(object):
def getbases(self):
return ()
__bases__ = property(getbases)
self.assertRaises(RuntimeError, isinstance, I(), C())
# Here's a situation where getattr(cls, '__bases__') raises an exception.
# If that exception is not AttributeError, it should not get masked
def test_dont_mask_non_attribute_error(self):
class I: pass
class C(object):
def getbases(self):
raise RuntimeError
__bases__ = property(getbases)
self.assertRaises(RuntimeError, isinstance, I(), C())
# Like above, except that getattr(cls, '__bases__') raises an
# AttributeError, which /should/ get masked as a TypeError
def test_mask_attribute_error(self):
class I: pass
class C(object):
def getbases(self):
raise AttributeError
__bases__ = property(getbases)
self.assertRaises(TypeError, isinstance, I(), C())
# These tests are similar to above, but tickle certain code paths in
# issubclass() instead of isinstance() -- really PyObject_IsSubclass()
# vs. PyObject_IsInstance().
class TestIsSubclassExceptions(unittest.TestCase):
def test_dont_mask_non_attribute_error(self):
class C(object):
def getbases(self):
raise RuntimeError
__bases__ = property(getbases)
class S(C): pass
self.assertRaises(RuntimeError, issubclass, C(), S())
def test_mask_attribute_error(self):
class C(object):
def getbases(self):
raise AttributeError
__bases__ = property(getbases)
class S(C): pass
self.assertRaises(TypeError, issubclass, C(), S())
# Like above, but test the second branch, where the __bases__ of the
# second arg (the cls arg) is tested. This means the first arg must
# return a valid __bases__, and it's okay for it to be a normal --
# unrelated by inheritance -- class.
def test_dont_mask_non_attribute_error_in_cls_arg(self):
class B: pass
class C(object):
def getbases(self):
raise RuntimeError
__bases__ = property(getbases)
self.assertRaises(RuntimeError, issubclass, B, C())
def test_mask_attribute_error_in_cls_arg(self):
class B: pass
class C(object):
def getbases(self):
raise AttributeError
__bases__ = property(getbases)
self.assertRaises(TypeError, issubclass, B, C())
# meta classes for creating abstract classes and instances
class AbstractClass(object):
def __init__(self, bases):
self.bases = bases
def getbases(self):
return self.bases
__bases__ = property(getbases)
def __call__(self):
return AbstractInstance(self)
class AbstractInstance(object):
def __init__(self, klass):
self.klass = klass
def getclass(self):
return self.klass
__class__ = property(getclass)
# abstract classes
AbstractSuper = AbstractClass(bases=())
AbstractChild = AbstractClass(bases=(AbstractSuper,))
# normal classes
class Super:
pass
class Child(Super):
pass
# new-style classes
class NewSuper(object):
pass
class NewChild(NewSuper):
pass
class TestIsInstanceIsSubclass(unittest.TestCase):
# Tests to ensure that isinstance and issubclass work on abstract
# classes and instances. Before the 2.2 release, TypeErrors were
# raised when boolean values should have been returned. The bug was
    # triggered by mixing 'normal' classes and instances with
# 'abstract' classes and instances. This case tries to test all
# combinations.
def test_isinstance_normal(self):
# normal instances
self.assertEqual(True, isinstance(Super(), Super))
self.assertEqual(False, isinstance(Super(), Child))
self.assertEqual(False, isinstance(Super(), AbstractSuper))
self.assertEqual(False, isinstance(Super(), AbstractChild))
self.assertEqual(True, isinstance(Child(), Super))
self.assertEqual(False, isinstance(Child(), AbstractSuper))
def test_isinstance_abstract(self):
# abstract instances
self.assertEqual(True, isinstance(AbstractSuper(), AbstractSuper))
self.assertEqual(False, isinstance(AbstractSuper(), AbstractChild))
self.assertEqual(False, isinstance(AbstractSuper(), Super))
self.assertEqual(False, isinstance(AbstractSuper(), Child))
self.assertEqual(True, isinstance(AbstractChild(), AbstractChild))
self.assertEqual(True, isinstance(AbstractChild(), AbstractSuper))
self.assertEqual(False, isinstance(AbstractChild(), Super))
self.assertEqual(False, isinstance(AbstractChild(), Child))
def test_subclass_normal(self):
# normal classes
self.assertEqual(True, issubclass(Super, Super))
self.assertEqual(False, issubclass(Super, AbstractSuper))
self.assertEqual(False, issubclass(Super, Child))
self.assertEqual(True, issubclass(Child, Child))
self.assertEqual(True, issubclass(Child, Super))
self.assertEqual(False, issubclass(Child, AbstractSuper))
def test_subclass_abstract(self):
# abstract classes
self.assertEqual(True, issubclass(AbstractSuper, AbstractSuper))
self.assertEqual(False, issubclass(AbstractSuper, AbstractChild))
self.assertEqual(False, issubclass(AbstractSuper, Child))
self.assertEqual(True, issubclass(AbstractChild, AbstractChild))
self.assertEqual(True, issubclass(AbstractChild, AbstractSuper))
self.assertEqual(False, issubclass(AbstractChild, Super))
self.assertEqual(False, issubclass(AbstractChild, Child))
def test_subclass_tuple(self):
        # test with a tuple of classes as the second argument
self.assertEqual(True, issubclass(Child, (Child,)))
self.assertEqual(True, issubclass(Child, (Super,)))
self.assertEqual(False, issubclass(Super, (Child,)))
self.assertEqual(True, issubclass(Super, (Child, Super)))
self.assertEqual(False, issubclass(Child, ()))
self.assertEqual(True, issubclass(Super, (Child, (Super,))))
self.assertEqual(True, issubclass(NewChild, (NewChild,)))
self.assertEqual(True, issubclass(NewChild, (NewSuper,)))
self.assertEqual(False, issubclass(NewSuper, (NewChild,)))
self.assertEqual(True, issubclass(NewSuper, (NewChild, NewSuper)))
self.assertEqual(False, issubclass(NewChild, ()))
self.assertEqual(True, issubclass(NewSuper, (NewChild, (NewSuper,))))
self.assertEqual(True, issubclass(int, (long, (float, int))))
if test_support.have_unicode:
self.assertEqual(True, issubclass(str, (unicode, (Child, NewChild, basestring))))
def test_subclass_recursion_limit(self):
# make sure that issubclass raises RuntimeError before the C stack is
# blown
self.assertRaises(RuntimeError, blowstack, issubclass, str, str)
def test_isinstance_recursion_limit(self):
        # make sure that isinstance raises RuntimeError before the C stack is
# blown
self.assertRaises(RuntimeError, blowstack, isinstance, '', str)
def blowstack(fxn, arg, compare_to):
# Make sure that calling isinstance with a deeply nested tuple for its
# argument will raise RuntimeError eventually.
tuple_arg = (compare_to,)
for cnt in xrange(sys.getrecursionlimit()+5):
tuple_arg = (tuple_arg,)
fxn(arg, tuple_arg)
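# For illustration: after the loop above, tuple_arg is a tuple nested
# sys.getrecursionlimit()+5 levels deep, e.g. ((((compare_to,),),) ...), so
# the recursive tuple walk inside isinstance()/issubclass() must trip the
# interpreter's recursion guard and raise RuntimeError.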
def test_main():
test_support.run_unittest(
TestIsInstanceExceptions,
TestIsSubclassExceptions,
TestIsInstanceIsSubclass
)
if __name__ == '__main__':
test_main()
| lgpl-3.0 |
siliconsmiley/QGIS | python/plugins/processing/algs/lidar/lastools/lasthinPro.py | 1 | 3972 | # -*- coding: utf-8 -*-
"""
***************************************************************************
lasthinPro.py
---------------------
Date : October 2014
Copyright : (C) 2014 by Martin Isenburg
Email : martin near rapidlasso point com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Martin Isenburg'
__date__ = 'October 2014'
__copyright__ = '(C) 2014, Martin Isenburg'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from LAStoolsUtils import LAStoolsUtils
from LAStoolsAlgorithm import LAStoolsAlgorithm
from processing.core.parameters import ParameterBoolean
from processing.core.parameters import ParameterNumber
from processing.core.parameters import ParameterSelection
class lasthinPro(LAStoolsAlgorithm):
THIN_STEP = "THIN_STEP"
OPERATION = "OPERATION"
    OPERATIONS = ["lowest", "random", "highest"]
WITHHELD = "WITHHELD"
CLASSIFY_AS = "CLASSIFY_AS"
CLASSIFY_AS_CLASS = "CLASSIFY_AS_CLASS"
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('lasthinPro')
self.group, self.i18n_group = self.trAlgorithm('LAStools Production')
self.addParametersPointInputFolderGUI()
self.addParameter(ParameterNumber(lasthinPro.THIN_STEP,
self.tr("size of grid used for thinning"), 0, None, 1.0))
self.addParameter(ParameterSelection(lasthinPro.OPERATION,
self.tr("keep particular point per cell"), lasthinPro.OPERATIONS, 0))
self.addParameter(ParameterBoolean(lasthinPro.WITHHELD,
self.tr("mark thinned-away points as withheld"), False))
self.addParameter(ParameterBoolean(lasthinPro.CLASSIFY_AS,
self.tr("classify surviving points as class"), False))
self.addParameter(ParameterNumber(lasthinPro.CLASSIFY_AS_CLASS,
self.tr("class"), 0, None, 8))
self.addParametersOutputDirectoryGUI()
self.addParametersOutputAppendixGUI()
self.addParametersPointOutputFormatGUI()
self.addParametersAdditionalGUI()
self.addParametersCoresGUI()
self.addParametersVerboseGUI()
def processAlgorithm(self, progress):
commands = [os.path.join(LAStoolsUtils.LAStoolsPath(), "bin", "lasthin")]
self.addParametersVerboseCommands(commands)
self.addParametersPointInputFolderCommands(commands)
step = self.getParameterValue(lasthinPro.THIN_STEP)
if step != 0.0:
commands.append("-step")
commands.append(unicode(step))
operation = self.getParameterValue(lasthinPro.OPERATION)
if operation != 0:
commands.append("-" + self.OPERATIONS[operation])
if self.getParameterValue(lasthinPro.WITHHELD):
commands.append("-withheld")
if self.getParameterValue(lasthinPro.CLASSIFY_AS):
commands.append("-classify_as")
commands.append(unicode(self.getParameterValue(lasthinPro.CLASSIFY_AS_CLASS)))
self.addParametersOutputDirectoryCommands(commands)
self.addParametersOutputAppendixCommands(commands)
self.addParametersPointOutputFormatCommands(commands)
self.addParametersAdditionalCommands(commands)
self.addParametersCoresCommands(commands)
LAStoolsUtils.runLAStools(commands, progress)
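# For reference, an illustrative command line assembled by processAlgorithm()
# (paths and flag values below are made up, not taken from a real run):
#
#   /opt/LAStools/bin/lasthin -v -i /data/tiles/*.laz -step 2.0 -highest \
#       -classify_as 8 -odir /data/thinned -odix _thin -olaz -cores 4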
| gpl-2.0 |
alexryndin/ambari | ambari-server/src/test/python/stacks/2.4/AMBARI_INFRA/test_infra_solr.py | 2 | 8157 | #!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from stacks.utils.RMFTestCase import RMFTestCase, Template, InlineTemplate, StaticFile
from resource_management.core.exceptions import ComponentIsNotRunning
from resource_management.libraries.script.config_dictionary import UnknownConfiguration
from mock.mock import MagicMock, call, patch
@patch("os.listdir", new = MagicMock(return_value=['solr-8886.pid']))
@patch("os.path.isdir", new = MagicMock(return_value=True))
class TestInfraSolr(RMFTestCase):
COMMON_SERVICES_PACKAGE_DIR = "AMBARI_INFRA/0.1.0/package"
STACK_VERSION = "2.4"
def configureResourcesCalled(self):
self.assertResourceCalled('Directory', '/var/log/ambari-infra-solr',
owner = 'solr',
group = 'hadoop',
create_parents = True,
cd_access = 'a',
mode = 0755
)
self.assertResourceCalled('Directory', '/var/run/ambari-infra-solr',
owner = 'solr',
group = 'hadoop',
create_parents = True,
cd_access = 'a',
mode = 0755
)
self.assertResourceCalled('Directory', '/opt/ambari_infra_solr/data',
owner = 'solr',
group = 'hadoop',
create_parents = True,
cd_access = 'a',
mode = 0755
)
self.assertResourceCalled('Directory', '/opt/ambari_infra_solr/data/resources',
owner = 'solr',
group = 'hadoop',
create_parents = True,
cd_access = 'a',
mode = 0755
)
self.assertResourceCalled('Directory', '/usr/lib/ambari-infra-solr',
owner = 'solr',
group = 'hadoop',
create_parents = True,
recursive_ownership = True,
cd_access = 'a',
mode = 0755
)
self.assertResourceCalled('Directory', '/etc/ambari-infra-solr/conf',
owner = 'solr',
group = 'hadoop',
create_parents = True,
recursive_ownership = True,
cd_access = 'a',
mode = 0755
)
self.assertResourceCalled('File', '/var/log/ambari-infra-solr/solr-install.log',
owner = 'solr',
group = 'hadoop',
mode = 0644,
content = ''
)
self.assertResourceCalled('File', '/etc/ambari-infra-solr/conf/infra-solr-env.sh',
owner = 'solr',
group='hadoop',
mode = 0755,
content = InlineTemplate(self.getConfig()['configurations']['infra-solr-env']['content'])
)
self.assertResourceCalled('File', '/opt/ambari_infra_solr/data/solr.xml',
owner = 'solr',
group='hadoop',
content = InlineTemplate(self.getConfig()['configurations']['infra-solr-xml']['content'])
)
self.assertResourceCalled('File', '/etc/ambari-infra-solr/conf/log4j.properties',
owner = 'solr',
group='hadoop',
content = InlineTemplate(self.getConfig()['configurations']['infra-solr-log4j']['content'])
)
self.assertResourceCalled('File', '/etc/ambari-infra-solr/conf/custom-security.json',
owner = 'solr',
group='hadoop',
content = InlineTemplate(self.getConfig()['configurations']['infra-solr-security-json']['content']),
mode = 0640
)
self.assertResourceCalled('Execute', 'ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181 --znode /infra-solr --create-znode --retry 30 --interval 5')
self.assertResourceCalled('Execute', 'ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181/infra-solr --cluster-prop --property-name urlScheme --property-value http')
self.assertResourceCalled('Execute', 'ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181 --znode /infra-solr --setup-kerberos-plugin')
def test_configure_default(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/infra_solr.py",
classname = "InfraSolr",
command = "configure",
config_file = "default.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.configureResourcesCalled()
self.assertNoMoreResources()
def test_start_default(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/infra_solr.py",
classname = "InfraSolr",
command = "start",
config_file = "default.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.configureResourcesCalled()
self.assertResourceCalled('Execute', "/usr/lib/ambari-infra-solr/bin/solr start -cloud -noprompt -s /opt/ambari_infra_solr/data >> /var/log/ambari-infra-solr/solr-install.log 2>&1",
environment = {'SOLR_INCLUDE': '/etc/ambari-infra-solr/conf/infra-solr-env.sh'},
user = "solr"
)
def test_stop_default(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/infra_solr.py",
classname = "InfraSolr",
command = "stop",
config_file = "default.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assertResourceCalled('Execute', '/usr/lib/ambari-infra-solr/bin/solr stop -all >> /var/log/ambari-infra-solr/solr-install.log',
environment = {'SOLR_INCLUDE': '/etc/ambari-infra-solr/conf/infra-solr-env.sh'},
user = "solr",
only_if = "test -f /var/run/ambari-infra-solr/solr-8886.pid"
)
self.assertResourceCalled('File', '/var/run/ambari-infra-solr/solr-8886.pid',
action = ['delete']
)
| apache-2.0 |
vshtanko/scikit-learn | sklearn/metrics/setup.py | 299 | 1024 | import os
import os.path
import numpy
from numpy.distutils.misc_util import Configuration
from sklearn._build_utils import get_blas_info
def configuration(parent_package="", top_path=None):
config = Configuration("metrics", parent_package, top_path)
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
config.add_extension("pairwise_fast",
sources=["pairwise_fast.c"],
include_dirs=[os.path.join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
libraries=cblas_libs,
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
**blas_info)
return config
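# Illustrative usage sketch (standard numpy.distutils convention, not part of
# the original file): compiling the pairwise_fast extension in place against
# the detected BLAS, run from the project root:
#
#     python setup.py build_ext --inplace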
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(**configuration().todict())
| bsd-3-clause |
kanagasabapathi/python-for-android | python3-alpha/python3-src/Tools/scripts/linktree.py | 49 | 2443 | #! /usr/bin/env python3
# linktree
#
# Make a copy of a directory tree with symbolic links to all files in the
# original tree.
# All symbolic links go to a special symbolic link at the top, so you
# can easily fix things if the original source tree moves.
# See also "mkreal".
#
# usage: linktree oldtree newtree [linkto]
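#
# Illustrative example: after "linktree src srccopy", srccopy/ contains a
# symlink .LINK -> ../src, directories mirroring those in src/, and for each
# file src/sub/f a symlink srccopy/sub/f pointing back through ../.LINK/sub/f.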
import sys, os
LINK = '.LINK' # Name of special symlink at the top.
debug = 0
def main():
if not 3 <= len(sys.argv) <= 4:
print('usage:', sys.argv[0], 'oldtree newtree [linkto]')
return 2
oldtree, newtree = sys.argv[1], sys.argv[2]
if len(sys.argv) > 3:
link = sys.argv[3]
link_may_fail = 1
else:
link = LINK
link_may_fail = 0
if not os.path.isdir(oldtree):
print(oldtree + ': not a directory')
return 1
try:
os.mkdir(newtree, 0o777)
except os.error as msg:
print(newtree + ': cannot mkdir:', msg)
return 1
linkname = os.path.join(newtree, link)
try:
os.symlink(os.path.join(os.pardir, oldtree), linkname)
except os.error as msg:
if not link_may_fail:
print(linkname + ': cannot symlink:', msg)
return 1
else:
print(linkname + ': warning: cannot symlink:', msg)
linknames(oldtree, newtree, link)
return 0
def linknames(old, new, link):
if debug: print('linknames', (old, new, link))
try:
names = os.listdir(old)
except os.error as msg:
print(old + ': warning: cannot listdir:', msg)
return
for name in names:
if name not in (os.curdir, os.pardir):
oldname = os.path.join(old, name)
linkname = os.path.join(link, name)
newname = os.path.join(new, name)
if debug > 1: print(oldname, newname, linkname)
if os.path.isdir(oldname) and \
not os.path.islink(oldname):
try:
os.mkdir(newname, 0o777)
ok = 1
                except os.error as msg:
print(newname + \
': warning: cannot mkdir:', msg)
ok = 0
if ok:
linkname = os.path.join(os.pardir,
linkname)
linknames(oldname, newname, linkname)
else:
os.symlink(linkname, newname)
if __name__ == '__main__':
sys.exit(main())
| apache-2.0 |
amohanta/yalih | mechanize/_http.py | 133 | 14354 | """HTTP related handlers.
Note that some other HTTP handlers live in more specific modules: _auth.py,
_gzip.py, etc.
Copyright 2002-2006 John J Lee <[email protected]>
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file
COPYING.txt included with the distribution).
"""
import HTMLParser
from cStringIO import StringIO
import htmlentitydefs
import logging
import robotparser
import socket
import time
import _sgmllib_copy as sgmllib
from _urllib2_fork import HTTPError, BaseHandler
from _headersutil import is_html
from _html import unescape, unescape_charref
from _request import Request
from _response import response_seek_wrapper
import _rfc3986
import _sockettimeout
debug = logging.getLogger("mechanize").debug
debug_robots = logging.getLogger("mechanize.robots").debug
# monkeypatch urllib2.HTTPError to show URL
## import urllib2
## def urllib2_str(self):
## return 'HTTP Error %s: %s (%s)' % (
## self.code, self.msg, self.geturl())
## urllib2.HTTPError.__str__ = urllib2_str
CHUNK = 1024 # size of chunks fed to HTML HEAD parser, in bytes
DEFAULT_ENCODING = 'latin-1'
# XXX would self.reset() work, instead of raising this exception?
class EndOfHeadError(Exception): pass
class AbstractHeadParser:
# only these elements are allowed in or before HEAD of document
head_elems = ("html", "head",
"title", "base",
"script", "style", "meta", "link", "object")
_entitydefs = htmlentitydefs.name2codepoint
_encoding = DEFAULT_ENCODING
def __init__(self):
self.http_equiv = []
def start_meta(self, attrs):
http_equiv = content = None
for key, value in attrs:
if key == "http-equiv":
http_equiv = self.unescape_attr_if_required(value)
elif key == "content":
content = self.unescape_attr_if_required(value)
if http_equiv is not None and content is not None:
self.http_equiv.append((http_equiv, content))
def end_head(self):
raise EndOfHeadError()
def handle_entityref(self, name):
#debug("%s", name)
self.handle_data(unescape(
'&%s;' % name, self._entitydefs, self._encoding))
def handle_charref(self, name):
#debug("%s", name)
self.handle_data(unescape_charref(name, self._encoding))
def unescape_attr(self, name):
#debug("%s", name)
return unescape(name, self._entitydefs, self._encoding)
def unescape_attrs(self, attrs):
#debug("%s", attrs)
escaped_attrs = {}
for key, val in attrs.items():
escaped_attrs[key] = self.unescape_attr(val)
return escaped_attrs
def unknown_entityref(self, ref):
self.handle_data("&%s;" % ref)
def unknown_charref(self, ref):
self.handle_data("&#%s;" % ref)
class XHTMLCompatibleHeadParser(AbstractHeadParser,
HTMLParser.HTMLParser):
def __init__(self):
HTMLParser.HTMLParser.__init__(self)
AbstractHeadParser.__init__(self)
def handle_starttag(self, tag, attrs):
if tag not in self.head_elems:
raise EndOfHeadError()
try:
method = getattr(self, 'start_' + tag)
except AttributeError:
try:
method = getattr(self, 'do_' + tag)
except AttributeError:
pass # unknown tag
else:
method(attrs)
else:
method(attrs)
def handle_endtag(self, tag):
if tag not in self.head_elems:
raise EndOfHeadError()
try:
method = getattr(self, 'end_' + tag)
except AttributeError:
pass # unknown tag
else:
method()
def unescape(self, name):
# Use the entitydefs passed into constructor, not
# HTMLParser.HTMLParser's entitydefs.
return self.unescape_attr(name)
def unescape_attr_if_required(self, name):
return name # HTMLParser.HTMLParser already did it
class HeadParser(AbstractHeadParser, sgmllib.SGMLParser):
def _not_called(self):
assert False
def __init__(self):
sgmllib.SGMLParser.__init__(self)
AbstractHeadParser.__init__(self)
def handle_starttag(self, tag, method, attrs):
if tag not in self.head_elems:
raise EndOfHeadError()
if tag == "meta":
method(attrs)
def unknown_starttag(self, tag, attrs):
self.handle_starttag(tag, self._not_called, attrs)
def handle_endtag(self, tag, method):
if tag in self.head_elems:
method()
else:
raise EndOfHeadError()
def unescape_attr_if_required(self, name):
return self.unescape_attr(name)
def parse_head(fileobj, parser):
"""Return a list of key, value pairs."""
while 1:
data = fileobj.read(CHUNK)
try:
parser.feed(data)
except EndOfHeadError:
break
if len(data) != CHUNK:
# this should only happen if there is no HTML body, or if
# CHUNK is big
break
return parser.http_equiv
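# For example (illustrative): feeding
#   "<html><head><meta http-equiv='Refresh' content='0; url=/next'></head>"
# through parse_head() with a HeadParser yields [('Refresh', '0; url=/next')].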
class HTTPEquivProcessor(BaseHandler):
"""Append META HTTP-EQUIV headers to regular HTTP headers."""
handler_order = 300 # before handlers that look at HTTP headers
def __init__(self, head_parser_class=HeadParser,
i_want_broken_xhtml_support=False,
):
self.head_parser_class = head_parser_class
self._allow_xhtml = i_want_broken_xhtml_support
def http_response(self, request, response):
if not hasattr(response, "seek"):
response = response_seek_wrapper(response)
http_message = response.info()
url = response.geturl()
ct_hdrs = http_message.getheaders("content-type")
if is_html(ct_hdrs, url, self._allow_xhtml):
try:
try:
html_headers = parse_head(response,
self.head_parser_class())
finally:
response.seek(0)
except (HTMLParser.HTMLParseError,
sgmllib.SGMLParseError):
pass
else:
for hdr, val in html_headers:
# add a header
http_message.dict[hdr.lower()] = val
text = hdr + ": " + val
for line in text.split("\n"):
http_message.headers.append(line + "\n")
return response
https_response = http_response
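# A minimal wiring sketch (mechanize.Browser installs this handler for you;
# the explicit build_opener call below is only illustrative):
#
#     import mechanize
#     opener = mechanize.build_opener(mechanize.HTTPEquivProcessor())
#     response = opener.open("http://example.com/")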
class MechanizeRobotFileParser(robotparser.RobotFileParser):
def __init__(self, url='', opener=None):
robotparser.RobotFileParser.__init__(self, url)
self._opener = opener
self._timeout = _sockettimeout._GLOBAL_DEFAULT_TIMEOUT
def set_opener(self, opener=None):
import _opener
if opener is None:
opener = _opener.OpenerDirector()
self._opener = opener
def set_timeout(self, timeout):
self._timeout = timeout
def read(self):
"""Reads the robots.txt URL and feeds it to the parser."""
if self._opener is None:
self.set_opener()
req = Request(self.url, unverifiable=True, visit=False,
timeout=self._timeout)
try:
f = self._opener.open(req)
except HTTPError, f:
pass
except (IOError, socket.error, OSError), exc:
debug_robots("ignoring error opening %r: %s" %
(self.url, exc))
return
lines = []
line = f.readline()
while line:
lines.append(line.strip())
line = f.readline()
status = f.code
if status == 401 or status == 403:
self.disallow_all = True
debug_robots("disallow all")
elif status >= 400:
self.allow_all = True
debug_robots("allow all")
elif status == 200 and lines:
debug_robots("parse lines")
self.parse(lines)
class RobotExclusionError(HTTPError):
def __init__(self, request, *args):
        HTTPError.__init__(self, *args)
self.request = request
class HTTPRobotRulesProcessor(BaseHandler):
# before redirections, after everything else
handler_order = 800
try:
from httplib import HTTPMessage
except:
from mimetools import Message
http_response_class = Message
else:
http_response_class = HTTPMessage
def __init__(self, rfp_class=MechanizeRobotFileParser):
self.rfp_class = rfp_class
self.rfp = None
self._host = None
def http_request(self, request):
scheme = request.get_type()
if scheme not in ["http", "https"]:
# robots exclusion only applies to HTTP
return request
if request.get_selector() == "/robots.txt":
# /robots.txt is always OK to fetch
return request
host = request.get_host()
# robots.txt requests don't need to be allowed by robots.txt :-)
origin_req = getattr(request, "_origin_req", None)
if (origin_req is not None and
origin_req.get_selector() == "/robots.txt" and
origin_req.get_host() == host
):
return request
if host != self._host:
self.rfp = self.rfp_class()
try:
self.rfp.set_opener(self.parent)
except AttributeError:
debug("%r instance does not support set_opener" %
self.rfp.__class__)
self.rfp.set_url(scheme+"://"+host+"/robots.txt")
self.rfp.set_timeout(request.timeout)
self.rfp.read()
self._host = host
ua = request.get_header("User-agent", "")
if self.rfp.can_fetch(ua, request.get_full_url()):
return request
else:
# XXX This should really have raised URLError. Too late now...
msg = "request disallowed by robots.txt"
raise RobotExclusionError(
request,
request.get_full_url(),
403, msg,
self.http_response_class(StringIO()), StringIO(msg))
https_request = http_request
class HTTPRefererProcessor(BaseHandler):
"""Add Referer header to requests.
This only makes sense if you use each RefererProcessor for a single
chain of requests only (so, for example, if you use a single
HTTPRefererProcessor to fetch a series of URLs extracted from a single
page, this will break).
There's a proper implementation of this in mechanize.Browser.
"""
def __init__(self):
self.referer = None
def http_request(self, request):
if ((self.referer is not None) and
not request.has_header("Referer")):
request.add_unredirected_header("Referer", self.referer)
return request
def http_response(self, request, response):
self.referer = response.geturl()
return response
https_request = http_request
https_response = http_response
def clean_refresh_url(url):
# e.g. Firefox 1.5 does (something like) this
if ((url.startswith('"') and url.endswith('"')) or
(url.startswith("'") and url.endswith("'"))):
url = url[1:-1]
return _rfc3986.clean_url(url, "latin-1") # XXX encoding
def parse_refresh_header(refresh):
"""
>>> parse_refresh_header("1; url=http://example.com/")
(1.0, 'http://example.com/')
>>> parse_refresh_header("1; url='http://example.com/'")
(1.0, 'http://example.com/')
>>> parse_refresh_header("1")
(1.0, None)
>>> parse_refresh_header("blah") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
ValueError: invalid literal for float(): blah
"""
ii = refresh.find(";")
if ii != -1:
pause, newurl_spec = float(refresh[:ii]), refresh[ii+1:]
jj = newurl_spec.find("=")
key = None
if jj != -1:
key, newurl = newurl_spec[:jj], newurl_spec[jj+1:]
newurl = clean_refresh_url(newurl)
if key is None or key.strip().lower() != "url":
raise ValueError()
else:
pause, newurl = float(refresh), None
return pause, newurl
class HTTPRefreshProcessor(BaseHandler):
"""Perform HTTP Refresh redirections.
Note that if a non-200 HTTP code has occurred (for example, a 30x
redirect), this processor will do nothing.
By default, only zero-time Refresh headers are redirected. Use the
max_time attribute / constructor argument to allow Refresh with longer
pauses. Use the honor_time attribute / constructor argument to control
whether the requested pause is honoured (with a time.sleep()) or
skipped in favour of immediate redirection.
Public attributes:
max_time: see above
honor_time: see above
"""
handler_order = 1000
def __init__(self, max_time=0, honor_time=True):
self.max_time = max_time
self.honor_time = honor_time
self._sleep = time.sleep
def http_response(self, request, response):
code, msg, hdrs = response.code, response.msg, response.info()
if code == 200 and hdrs.has_key("refresh"):
refresh = hdrs.getheaders("refresh")[0]
try:
pause, newurl = parse_refresh_header(refresh)
except ValueError:
debug("bad Refresh header: %r" % refresh)
return response
if newurl is None:
newurl = response.geturl()
if (self.max_time is None) or (pause <= self.max_time):
if pause > 1E-3 and self.honor_time:
self._sleep(pause)
hdrs["location"] = newurl
# hardcoded http is NOT a bug
response = self.parent.error(
"http", request, response,
"refresh", msg, hdrs)
else:
debug("Refresh header ignored: %r" % refresh)
return response
https_response = http_response
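# For example (illustrative): given a 200 response carrying the header
#     Refresh: 5; url=http://example.com/next
# the redirect is followed after a five second sleep when max_time is None or
# >= 5 and honor_time is True, followed immediately when honor_time is False,
# and ignored (with a debug message) under the default max_time of 0.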
| apache-2.0 |
wenxichen/tensorflow_yolo2 | src/slim_dir/nets/inception_utils.py | 66 | 2630 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains common code shared by all inception models.
Usage of arg scope:
with slim.arg_scope(inception_arg_scope()):
logits, end_points = inception.inception_v3(images, num_classes,
is_training=is_training)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
slim = tf.contrib.slim
def inception_arg_scope(weight_decay=0.00004,
use_batch_norm=True,
batch_norm_decay=0.9997,
batch_norm_epsilon=0.001):
"""Defines the default arg scope for inception models.
Args:
weight_decay: The weight decay to use for regularizing the model.
    use_batch_norm: If `True`, batch_norm is applied after each convolution.
batch_norm_decay: Decay for batch norm moving average.
batch_norm_epsilon: Small float added to variance to avoid dividing by zero
in batch norm.
Returns:
An `arg_scope` to use for the inception models.
"""
batch_norm_params = {
# Decay for the moving averages.
'decay': batch_norm_decay,
# epsilon to prevent 0s in variance.
'epsilon': batch_norm_epsilon,
# collection containing update_ops.
'updates_collections': tf.GraphKeys.UPDATE_OPS,
}
if use_batch_norm:
normalizer_fn = slim.batch_norm
normalizer_params = batch_norm_params
else:
normalizer_fn = None
normalizer_params = {}
# Set weight_decay for weights in Conv and FC layers.
with slim.arg_scope([slim.conv2d, slim.fully_connected],
weights_regularizer=slim.l2_regularizer(weight_decay)):
with slim.arg_scope(
[slim.conv2d],
weights_initializer=slim.variance_scaling_initializer(),
activation_fn=tf.nn.relu,
normalizer_fn=normalizer_fn,
normalizer_params=normalizer_params) as sc:
return sc
| mit |
psawaya/Mental-Ginger | django/conf/locale/ka/formats.py | 329 | 1888 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'l, j F, Y'
TIME_FORMAT = 'h:i:s a'
DATETIME_FORMAT = 'j F, Y h:i:s a'
YEAR_MONTH_FORMAT = 'F, Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'j.M.Y'
SHORT_DATETIME_FORMAT = 'j.M.Y H:i:s'
FIRST_DAY_OF_WEEK = 1 # (Monday)
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
# '%d %b %Y', '%d %b, %Y', '%d %b. %Y', # '25 Oct 2006', '25 Oct, 2006', '25 Oct. 2006'
# '%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
# '%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
)
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
'%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59'
'%d.%m.%y %H:%M', # '25.10.06 14:30'
'%d.%m.%y', # '25.10.06'
'%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
'%m/%d/%Y %H:%M', # '10/25/2006 14:30'
'%m/%d/%Y', # '10/25/2006'
'%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
'%m/%d/%y %H:%M', # '10/25/06 14:30'
'%m/%d/%y', # '10/25/06'
)
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = " "
NUMBER_GROUPING = 3
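# Illustrative example (English month/day names shown for readability; real
# output uses the active Georgian locale): with these settings,
# datetime(2006, 10, 25, 14, 30) renders via DATE_FORMAT as
# "Wednesday, 25 October, 2006" and via SHORT_DATETIME_FORMAT as
# "25.Oct.2006 14:30:00".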
| bsd-3-clause |
wzbozon/scikit-learn | sklearn/preprocessing/data.py | 68 | 57385 | # Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# Eric Martin <[email protected]>
# License: BSD 3 clause
from itertools import chain, combinations
import numbers
import warnings
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..utils import check_array
from ..utils.extmath import row_norms
from ..utils.fixes import combinations_with_replacement as combinations_w_r
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale, mean_variance_axis,
min_max_axis, inplace_row_scale)
from ..utils.validation import check_is_fitted, FLOAT_DTYPES
zip = six.moves.zip
map = six.moves.map
range = six.moves.range
__all__ = [
'Binarizer',
'KernelCenterer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
]
DEPRECATION_MSG_1D = (
"Passing 1d arrays as data is deprecated in 0.17 and will "
"raise ValueError in 0.19. Reshape your data either using "
"X.reshape(-1, 1) if your data has a single feature or "
"X.reshape(1, -1) if it contains a single sample."
)
def _mean_and_std(X, axis=0, with_mean=True, with_std=True):
"""Compute mean and std deviation for centering, scaling.
Zero valued std components are reset to 1.0 to avoid NaNs when scaling.
"""
X = np.asarray(X)
Xr = np.rollaxis(X, axis)
if with_mean:
mean_ = Xr.mean(axis=0)
else:
mean_ = None
if with_std:
std_ = Xr.std(axis=0)
std_ = _handle_zeros_in_scale(std_)
else:
std_ = None
return mean_, std_
def _handle_zeros_in_scale(scale):
''' Makes sure that whenever scale is zero, we handle it correctly.
This happens in most scalers when we have constant features.'''
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == 0:
scale = 1.
elif isinstance(scale, np.ndarray):
scale[scale == 0.0] = 1.0
scale[~np.isfinite(scale)] = 1.0
return scale
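# e.g. (illustrative): _handle_zeros_in_scale(np.array([0., 2.])) returns
# array([ 1.,  2.]), so dividing by the scale leaves constant (zero-variance)
# features untouched instead of producing NaNs.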
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis
Center to the mean and component wise scale to unit variance.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like or CSR matrix.
The data to center and scale.
axis : int (0 by default)
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse='csr', copy=copy, ensure_2d=False,
warn_on_dtype=True, estimator='the scale function',
dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` instead"
" See docstring for motivation and alternatives.")
if axis != 0:
raise ValueError("Can only scale sparse matrix on axis=0, "
" got axis=%d" % axis)
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
_, var = mean_variance_axis(X, axis=0)
var = _handle_zeros_in_scale(var)
inplace_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
mean_, std_ = _mean_and_std(
X, axis, with_mean=with_mean, with_std=with_std)
if copy:
X = X.copy()
# Xr is a view on the original array that enables easy use of
# broadcasting on the axis in which we are interested in
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
mean_1 = Xr.mean(axis=0)
# Verify that mean_1 is 'close to zero'. If X contains very
# large values, mean_1 can also be very large, due to a lack of
# precision of mean_. In this case, a pre-scaling of the
# concerned feature is efficient, for instance by its mean or
# maximum.
if not np.allclose(mean_1, 0):
warnings.warn("Numerical issues were encountered "
"when centering the data "
"and might not be solved. Dataset may "
"contain too large values. You may need "
"to prescale your features.")
Xr -= mean_1
if with_std:
Xr /= std_
if with_mean:
mean_2 = Xr.mean(axis=0)
# If mean_2 is not 'close to zero', it comes from the fact that
# std_ is very small so that mean_2 = mean_1/std_ > 0, even if
# mean_1 was close to zero. The problem is thus essentially due
# to the lack of precision of mean_. A solution is then to
            # subtract the mean again:
if not np.allclose(mean_2, 0):
warnings.warn("Numerical issues were encountered "
"when scaling the data "
"and might not be solved. The standard "
"deviation of the data is probably "
"very close to 0. ")
Xr -= mean_2
return X
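# A quick illustrative check of scale() (not part of the original module):
#
#     >>> import numpy as np
#     >>> scale(np.array([[1., 2.], [3., 4.]]))
#     array([[-1., -1.],
#            [ 1.,  1.]])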
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : boolean, optional, default True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array).
Attributes
----------
min_ : ndarray, shape (n_features,)
Per feature adjustment for minimum.
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
"""
def __init__(self, feature_range=(0, 1), copy=True):
self.feature_range = feature_range
self.copy = copy
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
X = check_array(X, copy=self.copy, ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
data_min = np.min(X, axis=0)
data_range = np.max(X, axis=0) - data_min
data_range = _handle_zeros_in_scale(data_range)
self.scale_ = (feature_range[1] - feature_range[0]) / data_range
self.min_ = feature_range[0] - data_min * self.scale_
self.data_range = data_range
self.data_min = data_min
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
X *= self.scale_
X += self.min_
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False)
X -= self.min_
X /= self.scale_
return X
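# Illustrative usage of MinMaxScaler (values chosen for clarity):
#
#     >>> import numpy as np
#     >>> MinMaxScaler().fit_transform(np.array([[1., 10.], [3., 30.]]))
#     array([[ 0.,  0.],
#            [ 1.,  1.]])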
def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
s = MinMaxScaler(feature_range=feature_range, copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class StandardScaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
individual feature do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
that others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_mean : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
mean_ : array of floats with shape [n_features]
The mean value for each feature in the training set.
std_ : array of floats with shape [n_features]
The standard deviation for each feature in the training set.
Set to one if the standard deviation is zero for a given feature.
See also
--------
:func:`sklearn.preprocessing.scale` to perform centering and
scaling without using the ``Transformer`` object oriented API
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
"""
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : array-like or CSR matrix with shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
"""
X = check_array(X, accept_sparse='csr', copy=self.copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
self.mean_ = None
if self.with_std:
var = mean_variance_axis(X, axis=0)[1]
self.std_ = np.sqrt(var)
self.std_ = _handle_zeros_in_scale(self.std_)
else:
self.std_ = None
return self
else:
self.mean_, self.std_ = _mean_and_std(
X, axis=0, with_mean=self.with_mean, with_std=self.with_std)
return self
def transform(self, X, y=None, copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'std_')
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr', copy=copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.std_ is not None:
inplace_column_scale(X, 1 / self.std_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.std_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'std_')
copy = copy if copy is not None else self.copy
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot uncenter sparse matrices: pass `with_mean=False` "
"instead See docstring for motivation and alternatives.")
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if self.std_ is not None:
inplace_column_scale(X, self.std_)
else:
X = np.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.std_
if self.with_mean:
X += self.mean_
return X
class MaxAbsScaler(BaseEstimator, TransformerMixin):
"""Scale each feature by its maximum absolute value.
This estimator scales and translates each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0. It does not shift/center the data, and
thus does not destroy any sparsity.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
"""
def __init__(self, copy=True):
self.copy = copy
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
mins, maxs = min_max_axis(X, axis=0)
scales = np.maximum(np.abs(mins), np.abs(maxs))
else:
scales = np.abs(X).max(axis=0)
scales = np.array(scales)
scales = scales.reshape(-1)
self.scale_ = _handle_zeros_in_scale(scales)
return self
def transform(self, X, y=None):
"""Scale the data
Parameters
----------
X : array-like or CSR matrix.
The data that should be scaled.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if X.shape[0] == 1:
inplace_row_scale(X, 1.0 / self.scale_)
else:
inplace_column_scale(X, 1.0 / self.scale_)
else:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like or CSR matrix.
The data that should be transformed back.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if X.shape[0] == 1:
inplace_row_scale(X, self.scale_)
else:
inplace_column_scale(X, self.scale_)
else:
X *= self.scale_
return X
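# Illustrative usage of MaxAbsScaler:
#
#     >>> import numpy as np
#     >>> MaxAbsScaler().fit_transform(np.array([[ 1., -2.], [ 2.,  4.]]))
#     array([[ 0.5, -0.5],
#            [ 1. ,  1. ]])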
def maxabs_scale(X, axis=0, copy=True):
"""Scale each feature to the [-1, 1] range without breaking the sparsity.
This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
s = MaxAbsScaler(copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class RobustScaler(BaseEstimator, TransformerMixin):
"""Scale features using statistics that are robust to outliers.
This Scaler removes the median and scales the data according to
the Interquartile Range (IQR). The IQR is the range between the 1st
quartile (25th quantile) and the 3rd quartile (75th quantile).
Centering and scaling happen independently on each feature (or each
sample, depending on the `axis` argument) by computing the relevant
statistics on the samples in the training set. Median and interquartile
range are then stored to be used on later data using the `transform`
method.
Standardization of a dataset is a common requirement for many
machine learning estimators. Typically this is done by removing the mean
and scaling to unit variance. However, outliers can often influence the
sample mean / variance in a negative way. In such cases, the median and
the interquartile range often give better results.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_centering : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_scaling : boolean, True by default
If True, scale the data to interquartile range.
copy : boolean, optional, default is True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
center_ : array of floats
The median value for each feature in the training set.
scale_ : array of floats
The (scaled) interquartile range for each feature in the training set.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering
and scaling using mean and variance.
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
Notes
-----
See examples/preprocessing/plot_robust_scaling.py for an example.
http://en.wikipedia.org/wiki/Median_(statistics)
http://en.wikipedia.org/wiki/Interquartile_range
"""
def __init__(self, with_centering=True, with_scaling=True, copy=True):
self.with_centering = with_centering
self.with_scaling = with_scaling
self.copy = copy
def _check_array(self, X, copy):
"""Makes sure centering is not enabled for sparse matrices."""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_centering:
raise ValueError(
"Cannot center sparse matrices: use `with_centering=False`"
" instead. See docstring for motivation and alternatives.")
return X
def fit(self, X, y=None):
"""Compute the median and quantiles to be used for scaling.
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to compute the median and quantiles
used for later scaling along the features axis.
"""
if sparse.issparse(X):
raise TypeError("RobustScaler cannot be fitted on sparse inputs")
X = self._check_array(X, self.copy)
if self.with_centering:
self.center_ = np.median(X, axis=0)
if self.with_scaling:
q = np.percentile(X, (25, 75), axis=0)
self.scale_ = (q[1] - q[0])
self.scale_ = _handle_zeros_in_scale(self.scale_)
return self
def transform(self, X, y=None):
"""Center and scale the data
Parameters
----------
X : array-like or CSR matrix.
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
if X.shape[0] == 1:
inplace_row_scale(X, 1.0 / self.scale_)
                else:
inplace_column_scale(X, 1.0 / self.scale_)
else:
if self.with_centering:
X -= self.center_
if self.with_scaling:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like or CSR matrix.
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
if X.shape[0] == 1:
inplace_row_scale(X, self.scale_)
else:
inplace_column_scale(X, self.scale_)
else:
if self.with_scaling:
X *= self.scale_
if self.with_centering:
X += self.center_
return X
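# Illustrative note: because center_ is the per-feature median and scale_ the
# interquartile range, a handful of extreme outliers barely move the fitted
# statistics, whereas they can dominate the mean/std used by StandardScaler.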
def robust_scale(X, axis=0, with_centering=True, with_scaling=True, copy=True):
"""Standardize a dataset along any axis
Center to the median and component wise scale
according to the interquartile range.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like.
The data to center and scale.
axis : int (0 by default)
axis used to compute the medians and IQR along. If 0,
independently scale each feature, otherwise (if 1) scale
each sample.
with_centering : boolean, True by default
If True, center the data before scaling.
with_scaling : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default is True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_centering=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.RobustScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling,
copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class PolynomialFeatures(BaseEstimator, TransformerMixin):
"""Generate polynomial and interaction features.
Generate a new feature matrix consisting of all polynomial combinations
of the features with degree less than or equal to the specified degree.
For example, if an input sample is two dimensional and of the form
[a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].
Parameters
----------
degree : integer
The degree of the polynomial features. Default = 2.
interaction_only : boolean, default = False
If true, only interaction features are produced: features that are
products of at most ``degree`` *distinct* input features (so not
``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).
include_bias : boolean
If True (default), then include a bias column, the feature in which
all polynomial powers are zero (i.e. a column of ones - acts as an
intercept term in a linear model).
Examples
--------
>>> X = np.arange(6).reshape(3, 2)
>>> X
array([[0, 1],
[2, 3],
[4, 5]])
>>> poly = PolynomialFeatures(2)
>>> poly.fit_transform(X)
array([[ 1, 0, 1, 0, 0, 1],
[ 1, 2, 3, 4, 6, 9],
[ 1, 4, 5, 16, 20, 25]])
>>> poly = PolynomialFeatures(interaction_only=True)
>>> poly.fit_transform(X)
array([[ 1, 0, 1, 0],
[ 1, 2, 3, 6],
[ 1, 4, 5, 20]])
Attributes
----------
powers_ : array, shape (n_input_features, n_output_features)
powers_[i, j] is the exponent of the jth input in the ith output.
n_input_features_ : int
The total number of input features.
n_output_features_ : int
The total number of polynomial output features. The number of output
features is computed by iterating over all suitably sized combinations
of input features.
Notes
-----
Be aware that the number of features in the output array scales
polynomially in the number of features of the input array, and
exponentially in the degree. High degrees can cause overfitting.
See :ref:`examples/linear_model/plot_polynomial_interpolation.py
<example_linear_model_plot_polynomial_interpolation.py>`
"""
def __init__(self, degree=2, interaction_only=False, include_bias=True):
self.degree = degree
self.interaction_only = interaction_only
self.include_bias = include_bias
@staticmethod
def _combinations(n_features, degree, interaction_only, include_bias):
comb = (combinations if interaction_only else combinations_w_r)
start = int(not include_bias)
return chain.from_iterable(comb(range(n_features), i)
for i in range(start, degree + 1))
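    # For example (illustrative): with n_features=2, degree=2,
    # interaction_only=False and include_bias=True, this yields the tuples
    #   (), (0,), (1,), (0, 0), (0, 1), (1, 1)
    # i.e. the bias, x0, x1, x0**2, x0*x1 and x1**2 -- the
    # [1, a, b, a^2, ab, b^2] ordering from the class docstring.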
@property
def powers_(self):
check_is_fitted(self, 'n_input_features_')
combinations = self._combinations(self.n_input_features_, self.degree,
self.interaction_only,
self.include_bias)
        return np.vstack([np.bincount(c, minlength=self.n_input_features_)
                          for c in combinations])
def fit(self, X, y=None):
"""
Compute number of output features.
"""
n_samples, n_features = check_array(X).shape
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
self.n_input_features_ = n_features
self.n_output_features_ = sum(1 for _ in combinations)
return self
def transform(self, X, y=None):
"""Transform data to polynomial features
Parameters
----------
X : array with shape [n_samples, n_features]
The data to transform, row by row.
Returns
-------
XP : np.ndarray shape [n_samples, NP]
The matrix of features, where NP is the number of polynomial
features generated from the combination of inputs.
"""
check_is_fitted(self, ['n_input_features_', 'n_output_features_'])
X = check_array(X)
n_samples, n_features = X.shape
if n_features != self.n_input_features_:
raise ValueError("X shape does not match training shape")
# allocate output data
XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
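        # Each output column is the product of the input columns named by the
        # tuple c: c == (0, 1) gives x0 * x1, and the empty tuple () gives
        # the bias column, since the product over an empty selection is 1.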
for i, c in enumerate(combinations):
XP[:, i] = X[:, c].prod(1)
return XP
def normalize(X, norm='l2', axis=1, copy=True):
"""Scale input vectors individually to unit norm (vector length).
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
        unnecessary copy.
    norm : 'l1', 'l2', or 'max', optional ('l2' by default)
        The norm to use to normalize each non-zero sample (or each non-zero
feature if axis is 0).
axis : 0 or 1, optional (1 by default)
axis used to normalize the data along. If 1, independently normalize
each sample, otherwise (if 0) normalize each feature.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Normalizer` to perform normalization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
if norm not in ('l1', 'l2', 'max'):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = 'csc'
elif axis == 1:
sparse_format = 'csr'
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_array(X, sparse_format, copy=copy, warn_on_dtype=True,
estimator='the normalize function', dtype=FLOAT_DTYPES)
if axis == 0:
X = X.T
if sparse.issparse(X):
if norm == 'l1':
inplace_csr_row_normalize_l1(X)
elif norm == 'l2':
inplace_csr_row_normalize_l2(X)
elif norm == 'max':
_, norms = min_max_axis(X, 1)
norms = norms.repeat(np.diff(X.indptr))
mask = norms != 0
X.data[mask] /= norms[mask]
else:
if norm == 'l1':
norms = np.abs(X).sum(axis=1)
elif norm == 'l2':
norms = row_norms(X)
elif norm == 'max':
norms = np.max(X, axis=1)
norms = _handle_zeros_in_scale(norms)
X /= norms[:, np.newaxis]
if axis == 0:
X = X.T
return X
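# Worked example (illustrative): l2-normalizing the row [3, 4] divides by
# its norm sqrt(3**2 + 4**2) == 5:
#
#   >>> normalize([[3., 4.]])
#   array([[ 0.6,  0.8]])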
class Normalizer(BaseEstimator, TransformerMixin):
"""Normalize samples individually to unit norm.
    Each sample (i.e. each row of the data matrix) with at least one
    non-zero component is rescaled independently of other samples so
    that its norm (l1 or l2) equals one.
    This transformer is able to work both with dense numpy arrays and
    scipy.sparse matrix (use CSR format if you want to avoid the burden of
    a copy / conversion).
    Scaling inputs to unit norms is a common operation for text
    classification or clustering. For instance, the dot product of two
    l2-normalized TF-IDF vectors is the cosine similarity of the vectors
    and is the base similarity metric for the Vector Space Model commonly
    used by the Information Retrieval community.
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
        The norm to use to normalize each non-zero sample.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix).
Notes
-----
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
:func:`sklearn.preprocessing.normalize` equivalent function
without the object oriented API
"""
def __init__(self, norm='l2', copy=True):
self.norm = norm
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
X = check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Scale each non zero row of X to unit norm
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, row by row. scipy.sparse matrices should be
            in CSR format to avoid an unnecessary copy.
"""
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr')
return normalize(X, norm=self.norm, axis=1, copy=copy)
def binarize(X, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR or CSC format to avoid an
        unnecessary copy.
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
        set to False to perform inplace binarization and avoid a copy
        (if the input is already a numpy array or a scipy.sparse CSR / CSC
        matrix).
See also
--------
:class:`sklearn.preprocessing.Binarizer` to perform binarization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
if sparse.issparse(X):
if threshold < 0:
raise ValueError('Cannot binarize a sparse matrix with threshold '
'< 0')
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
X.data[not_cond] = 0
X.eliminate_zeros()
else:
cond = X > threshold
not_cond = np.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
class Binarizer(BaseEstimator, TransformerMixin):
"""Binarize data (set feature values to 0 or 1) according to a threshold
Values greater than the threshold map to 1, while values less than
or equal to the threshold map to 0. With the default threshold of 0,
only positive values map to 1.
Binarization is a common operation on text count data where the
analyst can decide to only consider the presence or absence of a
feature rather than a quantified number of occurrences for instance.
It can also be used as a pre-processing step for estimators that
consider boolean random variables (e.g. modelled using the Bernoulli
distribution in a Bayesian setting).
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy (if
the input is already a numpy array or a scipy.sparse CSR matrix).
Notes
-----
If the input is a sparse matrix, only the non-zero values are subject
to update by the Binarizer class.
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
"""
def __init__(self, threshold=0.0, copy=True):
self.threshold = threshold
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Binarize each element of X
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
            unnecessary copy.
"""
copy = copy if copy is not None else self.copy
return binarize(X, threshold=self.threshold, copy=copy)
class KernelCenterer(BaseEstimator, TransformerMixin):
"""Center a kernel matrix
Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a
function mapping x to a Hilbert space. KernelCenterer centers (i.e.,
normalize to have zero mean) the data without explicitly computing phi(x).
It is equivalent to centering phi(x) with
sklearn.preprocessing.StandardScaler(with_std=False).
Read more in the :ref:`User Guide <kernel_centering>`.
"""
def fit(self, K, y=None):
"""Fit KernelCenterer
Parameters
----------
K : numpy array of shape [n_samples, n_samples]
Kernel matrix.
Returns
-------
self : returns an instance of self.
"""
K = check_array(K)
n_samples = K.shape[0]
self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
return self
def transform(self, K, y=None, copy=True):
"""Center kernel matrix.
Parameters
----------
K : numpy array of shape [n_samples1, n_samples2]
Kernel matrix.
copy : boolean, optional, default True
Set to False to perform inplace computation.
Returns
-------
K_new : numpy array of shape [n_samples1, n_samples2]
"""
check_is_fitted(self, 'K_fit_all_')
K = check_array(K)
if copy:
K = K.copy()
K_pred_cols = (np.sum(K, axis=1) /
self.K_fit_rows_.shape[0])[:, np.newaxis]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
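# A sketch of the math implemented by transform above: with 1_n denoting the
# n x n matrix whose entries are all 1/n, the centered kernel is
#
#   K_centered = K - 1_n K - K 1_n + 1_n K 1_n
#
# K_fit_rows_, K_pred_cols and K_fit_all_ hold the three correction terms.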
def add_dummy_feature(X, value=1.0):
"""Augment dataset with an additional dummy feature.
This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
Data.
value : float
Value to use for the dummy feature.
Returns
-------
X : array or scipy.sparse matrix with shape [n_samples, n_features + 1]
Same data with dummy feature added as first column.
Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[ 1., 0., 1.],
[ 1., 1., 0.]])
"""
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'])
n_samples, n_features = X.shape
shape = (n_samples, n_features + 1)
if sparse.issparse(X):
if sparse.isspmatrix_coo(X):
# Shift columns to the right.
col = X.col + 1
# Column indices of dummy feature are 0 everywhere.
col = np.concatenate((np.zeros(n_samples), col))
# Row indices of dummy feature are 0, ..., n_samples-1.
row = np.concatenate((np.arange(n_samples), X.row))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.coo_matrix((data, (row, col)), shape)
elif sparse.isspmatrix_csc(X):
# Shift index pointers since we need to add n_samples elements.
indptr = X.indptr + n_samples
# indptr[0] must be 0.
indptr = np.concatenate((np.array([0]), indptr))
# Row indices of dummy feature are 0, ..., n_samples-1.
indices = np.concatenate((np.arange(n_samples), X.indices))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.csc_matrix((data, indices, indptr), shape)
else:
klass = X.__class__
return klass(add_dummy_feature(X.tocoo(), value))
else:
return np.hstack((np.ones((n_samples, 1)) * value, X))
def _transform_selected(X, transform, selected="all", copy=True):
"""Apply a transform function to portion of selected features
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Dense array or sparse matrix.
    transform : callable
        A callable transform(X) -> X_transformed
    selected : "all" or array of indices or mask
        Specify which features to apply the transform to.
    copy : boolean, optional
        Copy X even if it could be avoided.
Returns
-------
X : array or sparse matrix, shape=(n_samples, n_features_new)
"""
if selected == "all":
return transform(X)
X = check_array(X, accept_sparse='csc', copy=copy)
if len(selected) == 0:
return X
n_features = X.shape[1]
ind = np.arange(n_features)
sel = np.zeros(n_features, dtype=bool)
sel[np.asarray(selected)] = True
not_sel = np.logical_not(sel)
n_selected = np.sum(sel)
if n_selected == 0:
# No features selected.
return X
elif n_selected == n_features:
# All features selected.
return transform(X)
else:
X_sel = transform(X[:, ind[sel]])
X_not_sel = X[:, ind[not_sel]]
if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
return sparse.hstack((X_sel, X_not_sel))
else:
return np.hstack((X_sel, X_not_sel))
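# Usage sketch (illustrative): apply a transform to column 0 only, with the
# untouched columns stacked to its right:
#
#   _transform_selected(X, binarize, selected=[0])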
class OneHotEncoder(BaseEstimator, TransformerMixin):
"""Encode categorical integer features using a one-hot aka one-of-K scheme.
The input to this transformer should be a matrix of integers, denoting
the values taken on by categorical (discrete) features. The output will be
a sparse matrix where each column corresponds to one possible value of one
feature. It is assumed that input features take on values in the range
[0, n_values).
This encoding is needed for feeding categorical data to many scikit-learn
estimators, notably linear models and SVMs with the standard kernels.
Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
Parameters
----------
n_values : 'auto', int or array of ints
Number of values per feature.
- 'auto' : determine value range from training data.
- int : maximum value for all features.
- array : maximum value per feature.
    categorical_features : "all" or array of indices or mask
Specify what features are treated as categorical.
- 'all' (default): All features are treated as categorical.
- array of indices: Array of categorical feature indices.
- mask: Array of length n_features and with dtype=bool.
Non-categorical features are always stacked to the right of the matrix.
dtype : number type, default=np.float
Desired dtype of output.
sparse : boolean, default=True
Will return sparse matrix if set True else will return an array.
handle_unknown : str, 'error' or 'ignore'
        Whether to raise an error or ignore if an unknown categorical feature is
present during transform.
Attributes
----------
active_features_ : array
Indices for active features, meaning values that actually occur
in the training set. Only available when n_values is ``'auto'``.
feature_indices_ : array of shape (n_features,)
Indices to feature ranges.
Feature ``i`` in the original data is mapped to features
from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
(and then potentially masked by `active_features_` afterwards)
n_values_ : array of shape (n_features,)
Maximum number of values per feature.
Examples
--------
Given a dataset with three features and two samples, we let the encoder
find the maximum value per feature and transform the data to a binary
one-hot encoding.
>>> from sklearn.preprocessing import OneHotEncoder
>>> enc = OneHotEncoder()
>>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \
[1, 0, 2]]) # doctest: +ELLIPSIS
OneHotEncoder(categorical_features='all', dtype=<... 'float'>,
handle_unknown='error', n_values='auto', sparse=True)
>>> enc.n_values_
array([2, 3, 4])
>>> enc.feature_indices_
array([0, 2, 5, 9])
>>> enc.transform([[0, 1, 1]]).toarray()
array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]])
See also
--------
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
encoding of dictionary items or strings.
"""
def __init__(self, n_values="auto", categorical_features="all",
dtype=np.float, sparse=True, handle_unknown='error'):
self.n_values = n_values
self.categorical_features = categorical_features
self.dtype = dtype
self.sparse = sparse
self.handle_unknown = handle_unknown
def fit(self, X, y=None):
"""Fit OneHotEncoder to X.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Input array of type int.
Returns
-------
self
"""
self.fit_transform(X)
return self
def _fit_transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
if self.n_values == 'auto':
n_values = np.max(X, axis=0) + 1
elif isinstance(self.n_values, numbers.Integral):
if (np.max(X, axis=0) >= self.n_values).any():
raise ValueError("Feature out of bounds for n_values=%d"
% self.n_values)
n_values = np.empty(n_features, dtype=np.int)
n_values.fill(self.n_values)
else:
try:
n_values = np.asarray(self.n_values, dtype=int)
except (ValueError, TypeError):
raise TypeError("Wrong type for parameter `n_values`. Expected"
" 'auto', int or array of ints, got %r"
                                % type(self.n_values))
if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
raise ValueError("Shape mismatch: if n_values is an array,"
" it has to be of shape (n_features,).")
self.n_values_ = n_values
n_values = np.hstack([[0], n_values])
indices = np.cumsum(n_values)
self.feature_indices_ = indices
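        # Value v of feature i lands in output column indices[i] + v. For
        # example (illustrative), n_values_ = [2, 3] gives indices = [0, 2, 5],
        # so the row [1, 2] activates columns 1 and 4.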
column_indices = (X + indices[:-1]).ravel()
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)
data = np.ones(n_samples * n_features)
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
mask = np.array(out.sum(axis=0)).ravel() != 0
active_features = np.where(mask)[0]
out = out[:, active_features]
self.active_features_ = active_features
return out if self.sparse else out.toarray()
def fit_transform(self, X, y=None):
"""Fit OneHotEncoder to X, then transform X.
Equivalent to self.fit(X).transform(X), but more convenient and more
efficient. See fit for the parameters, transform for the return value.
"""
return _transform_selected(X, self._fit_transform,
self.categorical_features, copy=True)
def _transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
indices = self.feature_indices_
if n_features != indices.shape[0] - 1:
raise ValueError("X has different shape than during fitting."
" Expected %d, got %d."
% (indices.shape[0] - 1, n_features))
        # We use only those categorical features of X that are known using fit,
        # i.e. less than n_values_ using mask.
# This means, if self.handle_unknown is "ignore", the row_indices and
# col_indices corresponding to the unknown categorical feature are
# ignored.
mask = (X < self.n_values_).ravel()
if np.any(~mask):
if self.handle_unknown not in ['error', 'ignore']:
raise ValueError("handle_unknown should be either error or "
"unknown got %s" % self.handle_unknown)
if self.handle_unknown == 'error':
raise ValueError("unknown categorical feature present %s "
"during transform." % X[~mask])
column_indices = (X + indices[:-1]).ravel()[mask]
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)[mask]
data = np.ones(np.sum(mask))
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
out = out[:, self.active_features_]
return out if self.sparse else out.toarray()
def transform(self, X):
"""Transform X using one-hot encoding.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Input array of type int.
Returns
-------
        X_out : sparse matrix if sparse=True else a 2-d array
Transformed input.
"""
return _transform_selected(X, self._transform,
self.categorical_features, copy=True)
| bsd-3-clause |
griffincalme/LineweaverBurkKinematics | CompareTwoEnzymes.py | 1 | 2335 | #Griffin Calme
#Python 3.5.0
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
#Raw Data
s = np.array([1., 3., 5., 10., 20., 50., 100.]) #substrate concentration (micromolar)
vA = np.array([0.67, 1.20, 1.43, 1.67, 1.82, 1.92, 1.96]) #velocity of enzyme A at corresponding concentration (micromolar/sec)
vB = np.array([0.40, 0.86, 1.11, 1.43, 1.67, 1.85, 1.92]) #velocity of enzyme B at corresponding concentration (micromolar/sec)
#take reciprocal of Michaelis-Menten to plot in Lineweaver-Burk
Recip_s = np.reciprocal(s)
Recip_vA = np.reciprocal(vA)
Recip_vB = np.reciprocal(vB)
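#Lineweaver-Burk is the double-reciprocal form of Michaelis-Menten:
#  1/v = (Km/Vmax) * (1/[S]) + 1/Vmax
#so fitting a line to 1/v vs 1/[S] gives slope Km/Vmax, y-intercept 1/Vmax
#and x-intercept -1/Km, which is how the constants are recovered below.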
#Calculate linear regression
slopeA, interceptA, r_valueA, p_valueA, std_errA = stats.linregress(Recip_s, Recip_vA)
slopeB, interceptB, r_valueB, p_valueB, std_errB = stats.linregress(Recip_s, Recip_vB)
#Draw linear regression A
xA = np.linspace(-1., 2., 1000)
yA = (slopeA * xA) + interceptA #vectorized over the whole array; no loop needed
#Draw linear regression B
xB = np.linspace(-1., 2., 1000)
yB = (slopeB * xB) + interceptB #vectorized over the whole array; no loop needed
#Plot 1/Vmax A
plt.scatter(0, interceptA, color='red')
print("\n----Values for vA----")
print("1/Vmax A = ", interceptA)
print("Vmax A = ", 1/interceptA)
#Plot -1/Km A
xinterceptA = ((0 - interceptA)/slopeA)
plt.scatter(xinterceptA, 0, color='red')
print("\n-1/Km A = ", xinterceptA)
KmA = (-1 / xinterceptA)
print("Km A = ", KmA)
print("\nKm/Vmax A (slope A): ", slopeA)
#Plot 1/Vmax B
plt.scatter(0, interceptB, color='green')
print("\n\n----Values for vB----")
print("1/Vmax B = ", interceptB)
print("Vmax B = ", 1/interceptB)
#Plot -1/Km B
xinterceptB = ((0 - interceptB)/slopeB)
plt.scatter(xinterceptB, 0, color='green')
print("\n-1/Km B = ", xinterceptB)
KmB = (-1 / xinterceptB)
print("Km B = ", KmB)
print("\nKm/Vmax B (slope B): ", slopeB)
#Draw x & y origins
plt.axhline(0, color='black')
plt.axvline(0, color='black')
#Graph scatter points
plt.scatter(Recip_s, Recip_vA, color='blue')
plt.scatter(Recip_s, Recip_vB, color='purple')
#Graph linear regression
plt.plot(xA, yA, label='A', color='blue', linestyle='--')
plt.plot(xB, yB, label='B', color='purple', linestyle=':')
#Titles and labels
plt.xlabel(r'1/[S] (1/$\mu$M)')
plt.ylabel(r'1/v (s/$\mu$M)')
plt.title('Lineweaver-Burk')
plt.legend()
plt.show()
| mit |
redebian/documentation | django/contrib/comments/views/utils.py | 192 | 1947 | """
A few bits of helper functions for comment views.
"""
import urllib
import textwrap
from django.http import HttpResponseRedirect
from django.core import urlresolvers
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.core.exceptions import ObjectDoesNotExist
from django.contrib import comments
def next_redirect(data, default, default_view, **get_kwargs):
"""
Handle the "where should I go next?" part of comment views.
The next value could be a kwarg to the function (``default``), or a
``?next=...`` GET arg, or the URL of a given view (``default_view``). See
the view modules for examples.
Returns an ``HttpResponseRedirect``.
"""
next = data.get("next", default)
if next is None:
next = urlresolvers.reverse(default_view)
if get_kwargs:
if '#' in next:
tmp = next.rsplit('#', 1)
next = tmp[0]
anchor = '#' + tmp[1]
else:
anchor = ''
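        # pre-PEP-308 conditional idiom: use '&' when next already carries a
        # query string, otherwise start one with '?'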
joiner = ('?' in next) and '&' or '?'
next += joiner + urllib.urlencode(get_kwargs) + anchor
return HttpResponseRedirect(next)
def confirmation_view(template, doc="Display a confirmation view."):
"""
Confirmation view generator for the "comment was
posted/flagged/deleted/approved" views.
"""
def confirmed(request):
comment = None
if 'c' in request.GET:
try:
comment = comments.get_model().objects.get(pk=request.GET['c'])
except (ObjectDoesNotExist, ValueError):
pass
return render_to_response(template,
{'comment': comment},
context_instance=RequestContext(request)
)
confirmed.__doc__ = textwrap.dedent("""\
%s
        Templates: `%s`
Context:
comment
The posted comment
""" % (doc, template)
)
return confirmed
| bsd-3-clause |
google-research/dice_rl | google/scripts/xm_run_tabular_teq_dice.py | 1 | 2698 | # Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
import os
from google3.learning.deepmind.xmanager import hyper
import google3.learning.deepmind.xmanager2.client.google as xm
flags.DEFINE_string('exp_name', 'teqdice', 'Name of experiment.')
flags.DEFINE_string('cell', None, 'The cell to run jobs in.')
flags.DEFINE_integer('priority', 25, 'Priority to run job as.')
flags.DEFINE_string('env_name', 'grid', 'Environment name.')
flags.DEFINE_integer('num_seeds', 20, 'Number of random seed.')
flags.DEFINE_string('load_dir', '/cns/is-d/home/sherryy/teqdice/data/',
'Directory to load dataset from.')
flags.DEFINE_string('save_dir', '/cns/is-d/home/sherryy/teqdice/results/r=2/',
'Directory to save the results to.')
FLAGS = flags.FLAGS
def build_experiment():
runtime_worker = xm.Borg(
cell=FLAGS.cell,
priority=FLAGS.priority,
)
executable = xm.BuildTarget(
'//third_party/py/dice_rl/google/scripts:run_tabular_teq_dice',
build_flags=['-c', 'opt', '--copt=-mavx'],
args=[
('env_name', FLAGS.env_name),
('load_dir', FLAGS.load_dir),
('save_dir', os.path.join(FLAGS.save_dir, FLAGS.exp_name)),
('max_trajectory_length_train', 50),
('num_trajectory', 1000),
],
platform=xm.Platform.CPU,
runtime=runtime_worker)
parameters = hyper.product([
hyper.sweep('seed', hyper.discrete(list(range(FLAGS.num_seeds)))),
hyper.sweep('step_encoding', hyper.categorical([None, 'one_hot'])),
hyper.sweep('max_trajectory_length', hyper.discrete([5, 10, 20, 50])),
])
experiment = xm.ParameterSweep(executable, parameters)
return experiment
def main(_):
"""Launch the experiment using the arguments from the command line."""
description = xm.ExperimentDescription(
FLAGS.exp_name, tags=[
FLAGS.env_name,
])
experiment = build_experiment()
xm.launch_experiment(description, experiment)
if __name__ == '__main__':
app.run(main)
| apache-2.0 |
tuestudy/ipsc | 2014/B/gsong-easy.py | 1 | 2748 | #!/usr/bin/env python
import unittest
import sys
class RandomGenerator:
def __init__(self, arr):
self.randoms = arr
self.curr = 43
self.c = [0 for x in xrange(43)]
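    # This implements a subtract-with-carry (lagged Fibonacci) recurrence:
    #   x[i] = (x[i-22] - x[i-43] - c[i-1]) mod 2**32
    # with carry c[i] = 1 whenever the raw difference goes negative, seeded
    # by the 43 values supplied to the constructor.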
def random(self):
r = 43
s = 22
m = 2 ** 32
val = self.randoms[self.curr - s] - self.randoms[self.curr - r] - self.c[self.curr - 1]
self.randoms.append(val % m)
self.c.append(1 if val < 0 else 0)
self.curr += 1
return self.randoms[self.curr - 1]
def applymove(diagram, move, rg):
if move not in ['l', 'r']:
return
lastval = -1
newdiagram = []
if move == 'l':
for val in diagram:
if val == 0:
continue
if val == lastval:
newdiagram[-1] = val + lastval
lastval = -1
else:
newdiagram.append(val)
lastval = val
else:
for val in reversed(diagram):
if val == 0:
continue
if val == lastval:
newdiagram[0] = val + lastval
lastval = -1
else:
newdiagram.insert(0, val)
lastval = val
num_empty = len(diagram) - len(newdiagram)
if num_empty == 0:
return newdiagram
emptycells = [0 for _ in xrange(num_empty)]
offset = 0
if move == 'l':
offset = len(newdiagram)
newdiagram.extend(emptycells)
else:
emptycells.extend(newdiagram)
newdiagram = emptycells
if diagram == newdiagram:
return newdiagram
(new_value, pos) = getRandomNumber(rg, num_empty)
newdiagram[offset + pos] = new_value
return newdiagram
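# Worked example (illustrative): a left move on [2, 2, 4, 0] merges the
# leading pair into 4 giving [4, 4], pads to [4, 4, 0, 0], then drops a
# random 2 (or a 4, with probability 1/10) into one of the two empty cells.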
def getRandomNumber(rg, num_empty):
pos = rg.random() % num_empty
new_value = 0
if rg.random() % 10 == 0:
new_value = 4
else:
new_value = 2
return (new_value, pos)
class Test(unittest.TestCase):
def test_def(self):
pass
def main(stdin, stdout):
casecount = int(stdin.readline())
for case in range(casecount):
stdin.readline()
strip_size = int(stdin.readline().strip())
diagram = [int(x) for x in stdin.readline().strip().split()]
arr = [int(x) for x in stdin.readline().strip().split()]
rg = RandomGenerator(arr)
mode = 'b1'
if mode == 'b1':
moves_count = stdin.readline()
moves = stdin.readline().strip()
# b1 solution
for move in moves:
diagram = applymove(diagram, move, rg)
print ' '.join(str(x) for x in diagram)
else:
# b2 solution
pass
if __name__ == '__main__':
main(sys.stdin, sys.stdout)
| mit |
talhajaved/nyuadmarket | flask/lib/python2.7/site-packages/mako/pygen.py | 15 | 9447 | # mako/pygen.py
# Copyright (C) 2006-2013 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""utilities for generating and formatting literal Python code."""
import re
from mako import exceptions
class PythonPrinter(object):
def __init__(self, stream):
# indentation counter
self.indent = 0
# a stack storing information about why we incremented
# the indentation counter, to help us determine if we
# should decrement it
self.indent_detail = []
# the string of whitespace multiplied by the indent
# counter to produce a line
self.indentstring = " "
# the stream we are writing to
self.stream = stream
# a list of lines that represents a buffered "block" of code,
# which can be later printed relative to an indent level
self.line_buffer = []
self.in_indent_lines = False
self._reset_multi_line_flags()
def write(self, text):
self.stream.write(text)
def write_indented_block(self, block):
"""print a line or lines of python which already contain indentation.
The indentation of the total block of lines will be adjusted to that of
the current indent level."""
self.in_indent_lines = False
for l in re.split(r'\r?\n', block):
self.line_buffer.append(l)
def writelines(self, *lines):
"""print a series of lines of python."""
for line in lines:
self.writeline(line)
def writeline(self, line):
"""print a line of python, indenting it according to the current
indent level.
this also adjusts the indentation counter according to the
content of the line.
"""
if not self.in_indent_lines:
self._flush_adjusted_lines()
self.in_indent_lines = True
if (line is None or
re.match(r"^\s*#",line) or
re.match(r"^\s*$", line)
):
hastext = False
else:
hastext = True
is_comment = line and len(line) and line[0] == '#'
# see if this line should decrease the indentation level
if (not is_comment and
(not hastext or self._is_unindentor(line))
):
if self.indent > 0:
self.indent -=1
# if the indent_detail stack is empty, the user
# probably put extra closures - the resulting
                # module won't compile.
if len(self.indent_detail) == 0:
raise exceptions.SyntaxException(
"Too many whitespace closures")
self.indent_detail.pop()
if line is None:
return
# write the line
self.stream.write(self._indent_line(line) + "\n")
# see if this line should increase the indentation level.
# note that a line can both decrase (before printing) and
# then increase (after printing) the indentation level.
if re.search(r":[ \t]*(?:#.*)?$", line):
# increment indentation count, and also
# keep track of what the keyword was that indented us,
# if it is a python compound statement keyword
# where we might have to look for an "unindent" keyword
match = re.match(r"^\s*(if|try|elif|while|for|with)", line)
if match:
# its a "compound" keyword, so we will check for "unindentors"
indentor = match.group(1)
self.indent +=1
self.indent_detail.append(indentor)
else:
indentor = None
# its not a "compound" keyword. but lets also
# test for valid Python keywords that might be indenting us,
# else assume its a non-indenting line
m2 = re.match(r"^\s*(def|class|else|elif|except|finally)",
line)
if m2:
self.indent += 1
self.indent_detail.append(indentor)
def close(self):
"""close this printer, flushing any remaining lines."""
self._flush_adjusted_lines()
def _is_unindentor(self, line):
"""return true if the given line is an 'unindentor',
relative to the last 'indent' event received.
"""
# no indentation detail has been pushed on; return False
if len(self.indent_detail) == 0:
return False
indentor = self.indent_detail[-1]
# the last indent keyword we grabbed is not a
# compound statement keyword; return False
if indentor is None:
return False
# if the current line doesnt have one of the "unindentor" keywords,
# return False
match = re.match(r"^\s*(else|elif|except|finally).*\:", line)
if not match:
return False
# whitespace matches up, we have a compound indentor,
# and this line has an unindentor, this
# is probably good enough
return True
# should we decide that its not good enough, heres
# more stuff to check.
#keyword = match.group(1)
# match the original indent keyword
#for crit in [
# (r'if|elif', r'else|elif'),
# (r'try', r'except|finally|else'),
# (r'while|for', r'else'),
#]:
# if re.match(crit[0], indentor) and re.match(crit[1], keyword):
# return True
#return False
def _indent_line(self, line, stripspace=''):
"""indent the given line according to the current indent level.
stripspace is a string of space that will be truncated from the
start of the line before indenting."""
return re.sub(r"^%s" % stripspace, self.indentstring
* self.indent, line)
def _reset_multi_line_flags(self):
"""reset the flags which would indicate we are in a backslashed
or triple-quoted section."""
self.backslashed, self.triplequoted = False, False
def _in_multi_line(self, line):
"""return true if the given line is part of a multi-line block,
via backslash or triple-quote."""
# we are only looking for explicitly joined lines here, not
# implicit ones (i.e. brackets, braces etc.). this is just to
# guard against the possibility of modifying the space inside of
# a literal multiline string with unfortunately placed
# whitespace
current_state = (self.backslashed or self.triplequoted)
if re.search(r"\\$", line):
self.backslashed = True
else:
self.backslashed = False
triples = len(re.findall(r"\"\"\"|\'\'\'", line))
if triples == 1 or triples % 2 != 0:
self.triplequoted = not self.triplequoted
return current_state
def _flush_adjusted_lines(self):
stripspace = None
self._reset_multi_line_flags()
for entry in self.line_buffer:
if self._in_multi_line(entry):
self.stream.write(entry + "\n")
else:
entry = entry.expandtabs()
if stripspace is None and re.search(r"^[ \t]*[^# \t]", entry):
stripspace = re.match(r"^([ \t]*)", entry).group(1)
self.stream.write(self._indent_line(entry, stripspace) + "\n")
self.line_buffer = []
self._reset_multi_line_flags()
def adjust_whitespace(text):
"""remove the left-whitespace margin of a block of Python code."""
state = [False, False]
(backslashed, triplequoted) = (0, 1)
def in_multi_line(line):
start_state = (state[backslashed] or state[triplequoted])
if re.search(r"\\$", line):
state[backslashed] = True
else:
state[backslashed] = False
def match(reg, t):
m = re.match(reg, t)
if m:
return m, t[len(m.group(0)):]
else:
return None, t
while line:
if state[triplequoted]:
m, line = match(r"%s" % state[triplequoted], line)
if m:
state[triplequoted] = False
else:
m, line = match(r".*?(?=%s|$)" % state[triplequoted], line)
else:
m, line = match(r'#', line)
if m:
return start_state
m, line = match(r"\"\"\"|\'\'\'", line)
if m:
state[triplequoted] = m.group(0)
continue
m, line = match(r".*?(?=\"\"\"|\'\'\'|#|$)", line)
return start_state
def _indent_line(line, stripspace = ''):
return re.sub(r"^%s" % stripspace, '', line)
lines = []
stripspace = None
for line in re.split(r'\r?\n', text):
if in_multi_line(line):
lines.append(line)
else:
line = line.expandtabs()
if stripspace is None and re.search(r"^[ \t]*[^# \t]", line):
stripspace = re.match(r"^([ \t]*)", line).group(1)
lines.append(_indent_line(line, stripspace))
return "\n".join(lines)
| mit |
ClusterLabs/pacemaker-1.0 | cts/CTSscenarios.py | 1 | 16516 | from CTS import *
from CTStests import CTSTest
from CTSaudits import ClusterAudit
class ScenarioComponent:
def __init__(self, Env):
self.Env = Env
def IsApplicable(self):
'''Return TRUE if the current ScenarioComponent is applicable
in the given LabEnvironment given to the constructor.
'''
raise ValueError("Abstract Class member (IsApplicable)")
def SetUp(self, CM):
'''Set up the given ScenarioComponent'''
raise ValueError("Abstract Class member (Setup)")
def TearDown(self, CM):
'''Tear down (undo) the given ScenarioComponent'''
raise ValueError("Abstract Class member (Setup)")
class Scenario:
(
'''The basic idea of a scenario is that of an ordered list of
ScenarioComponent objects. Each ScenarioComponent is SetUp() in turn,
and then after the tests have been run, they are torn down using TearDown()
(in reverse order).
A Scenario is applicable to a particular cluster manager iff each
ScenarioComponent is applicable.
A partially set up scenario is torn down if it fails during setup.
''')
def __init__(self, ClusterManager, Components, Audits, Tests):
"Initialize the Scenario from the list of ScenarioComponents"
self.ClusterManager = ClusterManager
self.Components = Components
self.Audits = Audits
self.Tests = Tests
self.BadNews = None
self.TestSets = []
self.Stats = {"success":0, "failure":0, "BadNews":0, "skipped":0}
self.Sets = []
#self.ns=CTS.NodeStatus(self.Env)
for comp in Components:
if not issubclass(comp.__class__, ScenarioComponent):
raise ValueError("Init value must be subclass of ScenarioComponent")
for audit in Audits:
if not issubclass(audit.__class__, ClusterAudit):
raise ValueError("Init value must be subclass of ClusterAudit")
for test in Tests:
if not issubclass(test.__class__, CTSTest):
raise ValueError("Init value must be a subclass of CTSTest")
def IsApplicable(self):
(
'''A Scenario IsApplicable() iff each of its ScenarioComponents IsApplicable()
'''
)
for comp in self.Components:
if not comp.IsApplicable():
return None
return 1
def SetUp(self):
'''Set up the Scenario. Return TRUE on success.'''
self.ClusterManager.prepare()
self.ClusterManager.ns.WaitForAllNodesToComeUp(self.ClusterManager.Env["nodes"])
self.audit()
if self.ClusterManager.Env["valgrind-tests"]:
self.ClusterManager.install_helper("cts.supp")
self.BadNews = LogWatcher(self.ClusterManager.Env,
self.ClusterManager["LogFileName"],
self.ClusterManager["BadRegexes"], "BadNews", 0)
self.BadNews.setwatch() # Call after we've figured out what type of log watching to do in LogAudit
j=0
while j < len(self.Components):
if not self.Components[j].SetUp(self.ClusterManager):
# OOPS! We failed. Tear partial setups down.
self.audit()
self.ClusterManager.log("Tearing down partial setup")
self.TearDown(j)
return None
j=j+1
self.audit()
return 1
def TearDown(self, max=None):
'''Tear Down the Scenario - in reverse order.'''
if max == None:
max = len(self.Components)-1
j=max
while j >= 0:
self.Components[j].TearDown(self.ClusterManager)
j=j-1
self.audit()
def incr(self, name):
'''Increment (or initialize) the value associated with the given name'''
if not self.Stats.has_key(name):
self.Stats[name]=0
self.Stats[name] = self.Stats[name]+1
def run(self, Iterations):
self.ClusterManager.oprofileStart()
try:
self.run_loop(Iterations)
self.ClusterManager.oprofileStop()
except:
self.ClusterManager.oprofileStop()
raise
def run_loop(self, Iterations):
raise ValueError("Abstract Class member (run_loop)")
def run_test(self, test, testcount):
nodechoice = self.ClusterManager.Env.RandomNode()
ret = 1
where = ""
did_run = 0
self.ClusterManager.log(("Running test %s" % test.name).ljust(35) + (" (%s) " % nodechoice).ljust(15) +"["+ ("%d" % testcount).rjust(3) +"]")
starttime = test.set_timer()
if not test.setup(nodechoice):
self.ClusterManager.log("Setup failed")
ret = 0
elif not test.canrunnow(nodechoice):
self.ClusterManager.log("Skipped")
test.skipped()
else:
did_run = 1
ret = test(nodechoice)
if not test.teardown(nodechoice):
self.ClusterManager.log("Teardown failed")
answer = raw_input('Continue? [nY] ')
if answer and answer == "n":
raise ValueError("Teardown of %s on %s failed" % (test.name, nodechoice))
ret = 0
stoptime=time.time()
self.ClusterManager.oprofileSave(testcount)
elapsed_time = stoptime - starttime
test_time = stoptime - test.get_timer()
if not test.has_key("min_time"):
test["elapsed_time"] = elapsed_time
test["min_time"] = test_time
test["max_time"] = test_time
else:
test["elapsed_time"] = test["elapsed_time"] + elapsed_time
if test_time < test["min_time"]:
test["min_time"] = test_time
if test_time > test["max_time"]:
test["max_time"] = test_time
if ret:
self.incr("success")
test.log_timer()
else:
self.incr("failure")
self.ClusterManager.statall()
            did_run = 1 # Force the test count to be incremented anyway so test extraction works
self.audit(test.errorstoignore())
return did_run
def summarize(self):
self.ClusterManager.log("****************")
self.ClusterManager.log("Overall Results:" + repr(self.Stats))
self.ClusterManager.log("****************")
stat_filter = {
"calls":0,
"failure":0,
"skipped":0,
"auditfail":0,
}
self.ClusterManager.log("Test Summary")
for test in self.Tests:
for key in stat_filter.keys():
stat_filter[key] = test.Stats[key]
self.ClusterManager.log(("Test %s: "%test.name).ljust(25) + " %s"%repr(stat_filter))
self.ClusterManager.debug("Detailed Results")
for test in self.Tests:
self.ClusterManager.debug(("Test %s: "%test.name).ljust(25) + " %s"%repr(test.Stats))
self.ClusterManager.log("<<<<<<<<<<<<<<<< TESTS COMPLETED")
def audit(self, LocalIgnore=[]):
errcount=0
ignorelist = []
ignorelist.append("CTS:")
ignorelist.extend(LocalIgnore)
ignorelist.extend(self.ClusterManager.errorstoignore())
# This makes sure everything is stabilized before starting...
failed = 0
for audit in self.Audits:
if not audit():
self.ClusterManager.log("Audit " + audit.name() + " FAILED.")
failed += 1
else:
self.ClusterManager.debug("Audit " + audit.name() + " passed.")
while errcount < 1000:
match = None
if self.BadNews:
match=self.BadNews.look(0)
if match:
add_err = 1
for ignore in ignorelist:
if add_err == 1 and re.search(ignore, match):
add_err = 0
if add_err == 1:
self.ClusterManager.log("BadNews: " + match)
self.incr("BadNews")
errcount=errcount+1
else:
break
else:
answer = raw_input('Big problems. Continue? [nY]')
if answer and answer == "n":
self.ClusterManager.log("Shutting down.")
self.summarize()
self.TearDown()
raise ValueError("Looks like we hit a BadNews jackpot!")
return failed
class AllOnce(Scenario):
    '''Every Test Once''' # Accessible as __doc__
def run_loop(self, Iterations):
testcount=1
for test in self.Tests:
self.run_test(test, testcount)
testcount += 1
class RandomTests(Scenario):
'''Random Test Execution'''
def run_loop(self, Iterations):
testcount=1
while testcount <= Iterations:
test = self.ClusterManager.Env.RandomGen.choice(self.Tests)
self.run_test(test, testcount)
testcount += 1
class BasicSanity(Scenario):
'''Basic Cluster Sanity'''
def run_loop(self, Iterations):
testcount=1
while testcount <= Iterations:
            test = self.ClusterManager.Env.RandomGen.choice(self.Tests)
self.run_test(test, testcount)
testcount += 1
class Sequence(Scenario):
'''Named Tests in Sequence'''
def run_loop(self, Iterations):
testcount=1
while testcount <= Iterations:
for test in self.Tests:
self.run_test(test, testcount)
testcount += 1
class InitClusterManager(ScenarioComponent):
(
'''InitClusterManager is the most basic of ScenarioComponents.
This ScenarioComponent simply starts the cluster manager on all the nodes.
It is fairly robust as it waits for all nodes to come up before starting
as they might have been rebooted or crashed for some reason beforehand.
''')
def __init__(self, Env):
pass
def IsApplicable(self):
'''InitClusterManager is so generic it is always Applicable'''
return 1
def SetUp(self, CM):
'''Basic Cluster Manager startup. Start everything'''
CM.prepare()
# Clear out the cobwebs ;-)
self.TearDown(CM)
# Now start the Cluster Manager on all the nodes.
CM.log("Starting Cluster Manager on all nodes.")
return CM.startall(verbose=True)
def TearDown(self, CM):
'''Set up the given ScenarioComponent'''
# Stop the cluster manager everywhere
CM.log("Stopping Cluster Manager on all nodes")
return CM.stopall(verbose=True)
class PingFest(ScenarioComponent):
(
'''PingFest does a flood ping to each node in the cluster from the test machine.
If the LabEnvironment Parameter PingSize is set, it will be used as the size
of ping packet requested (via the -s option). If it is not set, it defaults
to 1024 bytes.
According to the manual page for ping:
Outputs packets as fast as they come back or one hundred times per
second, whichever is more. For every ECHO_REQUEST sent a period ``.''
is printed, while for every ECHO_REPLY received a backspace is printed.
This provides a rapid display of how many packets are being dropped.
    Only the super-user may use this option. This can be very hard on a
    network and should be used with caution.
''' )
def __init__(self, Env):
self.Env = Env
def IsApplicable(self):
'''PingFests are always applicable ;-)
'''
return 1
def SetUp(self, CM):
'''Start the PingFest!'''
self.PingSize=1024
if CM.Env.has_key("PingSize"):
self.PingSize=CM.Env["PingSize"]
CM.log("Starting %d byte flood pings" % self.PingSize)
self.PingPids=[]
for node in CM.Env["nodes"]:
self.PingPids.append(self._pingchild(node))
CM.log("Ping PIDs: " + repr(self.PingPids))
return 1
def TearDown(self, CM):
'''Stop it right now! My ears are pinging!!'''
for pid in self.PingPids:
if pid != None:
CM.log("Stopping ping process %d" % pid)
os.kill(pid, signal.SIGKILL)
def _pingchild(self, node):
Args = ["ping", "-qfn", "-s", str(self.PingSize), node]
sys.stdin.flush()
sys.stdout.flush()
sys.stderr.flush()
pid = os.fork()
if pid < 0:
self.Env.log("Cannot fork ping child")
return None
if pid > 0:
return pid
# Otherwise, we're the child process.
os.execvp("ping", Args)
self.Env.log("Cannot execvp ping: " + repr(Args))
sys.exit(1)
class PacketLoss(ScenarioComponent):
(
'''
It would be useful to do some testing of CTS with a modest amount of packet loss
enabled - so we could see that everything runs like it should with a certain
amount of packet loss present.
''')
def IsApplicable(self):
'''always Applicable'''
return 1
def SetUp(self, CM):
'''Reduce the reliability of communications'''
if float(CM.Env["XmitLoss"]) == 0 and float(CM.Env["RecvLoss"]) == 0 :
return 1
for node in CM.Env["nodes"]:
CM.reducecomm_node(node)
CM.log("Reduce the reliability of communications")
return 1
def TearDown(self, CM):
'''Fix the reliability of communications'''
if float(CM.Env["XmitLoss"]) == 0 and float(CM.Env["RecvLoss"]) == 0 :
return 1
for node in CM.Env["nodes"]:
CM.unisolate_node(node)
CM.log("Fix the reliability of communications")
class BasicSanityCheck(ScenarioComponent):
(
'''
''')
def IsApplicable(self):
return self.Env["DoBSC"]
def SetUp(self, CM):
CM.prepare()
# Clear out the cobwebs
self.TearDown(CM)
# Now start the Cluster Manager on all the nodes.
CM.log("Starting Cluster Manager on BSC node(s).")
return CM.startall()
def TearDown(self, CM):
CM.log("Stopping Cluster Manager on BSC node(s).")
return CM.stopall()
class Benchmark(ScenarioComponent):
(
'''
''')
def IsApplicable(self):
return self.Env["benchmark"]
def SetUp(self, CM):
CM.prepare()
# Clear out the cobwebs
self.TearDown(CM)
# Now start the Cluster Manager on all the nodes.
CM.log("Starting Cluster Manager on all node(s).")
return CM.startall()
def TearDown(self, CM):
CM.log("Stopping Cluster Manager on all node(s).")
return CM.stopall()
class RollingUpgrade(ScenarioComponent):
(
'''
Test a rolling upgrade between two versions of the stack
''')
def __init__(self, Env):
self.Env = Env
def IsApplicable(self):
if not self.Env["rpm-dir"]:
return None
if not self.Env["current-version"]:
return None
if not self.Env["previous-version"]:
return None
return 1
def install(self, node, version):
target_dir = "/tmp/rpm-%s" % version
src_dir = "%s/%s" % (self.CM.Env["rpm-dir"], version)
rc = self.CM.rsh(node, "mkdir -p %s" % target_dir)
rc = self.CM.cp("%s/*.rpm %s:%s" % (src_dir, node, target_dir))
rc = self.CM.rsh(node, "rpm -Uvh --force %s/*.rpm" % (target_dir))
        return 1
def upgrade(self, node):
return self.install(node, self.CM.Env["current-version"])
def downgrade(self, node):
return self.install(node, self.CM.Env["previous-version"])
def SetUp(self, CM):
CM.prepare()
# Clear out the cobwebs
CM.stopall()
CM.log("Downgrading all nodes to %s." % self.Env["previous-version"])
for node in self.Env["nodes"]:
if not self.downgrade(node):
CM.log("Couldn't downgrade %s" % node)
return None
return 1
def TearDown(self, CM):
# Stop everything
CM.log("Stopping Cluster Manager on Upgrade nodes.")
CM.stopall()
CM.log("Upgrading all nodes to %s." % self.Env["current-version"])
for node in self.Env["nodes"]:
if not self.upgrade(node):
CM.log("Couldn't upgrade %s" % node)
return None
return 1
| gpl-2.0 |
roshannaik/storm | dev-tools/test-ns.py | 23 | 1027 | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from subprocess import Popen, PIPE
import sys
import os
os.chdir("storm-core")
ns = sys.argv[1]
pipe = Popen(["mvn", "test", "-DfailIfNoTests=false", "-Dtest=%s"%ns])
pipe.wait()
os.chdir("..")
sys.exit(pipe.returncode)
| apache-2.0 |
fbsder/openthread | tools/harness-automation/cases/sed_9_2_10.py | 16 | 1871 | #!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
from autothreadharness.harness_case import HarnessCase
class SED_9_2_10(HarnessCase):
role = HarnessCase.ROLE_SED
case = '9 2 10'
golden_devices_required = 4
def on_dialog(self, dialog, title):
pass
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
amiguez/youtube-dl | test/test_swfinterp.py | 158 | 2224 | #!/usr/bin/env python
from __future__ import unicode_literals
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import errno
import io
import json
import re
import subprocess
from youtube_dl.swfinterp import SWFInterpreter
TEST_DIR = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'swftests')
class TestSWFInterpreter(unittest.TestCase):
pass
def _make_testfunc(testfile):
m = re.match(r'^(.*)\.(as)$', testfile)
if not m:
return
test_id = m.group(1)
def test_func(self):
as_file = os.path.join(TEST_DIR, testfile)
swf_file = os.path.join(TEST_DIR, test_id + '.swf')
if ((not os.path.exists(swf_file)) or
os.path.getmtime(swf_file) < os.path.getmtime(as_file)):
# Recompile
try:
subprocess.check_call([
'mxmlc', '-output', swf_file,
'-static-link-runtime-shared-libraries', as_file])
except OSError as ose:
if ose.errno == errno.ENOENT:
print('mxmlc not found! Skipping test.')
return
raise
with open(swf_file, 'rb') as swf_f:
swf_content = swf_f.read()
swfi = SWFInterpreter(swf_content)
with io.open(as_file, 'r', encoding='utf-8') as as_f:
as_content = as_f.read()
def _find_spec(key):
m = re.search(
r'(?m)^//\s*%s:\s*(.*?)\n' % re.escape(key), as_content)
if not m:
raise ValueError('Cannot find %s in %s' % (key, testfile))
return json.loads(m.group(1))
input_args = _find_spec('input')
output = _find_spec('output')
swf_class = swfi.extract_class(test_id)
func = swfi.extract_function(swf_class, 'main')
res = func(input_args)
self.assertEqual(res, output)
test_func.__name__ = str('test_swf_' + test_id)
setattr(TestSWFInterpreter, test_func.__name__, test_func)
for testfile in os.listdir(TEST_DIR):
_make_testfunc(testfile)
if __name__ == '__main__':
unittest.main()
| unlicense |
gauravbose/digital-menu | digimenu2/django/contrib/admindocs/urls.py | 574 | 1183 | from django.conf.urls import url
from django.contrib.admindocs import views
urlpatterns = [
url('^$',
views.BaseAdminDocsView.as_view(template_name='admin_doc/index.html'),
name='django-admindocs-docroot'),
url('^bookmarklets/$',
views.BookmarkletsView.as_view(),
name='django-admindocs-bookmarklets'),
url('^tags/$',
views.TemplateTagIndexView.as_view(),
name='django-admindocs-tags'),
url('^filters/$',
views.TemplateFilterIndexView.as_view(),
name='django-admindocs-filters'),
url('^views/$',
views.ViewIndexView.as_view(),
name='django-admindocs-views-index'),
url('^views/(?P<view>[^/]+)/$',
views.ViewDetailView.as_view(),
name='django-admindocs-views-detail'),
url('^models/$',
views.ModelIndexView.as_view(),
name='django-admindocs-models-index'),
url('^models/(?P<app_label>[^\.]+)\.(?P<model_name>[^/]+)/$',
views.ModelDetailView.as_view(),
name='django-admindocs-models-detail'),
url('^templates/(?P<template>.*)/$',
views.TemplateDetailView.as_view(),
name='django-admindocs-templates'),
]
| bsd-3-clause |
jabesq/home-assistant | tests/components/time_date/test_sensor.py | 8 | 4811 | """The tests for the time_date sensor platform."""
import unittest
from unittest.mock import patch
import homeassistant.components.time_date.sensor as time_date
import homeassistant.util.dt as dt_util
from tests.common import get_test_home_assistant
class TestTimeDateSensor(unittest.TestCase):
"""Tests the Kira Sensor platform."""
# pylint: disable=invalid-name
DEVICES = []
def add_entities(self, devices):
"""Mock add devices."""
for device in devices:
self.DEVICES.append(device)
def setUp(self):
"""Initialize values for this testcase class."""
self.hass = get_test_home_assistant()
self.DEFAULT_TIME_ZONE = dt_util.DEFAULT_TIME_ZONE
def tearDown(self):
"""Stop everything that was started."""
dt_util.set_default_time_zone(self.DEFAULT_TIME_ZONE)
self.hass.stop()
# pylint: disable=protected-access
def test_intervals(self):
"""Test timing intervals of sensors."""
device = time_date.TimeDateSensor(self.hass, 'time')
now = dt_util.utc_from_timestamp(45)
next_time = device.get_next_interval(now)
assert next_time == dt_util.utc_from_timestamp(60)
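        # Swatch Internet Time: one .beat is 1/1000 of a day (86.4 s), so
        # from t=29 the next interval boundary falls at t=86.4.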
device = time_date.TimeDateSensor(self.hass, 'beat')
now = dt_util.utc_from_timestamp(29)
next_time = device.get_next_interval(now)
assert next_time == dt_util.utc_from_timestamp(86.4)
device = time_date.TimeDateSensor(self.hass, 'date_time')
now = dt_util.utc_from_timestamp(1495068899)
next_time = device.get_next_interval(now)
assert next_time == dt_util.utc_from_timestamp(1495068900)
now = dt_util.utcnow()
device = time_date.TimeDateSensor(self.hass, 'time_date')
next_time = device.get_next_interval()
assert next_time > now
def test_states(self):
"""Test states of sensors."""
now = dt_util.utc_from_timestamp(1495068856)
device = time_date.TimeDateSensor(self.hass, 'time')
device._update_internal_state(now)
assert device.state == "00:54"
device = time_date.TimeDateSensor(self.hass, 'date')
device._update_internal_state(now)
assert device.state == "2017-05-18"
device = time_date.TimeDateSensor(self.hass, 'time_utc')
device._update_internal_state(now)
assert device.state == "00:54"
device = time_date.TimeDateSensor(self.hass, 'beat')
device._update_internal_state(now)
assert device.state == "@079"
device = time_date.TimeDateSensor(self.hass, 'date_time_iso')
device._update_internal_state(now)
assert device.state == "2017-05-18T00:54:00"
# pylint: disable=no-member
def test_timezone_intervals(self):
"""Test date sensor behavior in a timezone besides UTC."""
new_tz = dt_util.get_time_zone('America/New_York')
assert new_tz is not None
dt_util.set_default_time_zone(new_tz)
device = time_date.TimeDateSensor(self.hass, 'date')
now = dt_util.utc_from_timestamp(50000)
next_time = device.get_next_interval(now)
# start of local day in EST was 18000.0
# so the second day was 18000 + 86400
assert next_time.timestamp() == 104400
new_tz = dt_util.get_time_zone('America/Edmonton')
assert new_tz is not None
dt_util.set_default_time_zone(new_tz)
now = dt_util.parse_datetime('2017-11-13 19:47:19-07:00')
device = time_date.TimeDateSensor(self.hass, 'date')
next_time = device.get_next_interval(now)
assert (next_time.timestamp() ==
dt_util.as_timestamp('2017-11-14 00:00:00-07:00'))
@patch('homeassistant.util.dt.utcnow',
return_value=dt_util.parse_datetime('2017-11-14 02:47:19-00:00'))
def test_timezone_intervals_empty_parameter(self, _):
"""Test get_interval() without parameters."""
new_tz = dt_util.get_time_zone('America/Edmonton')
assert new_tz is not None
dt_util.set_default_time_zone(new_tz)
device = time_date.TimeDateSensor(self.hass, 'date')
next_time = device.get_next_interval()
assert (next_time.timestamp() ==
dt_util.as_timestamp('2017-11-14 00:00:00-07:00'))
def test_icons(self):
"""Test attributes of sensors."""
device = time_date.TimeDateSensor(self.hass, 'time')
assert device.icon == "mdi:clock"
device = time_date.TimeDateSensor(self.hass, 'date')
assert device.icon == "mdi:calendar"
device = time_date.TimeDateSensor(self.hass, 'date_time')
assert device.icon == "mdi:calendar-clock"
device = time_date.TimeDateSensor(self.hass, 'date_time_iso')
assert device.icon == "mdi:calendar-clock"
| apache-2.0 |
Konubinix/uzbl | tests/event-manager/testcore.py | 8 | 2874 | #!/usr/bin/env python
# vi: set et ts=4:
import six
import unittest
from mock import Mock
from uzbl.core import Uzbl
class TestUzbl(unittest.TestCase):
def setUp(self):
options = Mock()
options.print_events = False
self.em = Mock()
self.proto = Mock()
self.uzbl = Uzbl(self.em, self.proto, options)
def test_repr(self):
r = '%r' % self.uzbl
six.assertRegex(self, r, r'<uzbl\(.*\)>')
def test_event_handler(self):
handler = Mock()
event, arg = 'FOO', 'test'
self.uzbl.connect(event, handler)
self.uzbl.event(event, arg)
handler.assert_called_once_with(arg)
def test_parse_sets_name(self):
name = 'spam'
self.uzbl.parse_msg(' '.join(['EVENT', name, 'FOO', 'BAR']))
self.assertEqual(self.uzbl.name, name)
def test_parse_sends_event(self):
handler = Mock()
event, arg = 'FOO', 'test'
self.uzbl.connect(event, handler)
self.uzbl.parse_msg(' '.join(['EVENT', 'instance-name', event, arg]))
handler.assert_called_once_with(arg)
def test_malformed_message(self):
# Should not crash
self.uzbl.parse_msg('asdaf')
self.assertTrue(True)
def test_send(self):
self.uzbl.send('hello ')
self.proto.push.assert_called_once_with('hello\n'.encode('utf-8'))
def test_close_calls_remove_instance(self):
self.uzbl.close()
self.em.remove_instance.assert_called_once_with(self.proto.socket)
def test_close_cleans_plugins(self):
p1, p2 = Mock(), Mock()
self.uzbl._plugin_instances = (p1, p2)
self.uzbl.plugins = {}
self.uzbl.close()
p1.cleanup.assert_called_once_with()
p2.cleanup.assert_called_once_with()
def test_close_connection_closes_protocol(self):
self.uzbl.close_connection(Mock())
self.proto.close.assert_called_once_with()
def test_exit_triggers_close(self):
self.uzbl.parse_msg(' '.join(['EVENT', 'spam', 'INSTANCE_EXIT']))
self.em.remove_instance.assert_called_once_with(self.proto.socket)
def test_instance_start(self):
pid = 1234
self.em.plugind.per_instance_plugins = []
self.uzbl.parse_msg(
' '.join(['EVENT', 'spam', 'INSTANCE_START', str(pid)])
)
self.assertEqual(self.uzbl.pid, pid)
def test_init_plugins(self):
u = self.uzbl
class FooPlugin(object):
def __init__(self, uzbl): pass
class BarPlugin(object):
def __init__(self, uzbl): pass
self.em.plugind.per_instance_plugins = [FooPlugin, BarPlugin]
u.init_plugins()
self.assertEqual(len(u.plugins), 2)
for t in (FooPlugin, BarPlugin):
self.assertIn(t, u.plugins)
self.assertTrue(isinstance(u.plugins[t], t))
| gpl-3.0 |
danwangyiqie/danwangyiqie.github.io | crawler.py | 1 | 2335 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os,sys
import bs4
import datetime
import urllib2
from bs4 import BeautifulSoup
reload(sys)
sys.setdefaultencoding('utf8')
NEW_LAST_URL=''
TAG_FILE='.last_url'
def find_new_urls(opener):
global TAG_FILE
base_url=u'http://www.sexychinese.net/page/'
last_url=''
with open(TAG_FILE) as f:
last_url=f.readline().strip()
print last_url
targets=[]
for i in range(1,11):
url=base_url+str(i)
#print url
#print opener.open(url).read()
soup=BeautifulSoup(opener.open(url),'lxml')
links=soup.find_all("h2",class_="post-title")
for l in links:
current_url=l.a['href'].strip()
if current_url==last_url:
print '==========FOUND THE LAST ONE==========='
return targets
targets.append(current_url)
return targets
OUTROOT='./images'
if __name__=='__main__':
opener=urllib2.build_opener()
opener.addheaders=[('User-agent', 'Mozilla/5.0')]
targets=find_new_urls(opener)
    targets.reverse()
    if not targets:
        print 'no new urls found, nothing to do'
        sys.exit(0)
    NEW_LAST_URL=targets[-1]
#print NEW_LAST_URL
#print targets
subdirs=[ int(i) for i in os.listdir(OUTROOT) if '.' not in i]
subdirs.sort()
index=subdirs[-1]+1
print index
for t in targets:
if not os.path.exists('%s/%d'%(OUTROOT,index)):
os.makedirs('%s/%d'%(OUTROOT,index))
print index,len(targets),'\t',t
soup=BeautifulSoup(opener.open(t),'lxml')
print soup.title.string
        with open('%s/%d/title'%(OUTROOT,index),'w') as f_title:
            f_title.write(soup.title.string)
#tag_div=soup.find("div",class_="post-content")
tag_div=soup.find("div",class_="entry-inner")
for tag_img in tag_div.find_all('img'):
print tag_img["src"]
url=tag_img['src']
name=url.split('/')[-1]
#print name
            try:
                with open('%s/%d/%s'%(OUTROOT,index,name),'wb') as img_file:
                    img_file.write(opener.open(url).read())
            except Exception:
                print '[error] at url',url
#sys.exit(1)
index+=1
if len(NEW_LAST_URL)!=0:
with open(TAG_FILE,'w') as f:
f.write(NEW_LAST_URL)
| mit |
DaniilLeksin/gc | wx/tools/Editra/src/syntax/_ooc.py | 3 | 2634 | ###############################################################################
# Name: ooc.py #
# Purpose: Define OOC Syntax Highlighting Support and other features #
# Author: Cody Precord <[email protected]> #
# Copyright: (c) 2010 Cody Precord <[email protected]> #
# License: wxWindows License #
###############################################################################
"""
FILE: ooc.py
@author: Cody Precord
@summary: Lexer configuration file for OOC (Out of Class)
"""
__author__ = "Cody Precord <[email protected]>"
__svnid__ = "$Id: $"
__revision__ = "$Revision: $"
#-----------------------------------------------------------------------------#
# Imports
import wx.stc as stc
# Local imports
import synglob
import syndata
import _cpp
#-----------------------------------------------------------------------------#
#---- Keyword Specifications ----#
KEYWORDS = (0, "class cover interface implement func abstract extends from "
"this super new const final static include import use extern "
"inline proto break continue fallthrough operator if else for "
"while do switch case as in version return true false null "
"default")
TYPES = (1, "Int Int8 Int16 Int32 Int64 Int80 Int128 UInt UInt8 UInt16 UInt32 "
"UInt64 UInt80 UInt128 Octet Short UShort Long ULong LLong ULLong "
"Float Double LDouble Float32 Float64 Float128 Char UChar WChar "
"String Void Pointer Bool SizeT This")
#------------------------------------------------------------------------------#
class SyntaxData(syndata.SyntaxDataBase):
"""SyntaxData object for many OOC"""
def __init__(self, langid):
super(SyntaxData, self).__init__(langid)
# Setup
self.SetLexer(stc.STC_LEX_CPP)
def GetKeywords(self):
"""Returns Specified Keywords List"""
return [KEYWORDS, TYPES, _cpp.DOC_KEYWORDS]
def GetSyntaxSpec(self):
"""Syntax Specifications """
return _cpp.SYNTAX_ITEMS
def GetProperties(self):
"""Returns a list of Extra Properties to set"""
return [_cpp.FOLD, _cpp.FOLD_COM]
def GetCommentPattern(self):
"""Return comment pattern for OOC"""
return [u'//']
| apache-2.0 |
samepage-labs/luigi | luigi/contrib/sge.py | 11 | 11055 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""SGE batch system Tasks.
Adapted by Jake Feala (@jfeala) from
`LSF extension <https://github.com/dattalab/luigi/blob/lsf/luigi/lsf.py>`_
by Alex Wiltschko (@alexbw)
Maintained by Jake Feala (@jfeala)
SunGrid Engine is a job scheduler used to allocate compute resources on a
shared cluster. Jobs are submitted using the ``qsub`` command and monitored
using ``qstat``. To get started, install luigi on all nodes.
To run luigi workflows on an SGE cluster, subclass
:class:`luigi.contrib.sge.SGEJobTask` as you would any :class:`luigi.Task`,
but override the ``work()`` method, instead of ``run()``, to define the job
code. Then, run your Luigi workflow from the master node, assigning > 1
``workers`` in order to distribute the tasks in parallel across the cluster.
The following is an example usage (and can also be found in ``sge_tests.py``)
.. code-block:: python
import logging
import luigi
from luigi.contrib.sge import SGEJobTask
logger = logging.getLogger('luigi-interface')
class TestJobTask(SGEJobTask):
i = luigi.Parameter()
def work(self):
logger.info('Running test job...')
with open(self.output().path, 'w') as f:
f.write('this is a test')
def output(self):
return luigi.LocalTarget(os.path.join('/home', 'testfile_' + str(self.i)))
if __name__ == '__main__':
tasks = [TestJobTask(i=str(i), n_cpu=i+1) for i in range(3)]
luigi.build(tasks, local_scheduler=True, workers=3)
The ``n-cpu`` parameter allows you to define different compute resource
requirements (or slots, in SGE terms) for each task. In this example, the
third Task asks for 3 CPU slots. If your cluster only contains nodes with
2 CPUs, this task will hang indefinitely in the queue. See the docs for
:class:`luigi.contrib.sge.SGEJobTask` for other SGE parameters. As for any
task, you can also set these in your luigi configuration file as shown below.
The default values below were matched to the values used by MIT StarCluster,
an open-source SGE cluster manager for use with Amazon EC2::
[SGEJobTask]
shared-tmp-dir = /home
parallel-env = orte
n-cpu = 2
"""
# This extension is modeled after the hadoop.py approach.
#
# Implementation notes
# The procedure:
# - Pickle the class
# - Construct a qsub argument that runs a generic runner function with the path to the pickled class
# - Runner function loads the class from pickle
# - Runner function hits the work button on it
import os
import subprocess
import time
import sys
import logging
import random
import shutil
try:
import cPickle as pickle
except ImportError:
import pickle
import luigi
import luigi.hadoop
from luigi.contrib import sge_runner
logger = logging.getLogger('luigi-interface')
logger.propagate = 0
POLL_TIME = 5 # decided to hard-code rather than configure here
def _parse_qstat_state(qstat_out, job_id):
"""Parse "state" column from `qstat` output for given job_id
Returns state for the *first* job matching job_id. Returns 'u' if
`qstat` output is empty or job_id is not found.
"""
if qstat_out.strip() == '':
return 'u'
lines = qstat_out.split('\n')
# skip past header
while not lines.pop(0).startswith('---'):
pass
for line in lines:
if line:
job, prior, name, user, state = line.strip().split()[0:5]
if int(job) == int(job_id):
return state
return 'u'
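# Illustrative sketch (not part of the original module): a hand-written
# `qstat` table and the state _parse_qstat_state would find in it. The job
# id and column values below are made up for demonstration purposes.
_EXAMPLE_QSTAT_OUT = """\
job-ID  prior   name     user   state submit/start at     queue
---------------------------------------------------------------
 12345  0.5550  my-job   alice  r     05/18/2017 00:54:00 all.q
"""
# _parse_qstat_state(_EXAMPLE_QSTAT_OUT, 12345) -> 'r'; unknown ids -> 'u'.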
def _parse_qsub_job_id(qsub_out):
"""Parse job id from qsub output string.
Assume format:
"Your job <job_id> ("<job_name>") has been submitted"
"""
return int(qsub_out.split()[2])
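# Illustrative only: given the documented response format,
#   _parse_qsub_job_id('Your job 12345 ("my-job") has been submitted')
# returns 12345, the third whitespace-separated token cast to int.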
def _build_qsub_command(cmd, job_name, outfile, errfile, pe, n_cpu):
"""Submit shell command to SGE queue via `qsub`"""
qsub_template = """echo {cmd} | qsub -o ":{outfile}" -e ":{errfile}" -V -r y -pe {pe} {n_cpu} -N {job_name}"""
return qsub_template.format(
cmd=cmd, job_name=job_name, outfile=outfile, errfile=errfile,
pe=pe, n_cpu=n_cpu)
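# Hedged example of the generated shell command (paths are hypothetical):
#   _build_qsub_command('python runner.py /tmp/work', 'TestJobTask',
#                       '/home/tmp/job.out', '/home/tmp/job.err', 'orte', 2)
# -> echo python runner.py /tmp/work | qsub -o ":/home/tmp/job.out"
#    -e ":/home/tmp/job.err" -V -r y -pe orte 2 -N TestJobTask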
class SGEJobTask(luigi.Task):
"""Base class for executing a job on SunGrid Engine
Override ``work()`` (rather than ``run()``) with your job code.
Parameters:
- n_cpu: Number of CPUs (or "slots") to allocate for the Task. This
value is passed as ``qsub -pe {pe} {n_cpu}``
- parallel_env: SGE parallel environment name. The default is "orte",
the parallel environment installed with MIT StarCluster. If you
are using a different cluster environment, check with your
sysadmin for the right pe to use. This value is passed as {pe}
to the qsub command above.
- shared_tmp_dir: Shared drive accessible from all nodes in the cluster.
Task classes and dependencies are pickled to a temporary folder on
this drive. The default is ``/home``, the NFS share location setup
by StarCluster
"""
n_cpu = luigi.IntParameter(default=2, significant=False)
shared_tmp_dir = luigi.Parameter(default='/home', significant=False)
parallel_env = luigi.Parameter(default='orte', significant=False)
def _fetch_task_failures(self):
if not os.path.exists(self.errfile):
logger.info('No error file')
return []
with open(self.errfile, "r") as f:
errors = f.readlines()
if errors == []:
return errors
if errors[0].strip() == 'stdin: is not a tty': # SGE complains when we submit through a pipe
errors.pop(0)
return errors
def _init_local(self):
# Set up temp folder in shared directory (trim to max filename length)
base_tmp_dir = self.shared_tmp_dir
random_id = '%016x' % random.getrandbits(64)
folder_name = self.task_id + '-' + random_id
self.tmp_dir = os.path.join(base_tmp_dir, folder_name)
max_filename_length = os.fstatvfs(0).f_namemax
self.tmp_dir = self.tmp_dir[:max_filename_length]
logger.info("Tmp dir: %s", self.tmp_dir)
os.makedirs(self.tmp_dir)
# Dump the code to be run into a pickle file
logging.debug("Dumping pickled class")
self._dump(self.tmp_dir)
# Make sure that all the class's dependencies are tarred and available
logging.debug("Tarballing dependencies")
# Grab luigi and the module containing the code to be run
packages = [luigi] + [__import__(self.__module__, None, None, 'dummy')]
luigi.hadoop.create_packages_archive(packages, os.path.join(self.tmp_dir, "packages.tar"))
def run(self):
self._init_local()
self._run_job()
# The procedure:
# - Pickle the class
# - Tarball the dependencies
# - Construct a qsub argument that runs a generic runner function with the path to the pickled class
# - Runner function loads the class from pickle
# - Runner class untars the dependencies
# - Runner function hits the button on the class's work() method
def work(self):
"""Override this method, rather than ``run()``, for your actual work."""
pass
def _dump(self, out_dir=''):
"""Dump instance to file."""
self.job_file = os.path.join(out_dir, 'job-instance.pickle')
if self.__module__ == '__main__':
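            # The task class was defined in the __main__ script, so rewrite
            # the pickled module reference to the script's real module name;
            # otherwise the worker node could not unpickle the instance.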
d = pickle.dumps(self)
module_name = os.path.basename(sys.argv[0]).rsplit('.', 1)[0]
d = d.replace('(c__main__', "(c" + module_name)
open(self.job_file, "w").write(d)
else:
pickle.dump(self, open(self.job_file, "w"))
def _run_job(self):
# Build a qsub argument that will run sge_runner.py on the directory we've specified
runner_path = sge_runner.__file__
if runner_path.endswith("pyc"):
runner_path = runner_path[:-3] + "py"
job_str = 'python {0} "{1}"'.format(runner_path, self.tmp_dir) # enclose tmp_dir in quotes to protect from special escape chars
# Build qsub submit command
self.outfile = os.path.join(self.tmp_dir, 'job.out')
self.errfile = os.path.join(self.tmp_dir, 'job.err')
submit_cmd = _build_qsub_command(job_str, self.task_family, self.outfile,
self.errfile, self.parallel_env, self.n_cpu)
logger.debug('qsub command: \n' + submit_cmd)
# Submit the job and grab job ID
output = subprocess.check_output(submit_cmd, shell=True)
self.job_id = _parse_qsub_job_id(output)
logger.debug("Submitted job to qsub with response:\n" + output)
self._track_job()
# Now delete the temporaries, if they're there.
if self.tmp_dir and os.path.exists(self.tmp_dir):
logger.info('Removing temporary directory %s' % self.tmp_dir)
shutil.rmtree(self.tmp_dir)
def _track_job(self):
while True:
# Sleep for a little bit
time.sleep(POLL_TIME)
# See what the job's up to
            # ASSUMPTION: a bare `qstat` call lists all of this user's active
            # jobs, so our job id drops out of the output once it finishes
qstat_out = subprocess.check_output(['qstat'])
sge_status = _parse_qstat_state(qstat_out, self.job_id)
if sge_status == 'r':
logger.info('Job is running...')
elif sge_status == 'qw':
logger.info('Job is pending...')
elif 'E' in sge_status:
logger.error('Job has FAILED:\n' + '\n'.join(self._fetch_task_failures()))
break
elif sge_status == 't' or sge_status == 'u':
# Then the job could either be failed or done.
errors = self._fetch_task_failures()
if not errors:
logger.info('Job is done')
else:
logger.error('Job has FAILED:\n' + '\n'.join(errors))
break
else:
logger.info('Job status is UNKNOWN!')
logger.info('Status is : %s' % sge_status)
raise Exception("job status isn't one of ['r', 'qw', 'E*', 't', 'u']: %s" % sge_status)
class LocalSGEJobTask(SGEJobTask):
"""A local version of SGEJobTask, for easier debugging.
This version skips the ``qsub`` steps and simply runs ``work()``
on the local node, so you don't need to be on an SGE cluster to
use your Task in a test workflow.
"""
def run(self):
self.work()
| apache-2.0 |
hackoder/django-masquerade | masquerade/tests.py | 1 | 3250 | from django.conf import settings
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.http import HttpRequest
from django.test import TestCase, Client
from mock import Mock
from masquerade.middleware import MasqueradeMiddleware
import masquerade.views
class MasqueradeTestCase(TestCase):
"""
Tests for django-masquerade
"""
def setUp(self):
u = User.objects.create_user(username='generic',
email='[email protected]', password='abc123')
u.is_staff = False
u.is_superuser = False
u.save()
u = User.objects.create_user(username='super',
email='[email protected]', password='abc123')
u.is_staff = True
u.is_superuser = True
u.save()
u = User.objects.create_user(username='staff',
email='[email protected]', password='abc123')
u.is_staff = True
u.is_superuser = False
u.save()
def test_mask_form_permissions(self):
settings.MASQUERADE_REQUIRE_SUPERUSER = False
# log in as superuser
c = Client()
self.assert_(c.login(username='super', password='abc123'))
# hit masquerade form with bad username
response = c.post(reverse('masquerade.views.mask'), {'mask_user': 'nobody'})
# verify form comes back with error
self.assert_(response.status_code == 200)
self.assert_(response.context['form'].is_valid() == False)
# hit masquerade form with generic username
response = c.post(reverse('masquerade.views.mask'),
{'mask_user': 'generic'})
self.assert_(response.status_code == 302)
# make sure non-staff user cannot user form
c = Client()
c.login(username='generic', password='abc123')
response = c.post(reverse('masquerade.views.mask'), {'mask_user': 'nobody'})
self.assert_(response.status_code == 403)
# make sure staff user can use form
c = Client()
c.login(username='staff', password='abc123')
response = c.post(reverse('masquerade.views.mask'), {'mask_user': 'nobody'})
self.assert_(response.status_code == 200)
# ... unless require superuser setting is true.
masquerade.views.MASQUERADE_REQUIRE_SUPERUSER = True
c = Client()
c.login(username='staff', password='abc123')
response = c.post(reverse('masquerade.views.mask'), {'mask_user': 'nobody'})
self.assert_(response.status_code == 403)
def test_mask(self):
mw = MasqueradeMiddleware()
request = Mock(spec=HttpRequest)
request.session = {'mask_user': 'generic'}
request.user = User.objects.get(username='super')
mw.process_request(request)
self.assert_(request.user.is_masked == True)
self.assert_(request.user == User.objects.get(username='generic'))
def test_unmask(self):
mw = MasqueradeMiddleware()
request = Mock(spec=HttpRequest)
request.session = {}
request.user = User.objects.get(username='super')
mw.process_request(request)
self.assert_(request.user.is_masked == False)
self.assert_(request.user == User.objects.get(username='super'))
| apache-2.0 |
iulian787/spack | lib/spack/external/_pytest/config.py | 11 | 52505 | """ command line options, ini-file and conftest.py processing. """
from __future__ import absolute_import, division, print_function
import argparse
import shlex
import traceback
import types
import warnings
import py
# DON't import pytest here because it causes import cycle troubles
import sys
import os
import _pytest._code
import _pytest.hookspec # the extension point definitions
import _pytest.assertion
from _pytest._pluggy import PluginManager, HookimplMarker, HookspecMarker
from _pytest.compat import safe_str
hookimpl = HookimplMarker("pytest")
hookspec = HookspecMarker("pytest")
# pytest startup
#
class ConftestImportFailure(Exception):
def __init__(self, path, excinfo):
Exception.__init__(self, path, excinfo)
self.path = path
self.excinfo = excinfo
def __str__(self):
etype, evalue, etb = self.excinfo
formatted = traceback.format_tb(etb)
# The level of the tracebacks we want to print is hand crafted :(
return repr(evalue) + '\n' + ''.join(formatted[2:])
def main(args=None, plugins=None):
""" return exit code, after performing an in-process test run.
:arg args: list of command line arguments.
:arg plugins: list of plugin objects to be auto-registered during
initialization.
"""
try:
try:
config = _prepareconfig(args, plugins)
except ConftestImportFailure as e:
tw = py.io.TerminalWriter(sys.stderr)
for line in traceback.format_exception(*e.excinfo):
tw.line(line.rstrip(), red=True)
tw.line("ERROR: could not load %s\n" % (e.path), red=True)
return 4
else:
try:
return config.hook.pytest_cmdline_main(config=config)
finally:
config._ensure_unconfigure()
except UsageError as e:
for msg in e.args:
sys.stderr.write("ERROR: %s\n" % (msg,))
return 4
class cmdline: # compatibility namespace
main = staticmethod(main)
class UsageError(Exception):
""" error in pytest usage or invocation"""
class PrintHelp(Exception):
"""Raised when pytest should print it's help to skip the rest of the
argument parsing and validation."""
pass
def filename_arg(path, optname):
""" Argparse type validator for filename arguments.
:path: path of filename
:optname: name of the option
"""
if os.path.isdir(path):
raise UsageError("{0} must be a filename, given: {1}".format(optname, path))
return path
def directory_arg(path, optname):
"""Argparse type validator for directory arguments.
:path: path of directory
:optname: name of the option
"""
if not os.path.isdir(path):
raise UsageError("{0} must be a directory, given: {1}".format(optname, path))
return path
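# Illustrative behaviour of the two validators above (paths hypothetical):
#   filename_arg('conftest.py', '-c')   -> 'conftest.py'
#   filename_arg('/tmp', '-c')          -> raises UsageError
#   directory_arg('/tmp', '--basetemp') -> '/tmp'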
_preinit = []
default_plugins = (
"mark main terminal runner python fixtures debugging unittest capture skipping "
"tmpdir monkeypatch recwarn pastebin helpconfig nose assertion "
"junitxml resultlog doctest cacheprovider freeze_support "
"setuponly setupplan warnings").split()
builtin_plugins = set(default_plugins)
builtin_plugins.add("pytester")
def _preloadplugins():
assert not _preinit
_preinit.append(get_config())
def get_config():
if _preinit:
return _preinit.pop(0)
# subsequent calls to main will create a fresh instance
pluginmanager = PytestPluginManager()
config = Config(pluginmanager)
for spec in default_plugins:
pluginmanager.import_plugin(spec)
return config
def get_plugin_manager():
"""
Obtain a new instance of the
:py:class:`_pytest.config.PytestPluginManager`, with default plugins
already loaded.
This function can be used by integration with other tools, like hooking
into pytest to run tests into an IDE.
"""
return get_config().pluginmanager
def _prepareconfig(args=None, plugins=None):
warning = None
if args is None:
args = sys.argv[1:]
elif isinstance(args, py.path.local):
args = [str(args)]
elif not isinstance(args, (tuple, list)):
if not isinstance(args, str):
raise ValueError("not a string or argument list: %r" % (args,))
args = shlex.split(args, posix=sys.platform != "win32")
from _pytest import deprecated
warning = deprecated.MAIN_STR_ARGS
config = get_config()
pluginmanager = config.pluginmanager
try:
if plugins:
for plugin in plugins:
if isinstance(plugin, py.builtin._basestring):
pluginmanager.consider_pluginarg(plugin)
else:
pluginmanager.register(plugin)
if warning:
config.warn('C1', warning)
return pluginmanager.hook.pytest_cmdline_parse(
pluginmanager=pluginmanager, args=args)
except BaseException:
config._ensure_unconfigure()
raise
class PytestPluginManager(PluginManager):
"""
Overwrites :py:class:`pluggy.PluginManager <_pytest.vendored_packages.pluggy.PluginManager>` to add pytest-specific
functionality:
* loading plugins from the command line, ``PYTEST_PLUGIN`` env variable and
``pytest_plugins`` global variables found in plugins being loaded;
* ``conftest.py`` loading during start-up;
"""
def __init__(self):
super(PytestPluginManager, self).__init__("pytest", implprefix="pytest_")
self._conftest_plugins = set()
# state related to local conftest plugins
self._path2confmods = {}
self._conftestpath2mod = {}
self._confcutdir = None
self._noconftest = False
self._duplicatepaths = set()
self.add_hookspecs(_pytest.hookspec)
self.register(self)
if os.environ.get('PYTEST_DEBUG'):
err = sys.stderr
encoding = getattr(err, 'encoding', 'utf8')
try:
err = py.io.dupfile(err, encoding=encoding)
except Exception:
pass
self.trace.root.setwriter(err.write)
self.enable_tracing()
# Config._consider_importhook will set a real object if required.
self.rewrite_hook = _pytest.assertion.DummyRewriteHook()
def addhooks(self, module_or_class):
"""
.. deprecated:: 2.8
Use :py:meth:`pluggy.PluginManager.add_hookspecs <_pytest.vendored_packages.pluggy.PluginManager.add_hookspecs>`
instead.
"""
warning = dict(code="I2",
fslocation=_pytest._code.getfslineno(sys._getframe(1)),
nodeid=None,
message="use pluginmanager.add_hookspecs instead of "
"deprecated addhooks() method.")
self._warn(warning)
return self.add_hookspecs(module_or_class)
def parse_hookimpl_opts(self, plugin, name):
# pytest hooks are always prefixed with pytest_
# so we avoid accessing possibly non-readable attributes
# (see issue #1073)
if not name.startswith("pytest_"):
return
# ignore some historic special names which can not be hooks anyway
if name == "pytest_plugins" or name.startswith("pytest_funcarg__"):
return
method = getattr(plugin, name)
opts = super(PytestPluginManager, self).parse_hookimpl_opts(plugin, name)
if opts is not None:
for name in ("tryfirst", "trylast", "optionalhook", "hookwrapper"):
opts.setdefault(name, hasattr(method, name))
return opts
def parse_hookspec_opts(self, module_or_class, name):
opts = super(PytestPluginManager, self).parse_hookspec_opts(
module_or_class, name)
if opts is None:
method = getattr(module_or_class, name)
if name.startswith("pytest_"):
opts = {"firstresult": hasattr(method, "firstresult"),
"historic": hasattr(method, "historic")}
return opts
def _verify_hook(self, hook, hookmethod):
super(PytestPluginManager, self)._verify_hook(hook, hookmethod)
if "__multicall__" in hookmethod.argnames:
fslineno = _pytest._code.getfslineno(hookmethod.function)
warning = dict(code="I1",
fslocation=fslineno,
nodeid=None,
message="%r hook uses deprecated __multicall__ "
"argument" % (hook.name))
self._warn(warning)
def register(self, plugin, name=None):
ret = super(PytestPluginManager, self).register(plugin, name)
if ret:
self.hook.pytest_plugin_registered.call_historic(
kwargs=dict(plugin=plugin, manager=self))
if isinstance(plugin, types.ModuleType):
self.consider_module(plugin)
return ret
def getplugin(self, name):
# support deprecated naming because plugins (xdist e.g.) use it
return self.get_plugin(name)
def hasplugin(self, name):
"""Return True if the plugin with the given name is registered."""
return bool(self.get_plugin(name))
def pytest_configure(self, config):
# XXX now that the pluginmanager exposes hookimpl(tryfirst...)
# we should remove tryfirst/trylast as markers
config.addinivalue_line("markers",
"tryfirst: mark a hook implementation function such that the "
"plugin machinery will try to call it first/as early as possible.")
config.addinivalue_line("markers",
"trylast: mark a hook implementation function such that the "
"plugin machinery will try to call it last/as late as possible.")
def _warn(self, message):
kwargs = message if isinstance(message, dict) else {
'code': 'I1',
'message': message,
'fslocation': None,
'nodeid': None,
}
self.hook.pytest_logwarning.call_historic(kwargs=kwargs)
#
# internal API for local conftest plugin handling
#
def _set_initial_conftests(self, namespace):
""" load initial conftest files given a preparsed "namespace".
As conftest files may add their own command line options
which have arguments ('--my-opt somepath') we might get some
false positives. All builtin and 3rd party plugins will have
been loaded, however, so common options will not confuse our logic
here.
"""
current = py.path.local()
self._confcutdir = current.join(namespace.confcutdir, abs=True) \
if namespace.confcutdir else None
self._noconftest = namespace.noconftest
testpaths = namespace.file_or_dir
foundanchor = False
for path in testpaths:
path = str(path)
# remove node-id syntax
i = path.find("::")
if i != -1:
path = path[:i]
anchor = current.join(path, abs=1)
if exists(anchor): # we found some file object
self._try_load_conftest(anchor)
foundanchor = True
if not foundanchor:
self._try_load_conftest(current)
def _try_load_conftest(self, anchor):
self._getconftestmodules(anchor)
# let's also consider test* subdirs
if anchor.check(dir=1):
for x in anchor.listdir("test*"):
if x.check(dir=1):
self._getconftestmodules(x)
def _getconftestmodules(self, path):
if self._noconftest:
return []
try:
return self._path2confmods[path]
except KeyError:
if path.isfile():
clist = self._getconftestmodules(path.dirpath())
else:
# XXX these days we may rather want to use config.rootdir
# and allow users to opt into looking into the rootdir parent
# directories instead of requiring to specify confcutdir
clist = []
for parent in path.parts():
if self._confcutdir and self._confcutdir.relto(parent):
continue
conftestpath = parent.join("conftest.py")
if conftestpath.isfile():
mod = self._importconftest(conftestpath)
clist.append(mod)
self._path2confmods[path] = clist
return clist
def _rget_with_confmod(self, name, path):
modules = self._getconftestmodules(path)
for mod in reversed(modules):
try:
return mod, getattr(mod, name)
except AttributeError:
continue
raise KeyError(name)
def _importconftest(self, conftestpath):
try:
return self._conftestpath2mod[conftestpath]
except KeyError:
pkgpath = conftestpath.pypkgpath()
if pkgpath is None:
_ensure_removed_sysmodule(conftestpath.purebasename)
try:
mod = conftestpath.pyimport()
except Exception:
raise ConftestImportFailure(conftestpath, sys.exc_info())
self._conftest_plugins.add(mod)
self._conftestpath2mod[conftestpath] = mod
dirpath = conftestpath.dirpath()
if dirpath in self._path2confmods:
for path, mods in self._path2confmods.items():
if path and path.relto(dirpath) or path == dirpath:
assert mod not in mods
mods.append(mod)
self.trace("loaded conftestmodule %r" % (mod))
self.consider_conftest(mod)
return mod
#
# API for bootstrapping plugin loading
#
#
def consider_preparse(self, args):
for opt1, opt2 in zip(args, args[1:]):
if opt1 == "-p":
self.consider_pluginarg(opt2)
def consider_pluginarg(self, arg):
if arg.startswith("no:"):
name = arg[3:]
self.set_blocked(name)
if not name.startswith("pytest_"):
self.set_blocked("pytest_" + name)
else:
self.import_plugin(arg)
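    # Illustrative: "-p no:cacheprovider" blocks both 'cacheprovider' and
    # 'pytest_cacheprovider', while "-p myplugin" (a hypothetical plugin
    # name) is simply imported and registered.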
def consider_conftest(self, conftestmodule):
self.register(conftestmodule, name=conftestmodule.__file__)
def consider_env(self):
self._import_plugin_specs(os.environ.get("PYTEST_PLUGINS"))
def consider_module(self, mod):
self._import_plugin_specs(getattr(mod, 'pytest_plugins', []))
def _import_plugin_specs(self, spec):
plugins = _get_plugin_specs_as_list(spec)
for import_spec in plugins:
self.import_plugin(import_spec)
def import_plugin(self, modname):
# most often modname refers to builtin modules, e.g. "pytester",
# "terminal" or "capture". Those plugins are registered under their
# basename for historic purposes but must be imported with the
# _pytest prefix.
assert isinstance(modname, (py.builtin.text, str)), "module name as text required, got %r" % modname
modname = str(modname)
if self.get_plugin(modname) is not None:
return
if modname in builtin_plugins:
importspec = "_pytest." + modname
else:
importspec = modname
self.rewrite_hook.mark_rewrite(importspec)
try:
__import__(importspec)
except ImportError as e:
new_exc = ImportError('Error importing plugin "%s": %s' % (modname, safe_str(e.args[0])))
# copy over name and path attributes
for attr in ('name', 'path'):
if hasattr(e, attr):
setattr(new_exc, attr, getattr(e, attr))
raise new_exc
except Exception as e:
import pytest
if not hasattr(pytest, 'skip') or not isinstance(e, pytest.skip.Exception):
raise
self._warn("skipped plugin %r: %s" % ((modname, e.msg)))
else:
mod = sys.modules[importspec]
self.register(mod, modname)
def _get_plugin_specs_as_list(specs):
"""
Parses a list of "plugin specs" and returns a list of plugin names.
Plugin specs can be given as a list of strings separated by "," or already as a list/tuple in
which case it is returned as a list. Specs can also be `None` in which case an
empty list is returned.
"""
if specs is not None:
if isinstance(specs, str):
specs = specs.split(',') if specs else []
if not isinstance(specs, (list, tuple)):
raise UsageError("Plugin specs must be a ','-separated string or a "
"list/tuple of strings for plugin names. Given: %r" % specs)
return list(specs)
return []
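# Behaviour sketch for the helper above (plugin names are illustrative):
#   _get_plugin_specs_as_list(None)         -> []
#   _get_plugin_specs_as_list('p1,p2')      -> ['p1', 'p2']
#   _get_plugin_specs_as_list(['p1', 'p2']) -> ['p1', 'p2']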
class Parser:
""" Parser for command line arguments and ini-file values.
:ivar extra_info: dict of generic param -> value to display in case
there's an error processing the command line arguments.
"""
def __init__(self, usage=None, processopt=None):
self._anonymous = OptionGroup("custom options", parser=self)
self._groups = []
self._processopt = processopt
self._usage = usage
self._inidict = {}
self._ininames = []
self.extra_info = {}
def processoption(self, option):
if self._processopt:
if option.dest:
self._processopt(option)
def getgroup(self, name, description="", after=None):
""" get (or create) a named option Group.
:name: name of the option group.
:description: long description for --help output.
:after: name of other group, used for ordering --help output.
The returned group object has an ``addoption`` method with the same
signature as :py:func:`parser.addoption
<_pytest.config.Parser.addoption>` but will be shown in the
        respective group in the output of ``pytest --help``.
"""
for group in self._groups:
if group.name == name:
return group
group = OptionGroup(name, description, parser=self)
i = 0
for i, grp in enumerate(self._groups):
if grp.name == after:
break
self._groups.insert(i + 1, group)
return group
def addoption(self, *opts, **attrs):
""" register a command line option.
:opts: option names, can be short or long options.
:attrs: same attributes which the ``add_option()`` function of the
`argparse library
<http://docs.python.org/2/library/argparse.html>`_
accepts.
After command line parsing options are available on the pytest config
object via ``config.option.NAME`` where ``NAME`` is usually set
by passing a ``dest`` attribute, for example
``addoption("--long", dest="NAME", ...)``.
"""
self._anonymous.addoption(*opts, **attrs)
def parse(self, args, namespace=None):
from _pytest._argcomplete import try_argcomplete
self.optparser = self._getparser()
try_argcomplete(self.optparser)
return self.optparser.parse_args([str(x) for x in args], namespace=namespace)
def _getparser(self):
from _pytest._argcomplete import filescompleter
optparser = MyOptionParser(self, self.extra_info)
groups = self._groups + [self._anonymous]
for group in groups:
if group.options:
desc = group.description or group.name
arggroup = optparser.add_argument_group(desc)
for option in group.options:
n = option.names()
a = option.attrs()
arggroup.add_argument(*n, **a)
# bash like autocompletion for dirs (appending '/')
optparser.add_argument(FILE_OR_DIR, nargs='*').completer = filescompleter
return optparser
def parse_setoption(self, args, option, namespace=None):
parsedoption = self.parse(args, namespace=namespace)
for name, value in parsedoption.__dict__.items():
setattr(option, name, value)
return getattr(parsedoption, FILE_OR_DIR)
def parse_known_args(self, args, namespace=None):
"""parses and returns a namespace object with known arguments at this
point.
"""
return self.parse_known_and_unknown_args(args, namespace=namespace)[0]
def parse_known_and_unknown_args(self, args, namespace=None):
"""parses and returns a namespace object with known arguments, and
the remaining arguments unknown at this point.
"""
optparser = self._getparser()
args = [str(x) for x in args]
return optparser.parse_known_args(args, namespace=namespace)
def addini(self, name, help, type=None, default=None):
""" register an ini-file option.
:name: name of the ini-variable
:type: type of the variable, can be ``pathlist``, ``args``, ``linelist``
or ``bool``.
:default: default value if no ini-file option exists but is queried.
The value of ini-variables can be retrieved via a call to
:py:func:`config.getini(name) <_pytest.config.Config.getini>`.
"""
assert type in (None, "pathlist", "args", "linelist", "bool")
self._inidict[name] = (help, type, default)
self._ininames.append(name)
class ArgumentError(Exception):
"""
Raised if an Argument instance is created with invalid or
inconsistent arguments.
"""
def __init__(self, msg, option):
self.msg = msg
self.option_id = str(option)
def __str__(self):
if self.option_id:
return "option %s: %s" % (self.option_id, self.msg)
else:
return self.msg
class Argument:
"""class that mimics the necessary behaviour of optparse.Option
    it's currently a least-effort implementation that ignores
    choices and integer prefixes
https://docs.python.org/3/library/optparse.html#optparse-standard-option-types
"""
_typ_map = {
'int': int,
'string': str,
'float': float,
'complex': complex,
}
def __init__(self, *names, **attrs):
"""store parms in private vars for use in add_argument"""
self._attrs = attrs
self._short_opts = []
self._long_opts = []
self.dest = attrs.get('dest')
if '%default' in (attrs.get('help') or ''):
warnings.warn(
'pytest now uses argparse. "%default" should be'
' changed to "%(default)s" ',
DeprecationWarning,
stacklevel=3)
try:
typ = attrs['type']
except KeyError:
pass
else:
# this might raise a keyerror as well, don't want to catch that
if isinstance(typ, py.builtin._basestring):
if typ == 'choice':
warnings.warn(
'type argument to addoption() is a string %r.'
' For parsearg this is optional and when supplied'
' should be a type.'
' (options: %s)' % (typ, names),
DeprecationWarning,
stacklevel=3)
# argparse expects a type here take it from
# the type of the first element
attrs['type'] = type(attrs['choices'][0])
else:
warnings.warn(
'type argument to addoption() is a string %r.'
' For parsearg this should be a type.'
' (options: %s)' % (typ, names),
DeprecationWarning,
stacklevel=3)
attrs['type'] = Argument._typ_map[typ]
# used in test_parseopt -> test_parse_defaultgetter
self.type = attrs['type']
else:
self.type = typ
try:
# attribute existence is tested in Config._processopt
self.default = attrs['default']
except KeyError:
pass
self._set_opt_strings(names)
if not self.dest:
if self._long_opts:
self.dest = self._long_opts[0][2:].replace('-', '_')
else:
try:
self.dest = self._short_opts[0][1:]
except IndexError:
raise ArgumentError(
'need a long or short option', self)
def names(self):
return self._short_opts + self._long_opts
def attrs(self):
# update any attributes set by processopt
attrs = 'default dest help'.split()
if self.dest:
attrs.append(self.dest)
for attr in attrs:
try:
self._attrs[attr] = getattr(self, attr)
except AttributeError:
pass
if self._attrs.get('help'):
a = self._attrs['help']
a = a.replace('%default', '%(default)s')
# a = a.replace('%prog', '%(prog)s')
self._attrs['help'] = a
return self._attrs
def _set_opt_strings(self, opts):
"""directly from optparse
might not be necessary as this is passed to argparse later on"""
for opt in opts:
if len(opt) < 2:
raise ArgumentError(
"invalid option string %r: "
"must be at least two characters long" % opt, self)
elif len(opt) == 2:
if not (opt[0] == "-" and opt[1] != "-"):
raise ArgumentError(
"invalid short option string %r: "
"must be of the form -x, (x any non-dash char)" % opt,
self)
self._short_opts.append(opt)
else:
if not (opt[0:2] == "--" and opt[2] != "-"):
raise ArgumentError(
"invalid long option string %r: "
"must start with --, followed by non-dash" % opt,
self)
self._long_opts.append(opt)
def __repr__(self):
args = []
if self._short_opts:
args += ['_short_opts: ' + repr(self._short_opts)]
if self._long_opts:
args += ['_long_opts: ' + repr(self._long_opts)]
args += ['dest: ' + repr(self.dest)]
if hasattr(self, 'type'):
args += ['type: ' + repr(self.type)]
if hasattr(self, 'default'):
args += ['default: ' + repr(self.default)]
return 'Argument({0})'.format(', '.join(args))
class OptionGroup:
def __init__(self, name, description="", parser=None):
self.name = name
self.description = description
self.options = []
self.parser = parser
def addoption(self, *optnames, **attrs):
""" add an option to this group.
if a shortened version of a long option is specified it will
be suppressed in the help. addoption('--twowords', '--two-words')
results in help showing '--two-words' only, but --twowords gets
accepted **and** the automatic destination is in args.twowords
"""
conflict = set(optnames).intersection(
name for opt in self.options for name in opt.names())
if conflict:
raise ValueError("option names %s already added" % conflict)
option = Argument(*optnames, **attrs)
self._addoption_instance(option, shortupper=False)
def _addoption(self, *optnames, **attrs):
option = Argument(*optnames, **attrs)
self._addoption_instance(option, shortupper=True)
def _addoption_instance(self, option, shortupper=False):
if not shortupper:
for opt in option._short_opts:
if opt[0] == '-' and opt[1].islower():
raise ValueError("lowercase shortoptions reserved")
if self.parser:
self.parser.processoption(option)
self.options.append(option)
class MyOptionParser(argparse.ArgumentParser):
def __init__(self, parser, extra_info=None):
if not extra_info:
extra_info = {}
self._parser = parser
argparse.ArgumentParser.__init__(self, usage=parser._usage,
add_help=False, formatter_class=DropShorterLongHelpFormatter)
# extra_info is a dict of (param -> value) to display if there's
        # a usage error to provide more contextual information to the user
self.extra_info = extra_info
def parse_args(self, args=None, namespace=None):
"""allow splitting of positional arguments"""
args, argv = self.parse_known_args(args, namespace)
if argv:
for arg in argv:
if arg and arg[0] == '-':
lines = ['unrecognized arguments: %s' % (' '.join(argv))]
for k, v in sorted(self.extra_info.items()):
lines.append(' %s: %s' % (k, v))
self.error('\n'.join(lines))
getattr(args, FILE_OR_DIR).extend(argv)
return args
class DropShorterLongHelpFormatter(argparse.HelpFormatter):
"""shorten help for long options that differ only in extra hyphens
- collapse **long** options that are the same except for extra hyphens
    - special action attribute map_long_option allows suppressing additional
long options
- shortcut if there are only two options and one of them is a short one
- cache result on action object as this is called at least 2 times
"""
def _format_action_invocation(self, action):
orgstr = argparse.HelpFormatter._format_action_invocation(self, action)
if orgstr and orgstr[0] != '-': # only optional arguments
return orgstr
res = getattr(action, '_formatted_action_invocation', None)
if res:
return res
options = orgstr.split(', ')
if len(options) == 2 and (len(options[0]) == 2 or len(options[1]) == 2):
# a shortcut for '-h, --help' or '--abc', '-a'
action._formatted_action_invocation = orgstr
return orgstr
return_list = []
option_map = getattr(action, 'map_long_option', {})
if option_map is None:
option_map = {}
short_long = {}
for option in options:
if len(option) == 2 or option[2] == ' ':
continue
if not option.startswith('--'):
raise ArgumentError('long optional argument without "--": [%s]'
% (option), self)
xxoption = option[2:]
if xxoption.split()[0] not in option_map:
shortened = xxoption.replace('-', '')
if shortened not in short_long or \
len(short_long[shortened]) < len(xxoption):
short_long[shortened] = xxoption
# now short_long has been filled out to the longest with dashes
# **and** we keep the right option ordering from add_argument
for option in options:
if len(option) == 2 or option[2] == ' ':
return_list.append(option)
if option[2:] == short_long.get(option.replace('-', '')):
return_list.append(option.replace(' ', '=', 1))
action._formatted_action_invocation = ', '.join(return_list)
return action._formatted_action_invocation
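    # Illustrative: an option registered with names ('--two-words',
    # '--twowords') shows only '--two-words' in --help output; both spellings
    # reduce to the same key once hyphens are stripped, and the longer,
    # hyphenated form wins.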
def _ensure_removed_sysmodule(modname):
try:
del sys.modules[modname]
except KeyError:
pass
class CmdOptions(object):
""" holds cmdline options as attributes."""
def __init__(self, values=()):
self.__dict__.update(values)
def __repr__(self):
return "<CmdOptions %r>" % (self.__dict__,)
def copy(self):
return CmdOptions(self.__dict__)
class Notset:
def __repr__(self):
return "<NOTSET>"
notset = Notset()
FILE_OR_DIR = 'file_or_dir'
def _iter_rewritable_modules(package_files):
for fn in package_files:
is_simple_module = '/' not in fn and fn.endswith('.py')
is_package = fn.count('/') == 1 and fn.endswith('__init__.py')
if is_simple_module:
module_name, _ = os.path.splitext(fn)
yield module_name
elif is_package:
package_name = os.path.dirname(fn)
yield package_name
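# Sketch of the mapping above (file names are hypothetical):
#   list(_iter_rewritable_modules(['plugin.py', 'pkg/__init__.py']))
#   -> ['plugin', 'pkg']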
class Config(object):
""" access to configuration values, pluginmanager and plugin hooks. """
def __init__(self, pluginmanager):
#: access to command line option as attributes.
#: (deprecated), use :py:func:`getoption() <_pytest.config.Config.getoption>` instead
self.option = CmdOptions()
_a = FILE_OR_DIR
self._parser = Parser(
usage="%%(prog)s [options] [%s] [%s] [...]" % (_a, _a),
processopt=self._processopt,
)
#: a pluginmanager instance
self.pluginmanager = pluginmanager
self.trace = self.pluginmanager.trace.root.get("config")
self.hook = self.pluginmanager.hook
self._inicache = {}
self._override_ini = ()
self._opt2dest = {}
self._cleanup = []
self._warn = self.pluginmanager._warn
self.pluginmanager.register(self, "pytestconfig")
self._configured = False
def do_setns(dic):
import pytest
setns(pytest, dic)
self.hook.pytest_namespace.call_historic(do_setns, {})
self.hook.pytest_addoption.call_historic(kwargs=dict(parser=self._parser))
def add_cleanup(self, func):
""" Add a function to be called when the config object gets out of
        use (usually coinciding with pytest_unconfigure)."""
self._cleanup.append(func)
def _do_configure(self):
assert not self._configured
self._configured = True
self.hook.pytest_configure.call_historic(kwargs=dict(config=self))
def _ensure_unconfigure(self):
if self._configured:
self._configured = False
self.hook.pytest_unconfigure(config=self)
self.hook.pytest_configure._call_history = []
while self._cleanup:
fin = self._cleanup.pop()
fin()
def warn(self, code, message, fslocation=None, nodeid=None):
""" generate a warning for this test session. """
self.hook.pytest_logwarning.call_historic(kwargs=dict(
code=code, message=message,
fslocation=fslocation, nodeid=nodeid))
def get_terminal_writer(self):
return self.pluginmanager.get_plugin("terminalreporter")._tw
def pytest_cmdline_parse(self, pluginmanager, args):
# REF1 assert self == pluginmanager.config, (self, pluginmanager.config)
self.parse(args)
return self
def notify_exception(self, excinfo, option=None):
if option and option.fulltrace:
style = "long"
else:
style = "native"
excrepr = excinfo.getrepr(funcargs=True,
showlocals=getattr(option, 'showlocals', False),
style=style,
)
res = self.hook.pytest_internalerror(excrepr=excrepr,
excinfo=excinfo)
if not py.builtin.any(res):
for line in str(excrepr).split("\n"):
sys.stderr.write("INTERNALERROR> %s\n" % line)
sys.stderr.flush()
def cwd_relative_nodeid(self, nodeid):
# nodeid's are relative to the rootpath, compute relative to cwd
if self.invocation_dir != self.rootdir:
fullpath = self.rootdir.join(nodeid)
nodeid = self.invocation_dir.bestrelpath(fullpath)
return nodeid
@classmethod
def fromdictargs(cls, option_dict, args):
""" constructor useable for subprocesses. """
config = get_config()
config.option.__dict__.update(option_dict)
config.parse(args, addopts=False)
for x in config.option.plugins:
config.pluginmanager.consider_pluginarg(x)
return config
def _processopt(self, opt):
for name in opt._short_opts + opt._long_opts:
self._opt2dest[name] = opt.dest
if hasattr(opt, 'default') and opt.dest:
if not hasattr(self.option, opt.dest):
setattr(self.option, opt.dest, opt.default)
@hookimpl(trylast=True)
def pytest_load_initial_conftests(self, early_config):
self.pluginmanager._set_initial_conftests(early_config.known_args_namespace)
def _initini(self, args):
ns, unknown_args = self._parser.parse_known_and_unknown_args(args, namespace=self.option.copy())
r = determine_setup(ns.inifilename, ns.file_or_dir + unknown_args, warnfunc=self.warn)
self.rootdir, self.inifile, self.inicfg = r
self._parser.extra_info['rootdir'] = self.rootdir
self._parser.extra_info['inifile'] = self.inifile
self.invocation_dir = py.path.local()
self._parser.addini('addopts', 'extra command line options', 'args')
self._parser.addini('minversion', 'minimally required pytest version')
self._override_ini = ns.override_ini or ()
def _consider_importhook(self, args):
"""Install the PEP 302 import hook if using assertion re-writing.
Needs to parse the --assert=<mode> option from the commandline
and find all the installed plugins to mark them for re-writing
by the importhook.
"""
ns, unknown_args = self._parser.parse_known_and_unknown_args(args)
mode = ns.assertmode
if mode == 'rewrite':
try:
hook = _pytest.assertion.install_importhook(self)
except SystemError:
mode = 'plain'
else:
# REMOVED FOR SPACK: This routine imports `pkg_resources` from
# `setuptools`, but we do not need it for Spack. We have removed
# it from Spack to avoid a dependency on setuptools.
# self._mark_plugins_for_rewrite(hook)
pass
self._warn_about_missing_assertion(mode)
def _warn_about_missing_assertion(self, mode):
try:
assert False
except AssertionError:
pass
else:
if mode == 'plain':
sys.stderr.write("WARNING: ASSERTIONS ARE NOT EXECUTED"
" and FAILING TESTS WILL PASS. Are you"
" using python -O?")
else:
sys.stderr.write("WARNING: assertions not in test modules or"
" plugins will be ignored"
" because assert statements are not executed "
"by the underlying Python interpreter "
"(are you using python -O?)\n")
def _preparse(self, args, addopts=True):
self._initini(args)
if addopts:
args[:] = shlex.split(os.environ.get('PYTEST_ADDOPTS', '')) + args
args[:] = self.getini("addopts") + args
self._checkversion()
self._consider_importhook(args)
self.pluginmanager.consider_preparse(args)
# REMOVED FOR SPACK: This routine imports `pkg_resources` from
# `setuptools`, but we do not need it for Spack. We have removed
# it from Spack to avoid a dependency on setuptools.
# self.pluginmanager.load_setuptools_entrypoints('pytest11')
self.pluginmanager.consider_env()
self.known_args_namespace = ns = self._parser.parse_known_args(args, namespace=self.option.copy())
if self.known_args_namespace.confcutdir is None and self.inifile:
confcutdir = py.path.local(self.inifile).dirname
self.known_args_namespace.confcutdir = confcutdir
try:
self.hook.pytest_load_initial_conftests(early_config=self,
args=args, parser=self._parser)
except ConftestImportFailure:
e = sys.exc_info()[1]
if ns.help or ns.version:
# we don't want to prevent --help/--version to work
# so just let is pass and print a warning at the end
self._warn("could not load initial conftests (%s)\n" % e.path)
else:
raise
def _checkversion(self):
import pytest
minver = self.inicfg.get('minversion', None)
if minver:
ver = minver.split(".")
myver = pytest.__version__.split(".")
if myver < ver:
raise pytest.UsageError(
"%s:%d: requires pytest-%s, actual pytest-%s'" % (
self.inicfg.config.path, self.inicfg.lineof('minversion'),
minver, pytest.__version__))
def parse(self, args, addopts=True):
# parse given cmdline arguments into this config object.
assert not hasattr(self, 'args'), (
"can only parse cmdline args at most once per Config object")
self._origargs = args
self.hook.pytest_addhooks.call_historic(
kwargs=dict(pluginmanager=self.pluginmanager))
self._preparse(args, addopts=addopts)
# XXX deprecated hook:
self.hook.pytest_cmdline_preparse(config=self, args=args)
self._parser.after_preparse = True
try:
args = self._parser.parse_setoption(args, self.option, namespace=self.option)
if not args:
cwd = os.getcwd()
if cwd == self.rootdir:
args = self.getini('testpaths')
if not args:
args = [cwd]
self.args = args
except PrintHelp:
pass
def addinivalue_line(self, name, line):
""" add a line to an ini-file option. The option must have been
        declared but might not yet be set, in which case the line becomes
        the first line in its value. """
x = self.getini(name)
assert isinstance(x, list)
x.append(line) # modifies the cached list inline
def getini(self, name):
""" return configuration value from an :ref:`ini file <inifiles>`. If the
specified name hasn't been registered through a prior
:py:func:`parser.addini <_pytest.config.Parser.addini>`
call (usually from a plugin), a ValueError is raised. """
try:
return self._inicache[name]
except KeyError:
self._inicache[name] = val = self._getini(name)
return val
def _getini(self, name):
try:
description, type, default = self._parser._inidict[name]
except KeyError:
raise ValueError("unknown configuration value: %r" % (name,))
value = self._get_override_ini_value(name)
if value is None:
try:
value = self.inicfg[name]
except KeyError:
if default is not None:
return default
if type is None:
return ''
return []
if type == "pathlist":
dp = py.path.local(self.inicfg.config.path).dirpath()
values = []
for relpath in shlex.split(value):
values.append(dp.join(relpath, abs=True))
return values
elif type == "args":
return shlex.split(value)
elif type == "linelist":
return [t for t in map(lambda x: x.strip(), value.split("\n")) if t]
elif type == "bool":
return bool(_strtobool(value.strip()))
else:
assert type is None
return value
def _getconftest_pathlist(self, name, path):
try:
mod, relroots = self.pluginmanager._rget_with_confmod(name, path)
except KeyError:
return None
modpath = py.path.local(mod.__file__).dirpath()
values = []
for relroot in relroots:
if not isinstance(relroot, py.path.local):
relroot = relroot.replace("/", py.path.local.sep)
relroot = modpath.join(relroot, abs=True)
values.append(relroot)
return values
def _get_override_ini_value(self, name):
value = None
        # override_ini is a list of lists, to support both
        # "-o foo1=bar1 foo2=bar2" and "-o foo1=bar1 -o foo2=bar2"
# always use the last item if multiple value set for same ini-name,
# e.g. -o foo=bar1 -o foo=bar2 will set foo to bar2
for ini_config_list in self._override_ini:
for ini_config in ini_config_list:
try:
(key, user_ini_value) = ini_config.split("=", 1)
except ValueError:
raise UsageError("-o/--override-ini expects option=value style.")
if key == name:
value = user_ini_value
return value
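    # For example, a command line such as (hypothetical values):
    #   pytest -o cache_dir=/tmp/a -o cache_dir=/tmp/b
    # makes _get_override_ini_value("cache_dir") return "/tmp/b", because the
    # last occurrence of a repeated key wins.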
def getoption(self, name, default=notset, skip=False):
""" return command line option value.
:arg name: name of the option. You may also specify
the literal ``--OPT`` option instead of the "dest" option name.
:arg default: default value if no option of that name exists.
        :arg skip: if True, raise pytest.skip if the option does not exist
            or has a None value.
"""
name = self._opt2dest.get(name, name)
try:
val = getattr(self.option, name)
if val is None and skip:
raise AttributeError(name)
return val
except AttributeError:
if default is not notset:
return default
if skip:
import pytest
pytest.skip("no %r option found" % (name,))
raise ValueError("no option named %r" % (name,))
def getvalue(self, name, path=None):
""" (deprecated, use getoption()) """
return self.getoption(name)
def getvalueorskip(self, name, path=None):
""" (deprecated, use getoption(skip=True)) """
return self.getoption(name, skip=True)
def exists(path, ignore=EnvironmentError):
try:
return path.check()
except ignore:
return False
def getcfg(args, warnfunc=None):
"""
Search the list of arguments for a valid ini-file for pytest,
and return a tuple of (rootdir, inifile, cfg-dict).
note: warnfunc is an optional function used to warn
about ini-files that use deprecated features.
This parameter should be removed when pytest
adopts standard deprecation warnings (#1804).
"""
from _pytest.deprecated import SETUP_CFG_PYTEST
inibasenames = ["pytest.ini", "tox.ini", "setup.cfg"]
args = [x for x in args if not str(x).startswith("-")]
if not args:
args = [py.path.local()]
for arg in args:
arg = py.path.local(arg)
for base in arg.parts(reverse=True):
for inibasename in inibasenames:
p = base.join(inibasename)
if exists(p):
iniconfig = py.iniconfig.IniConfig(p)
if 'pytest' in iniconfig.sections:
if inibasename == 'setup.cfg' and warnfunc:
warnfunc('C1', SETUP_CFG_PYTEST)
return base, p, iniconfig['pytest']
if inibasename == 'setup.cfg' and 'tool:pytest' in iniconfig.sections:
return base, p, iniconfig['tool:pytest']
elif inibasename == "pytest.ini":
# allowed to be empty
return base, p, {}
return None, None, None
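# Hedged walk-through: for args like ['tests/test_x.py'], getcfg() climbs
# from tests/ towards the filesystem root, probing pytest.ini, then tox.ini
# (its [pytest] section), then setup.cfg ([tool:pytest]) at each level, and
# returns the first match as (rootdir, inifile, cfg-dict).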
def get_common_ancestor(paths):
common_ancestor = None
for path in paths:
if not path.exists():
continue
if common_ancestor is None:
common_ancestor = path
else:
if path.relto(common_ancestor) or path == common_ancestor:
continue
elif common_ancestor.relto(path):
common_ancestor = path
else:
shared = path.common(common_ancestor)
if shared is not None:
common_ancestor = shared
if common_ancestor is None:
common_ancestor = py.path.local()
elif common_ancestor.isfile():
common_ancestor = common_ancestor.dirpath()
return common_ancestor
def get_dirs_from_args(args):
def is_option(x):
return str(x).startswith('-')
def get_file_part_from_node_id(x):
return str(x).split('::')[0]
def get_dir_from_path(path):
if path.isdir():
return path
return py.path.local(path.dirname)
# These look like paths but may not exist
possible_paths = (
py.path.local(get_file_part_from_node_id(arg))
for arg in args
if not is_option(arg)
)
return [
get_dir_from_path(path)
for path in possible_paths
if path.exists()
]
def determine_setup(inifile, args, warnfunc=None):
dirs = get_dirs_from_args(args)
if inifile:
iniconfig = py.iniconfig.IniConfig(inifile)
try:
inicfg = iniconfig["pytest"]
except KeyError:
inicfg = None
rootdir = get_common_ancestor(dirs)
else:
ancestor = get_common_ancestor(dirs)
rootdir, inifile, inicfg = getcfg([ancestor], warnfunc=warnfunc)
if rootdir is None:
for rootdir in ancestor.parts(reverse=True):
if rootdir.join("setup.py").exists():
break
else:
rootdir, inifile, inicfg = getcfg(dirs, warnfunc=warnfunc)
if rootdir is None:
rootdir = get_common_ancestor([py.path.local(), ancestor])
is_fs_root = os.path.splitdrive(str(rootdir))[1] == '/'
if is_fs_root:
rootdir = ancestor
return rootdir, inifile, inicfg or {}
def setns(obj, dic):
import pytest
for name, value in dic.items():
if isinstance(value, dict):
mod = getattr(obj, name, None)
if mod is None:
modname = "pytest.%s" % name
mod = types.ModuleType(modname)
sys.modules[modname] = mod
mod.__all__ = []
setattr(obj, name, mod)
obj.__all__.append(name)
setns(mod, value)
else:
setattr(obj, name, value)
obj.__all__.append(name)
# if obj != pytest:
# pytest.__all__.append(name)
setattr(pytest, name, value)
def create_terminal_writer(config, *args, **kwargs):
"""Create a TerminalWriter instance configured according to the options
in the config object. Every code which requires a TerminalWriter object
and has access to a config object should use this function.
"""
tw = py.io.TerminalWriter(*args, **kwargs)
if config.option.color == 'yes':
tw.hasmarkup = True
if config.option.color == 'no':
tw.hasmarkup = False
return tw
def _strtobool(val):
"""Convert a string representation of truth to true (1) or false (0).
True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values
are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if
'val' is anything else.
.. note:: copied from distutils.util
"""
val = val.lower()
if val in ('y', 'yes', 't', 'true', 'on', '1'):
return 1
elif val in ('n', 'no', 'f', 'false', 'off', '0'):
return 0
else:
raise ValueError("invalid truth value %r" % (val,))
| lgpl-2.1 |
mattrobenolt/django | django/db/models/options.py | 3 | 36340 | from __future__ import unicode_literals
import warnings
from bisect import bisect
from collections import OrderedDict, defaultdict
from itertools import chain
from django.apps import apps
from django.conf import settings
from django.core.exceptions import FieldDoesNotExist
from django.db import connections
from django.db.models.fields import AutoField
from django.db.models.fields.proxy import OrderWrt
from django.db.models.fields.related import ManyToManyField
from django.utils import six
from django.utils.datastructures import ImmutableList, OrderedSet
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import (
force_text, python_2_unicode_compatible, smart_text,
)
from django.utils.functional import cached_property
from django.utils.lru_cache import lru_cache
from django.utils.text import camel_case_to_spaces
from django.utils.translation import override, string_concat
PROXY_PARENTS = object()
EMPTY_RELATION_TREE = tuple()
IMMUTABLE_WARNING = (
"The return type of '%s' should never be mutated. If you want to manipulate this list "
"for your own use, make a copy first."
)
DEFAULT_NAMES = ('verbose_name', 'verbose_name_plural', 'db_table', 'ordering',
'unique_together', 'permissions', 'get_latest_by',
'order_with_respect_to', 'app_label', 'db_tablespace',
'abstract', 'managed', 'proxy', 'swappable', 'auto_created',
'index_together', 'apps', 'default_permissions',
'select_on_save', 'default_related_name',
'required_db_features', 'required_db_vendor')
class raise_deprecation(object):
def __init__(self, suggested_alternative):
self.suggested_alternative = suggested_alternative
def __call__(self, fn):
def wrapper(*args, **kwargs):
warnings.warn(
"'%s is an unofficial API that has been deprecated. "
"You may be able to replace it with '%s'" % (
fn.__name__,
self.suggested_alternative,
),
RemovedInDjango20Warning, stacklevel=2
)
return fn(*args, **kwargs)
return wrapper
def normalize_together(option_together):
"""
option_together can be either a tuple of tuples, or a single
tuple of two strings. Normalize it to a tuple of tuples, so that
calling code can uniformly expect that.
"""
try:
if not option_together:
return ()
if not isinstance(option_together, (tuple, list)):
raise TypeError
first_element = next(iter(option_together))
if not isinstance(first_element, (tuple, list)):
option_together = (option_together,)
# Normalize everything to tuples
return tuple(tuple(ot) for ot in option_together)
except TypeError:
# If the value of option_together isn't valid, return it
# verbatim; this will be picked up by the check framework later.
return option_together
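# Illustration of the normalization contract described in the docstring:
#   normalize_together(('pub_date', 'author')) -> (('pub_date', 'author'),)
#   normalize_together([['a', 'b'], ['c', 'd']]) -> (('a', 'b'), ('c', 'd'))
#   normalize_together(None) -> ()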
def make_immutable_fields_list(name, data):
return ImmutableList(data, warning=IMMUTABLE_WARNING % name)
@python_2_unicode_compatible
class Options(object):
FORWARD_PROPERTIES = ('fields', 'many_to_many', 'concrete_fields',
'local_concrete_fields', '_forward_fields_map')
REVERSE_PROPERTIES = ('related_objects', 'fields_map', '_relation_tree')
def __init__(self, meta, app_label=None):
self._get_fields_cache = {}
self.proxied_children = []
self.local_fields = []
self.local_many_to_many = []
self.virtual_fields = []
self.model_name = None
self.verbose_name = None
self.verbose_name_plural = None
self.db_table = ''
self.ordering = []
self._ordering_clash = False
self.unique_together = []
self.index_together = []
self.select_on_save = False
self.default_permissions = ('add', 'change', 'delete')
self.permissions = []
self.object_name = None
self.app_label = app_label
self.get_latest_by = None
self.order_with_respect_to = None
self.db_tablespace = settings.DEFAULT_TABLESPACE
self.required_db_features = []
self.required_db_vendor = None
self.meta = meta
self.pk = None
self.has_auto_field = False
self.auto_field = None
self.abstract = False
self.managed = True
self.proxy = False
# For any class that is a proxy (including automatically created
# classes for deferred object loading), proxy_for_model tells us
# which class this model is proxying. Note that proxy_for_model
# can create a chain of proxy models. For non-proxy models, the
# variable is always None.
self.proxy_for_model = None
# For any non-abstract class, the concrete class is the model
# in the end of the proxy_for_model chain. In particular, for
# concrete models, the concrete_model is always the class itself.
self.concrete_model = None
self.swappable = None
self.parents = OrderedDict()
self.auto_created = False
# To handle various inheritance situations, we need to track where
# managers came from (concrete or abstract base classes). `managers`
# keeps a list of 3-tuples of the form:
# (creation_counter, instance, abstract(=True))
self.managers = []
# List of all lookups defined in ForeignKey 'limit_choices_to' options
# from *other* models. Needed for some admin checks. Internal use only.
self.related_fkey_lookups = []
# A custom app registry to use, if you're making a separate model set.
self.apps = apps
self.default_related_name = None
@lru_cache(maxsize=None)
def _map_model(self, link):
# This helper function is used to allow backwards compatibility with
# the previous API. No future methods should use this function.
# It maps a field to (field, model or related_model,) depending on the
# field type.
model = link.model._meta.concrete_model
if model is self.model:
model = None
return link, model
@lru_cache(maxsize=None)
def _map_model_details(self, link):
# This helper function is used to allow backwards compatibility with
# the previous API. No future methods should use this function.
# This function maps a field to a tuple of:
# (field, model or related_model, direct, is_m2m) depending on the
# field type.
direct = not link.auto_created or link.concrete
model = link.model._meta.concrete_model
if model is self.model:
model = None
m2m = link.is_relation and link.many_to_many
return link, model, direct, m2m
@property
def app_config(self):
# Don't go through get_app_config to avoid triggering imports.
return self.apps.app_configs.get(self.app_label)
@property
def installed(self):
return self.app_config is not None
@property
def abstract_managers(self):
return [
(counter, instance.name, instance) for counter, instance, abstract
in self.managers if abstract
]
@property
def concrete_managers(self):
return [
(counter, instance.name, instance) for counter, instance, abstract
in self.managers if not abstract
]
def contribute_to_class(self, cls, name):
from django.db import connection
from django.db.backends.utils import truncate_name
cls._meta = self
self.model = cls
# First, construct the default values for these options.
self.object_name = cls.__name__
self.model_name = self.object_name.lower()
self.verbose_name = camel_case_to_spaces(self.object_name)
# Store the original user-defined values for each option,
# for use when serializing the model definition
self.original_attrs = {}
# Next, apply any overridden values from 'class Meta'.
if self.meta:
meta_attrs = self.meta.__dict__.copy()
for name in self.meta.__dict__:
# Ignore any private attributes that Django doesn't care about.
# NOTE: We can't modify a dictionary's contents while looping
# over it, so we loop over the *original* dictionary instead.
if name.startswith('_'):
del meta_attrs[name]
for attr_name in DEFAULT_NAMES:
if attr_name in meta_attrs:
setattr(self, attr_name, meta_attrs.pop(attr_name))
self.original_attrs[attr_name] = getattr(self, attr_name)
elif hasattr(self.meta, attr_name):
setattr(self, attr_name, getattr(self.meta, attr_name))
self.original_attrs[attr_name] = getattr(self, attr_name)
self.unique_together = normalize_together(self.unique_together)
self.index_together = normalize_together(self.index_together)
# verbose_name_plural is a special case because it uses a 's'
# by default.
if self.verbose_name_plural is None:
self.verbose_name_plural = string_concat(self.verbose_name, 's')
            # order_with_respect_to and ordering are mutually exclusive.
self._ordering_clash = bool(self.ordering and self.order_with_respect_to)
# Any leftover attributes must be invalid.
if meta_attrs != {}:
raise TypeError("'class Meta' got invalid attribute(s): %s" % ','.join(meta_attrs.keys()))
else:
self.verbose_name_plural = string_concat(self.verbose_name, 's')
del self.meta
# If the db_table wasn't provided, use the app_label + model_name.
if not self.db_table:
self.db_table = "%s_%s" % (self.app_label, self.model_name)
self.db_table = truncate_name(self.db_table, connection.ops.max_name_length())
def _prepare(self, model):
if self.order_with_respect_to:
# The app registry will not be ready at this point, so we cannot
# use get_field().
query = self.order_with_respect_to
try:
self.order_with_respect_to = next(
f for f in self._get_fields(reverse=False)
if f.name == query or f.attname == query
)
except StopIteration:
raise FieldDoesNotExist('%s has no field named %r' % (self.object_name, query))
self.ordering = ('_order',)
if not any(isinstance(field, OrderWrt) for field in model._meta.local_fields):
model.add_to_class('_order', OrderWrt())
else:
self.order_with_respect_to = None
if self.pk is None:
if self.parents:
# Promote the first parent link in lieu of adding yet another
# field.
field = next(six.itervalues(self.parents))
# Look for a local field with the same name as the
# first parent link. If a local field has already been
# created, use it instead of promoting the parent
already_created = [fld for fld in self.local_fields if fld.name == field.name]
if already_created:
field = already_created[0]
field.primary_key = True
self.setup_pk(field)
else:
auto = AutoField(verbose_name='ID', primary_key=True,
auto_created=True)
model.add_to_class('id', auto)
def add_field(self, field, virtual=False):
# Insert the given field in the order in which it was created, using
# the "creation_counter" attribute of the field.
# Move many-to-many related fields from self.fields into
# self.many_to_many.
if virtual:
self.virtual_fields.append(field)
elif field.is_relation and field.many_to_many:
self.local_many_to_many.insert(bisect(self.local_many_to_many, field), field)
else:
self.local_fields.insert(bisect(self.local_fields, field), field)
self.setup_pk(field)
# If the field being added is a relation to another known field,
# expire the cache on this field and the forward cache on the field
# being referenced, because there will be new relationships in the
# cache. Otherwise, expire the cache of references *to* this field.
# The mechanism for getting at the related model is slightly odd -
# ideally, we'd just ask for field.related_model. However, related_model
# is a cached property, and all the models haven't been loaded yet, so
# we need to make sure we don't cache a string reference.
if field.is_relation and hasattr(field.remote_field, 'model') and field.remote_field.model:
try:
field.remote_field.model._meta._expire_cache(forward=False)
except AttributeError:
pass
self._expire_cache()
else:
self._expire_cache(reverse=False)
def setup_pk(self, field):
if not self.pk and field.primary_key:
self.pk = field
field.serialize = False
def setup_proxy(self, target):
"""
Does the internal setup so that the current model is a proxy for
"target".
"""
self.pk = target._meta.pk
self.proxy_for_model = target
self.db_table = target._meta.db_table
def __repr__(self):
return '<Options for %s>' % self.object_name
def __str__(self):
return "%s.%s" % (smart_text(self.app_label), smart_text(self.model_name))
def can_migrate(self, connection):
"""
Return True if the model can/should be migrated on the `connection`.
`connection` can be either a real connection or a connection alias.
"""
if self.proxy or self.swapped or not self.managed:
return False
if isinstance(connection, six.string_types):
connection = connections[connection]
if self.required_db_vendor:
return self.required_db_vendor == connection.vendor
if self.required_db_features:
return all(getattr(connection.features, feat, False)
for feat in self.required_db_features)
return True
@property
def verbose_name_raw(self):
"""
There are a few places where the untranslated verbose name is needed
(so that we get the same value regardless of currently active
locale).
"""
with override(None):
return force_text(self.verbose_name)
@property
def swapped(self):
"""
Has this model been swapped out for another? If so, return the model
name of the replacement; otherwise, return None.
For historical reasons, model name lookups using get_model() are
case insensitive, so we make sure we are case insensitive here.
"""
if self.swappable:
model_label = '%s.%s' % (self.app_label, self.model_name)
swapped_for = getattr(settings, self.swappable, None)
if swapped_for:
try:
swapped_label, swapped_object = swapped_for.split('.')
except ValueError:
# setting not in the format app_label.model_name
# raising ImproperlyConfigured here causes problems with
# test cleanup code - instead it is raised in get_user_model
# or as part of validation.
return swapped_for
if '%s.%s' % (swapped_label, swapped_object.lower()) not in (None, model_label):
return swapped_for
return None
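    # Hypothetical example: with settings.AUTH_USER_MODEL = 'myapp.CustomUser'
    # and a model declaring swappable = 'AUTH_USER_MODEL', swapped returns
    # 'myapp.CustomUser' on the stock model and None on the replacement model.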
@cached_property
def fields(self):
"""
Returns a list of all forward fields on the model and its parents,
excluding ManyToManyFields.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this field list.
"""
        # For legacy reasons, the fields property should only contain forward
        # fields that are neither virtual nor many-to-many. Therefore we
        # pass these three filters as filters to the generator.
        # The third lambda is a long-winded way of checking f.related_model - we don't
# use that property directly because related_model is a cached property,
# and all the models may not have been loaded yet; we don't want to cache
# the string reference to the related_model.
is_not_an_m2m_field = lambda f: not (f.is_relation and f.many_to_many)
is_not_a_generic_relation = lambda f: not (f.is_relation and f.one_to_many)
is_not_a_generic_foreign_key = lambda f: not (
f.is_relation and f.many_to_one and not (hasattr(f.remote_field, 'model') and f.remote_field.model)
)
return make_immutable_fields_list(
"fields",
(f for f in self._get_fields(reverse=False) if
is_not_an_m2m_field(f) and is_not_a_generic_relation(f)
and is_not_a_generic_foreign_key(f))
)
@cached_property
def concrete_fields(self):
"""
Returns a list of all concrete fields on the model and its parents.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this field list.
"""
return make_immutable_fields_list(
"concrete_fields", (f for f in self.fields if f.concrete)
)
@cached_property
def local_concrete_fields(self):
"""
Returns a list of all concrete fields on the model.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this field list.
"""
return make_immutable_fields_list(
"local_concrete_fields", (f for f in self.local_fields if f.concrete)
)
@raise_deprecation(suggested_alternative="get_fields()")
def get_fields_with_model(self):
return [self._map_model(f) for f in self.get_fields()]
@raise_deprecation(suggested_alternative="get_fields()")
def get_concrete_fields_with_model(self):
return [self._map_model(f) for f in self.concrete_fields]
@cached_property
def many_to_many(self):
"""
Returns a list of all many to many fields on the model and its parents.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this list.
"""
return make_immutable_fields_list(
"many_to_many",
(f for f in self._get_fields(reverse=False)
if f.is_relation and f.many_to_many)
)
@cached_property
def related_objects(self):
"""
Returns all related objects pointing to the current model. The related
objects can come from a one-to-one, one-to-many, or many-to-many field
relation type.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this field list.
"""
all_related_fields = self._get_fields(forward=False, reverse=True, include_hidden=True)
return make_immutable_fields_list(
"related_objects",
(obj for obj in all_related_fields
if not obj.hidden or obj.field.many_to_many)
)
@raise_deprecation(suggested_alternative="get_fields()")
def get_m2m_with_model(self):
return [self._map_model(f) for f in self.many_to_many]
@cached_property
def _forward_fields_map(self):
res = {}
fields = self._get_fields(reverse=False)
for field in fields:
res[field.name] = field
            # Due to the way Django's internals work, get_field() should also
            # be able to fetch a field by attname. For a concrete field with a
            # relation, the *_id attname is included too.
try:
res[field.attname] = field
except AttributeError:
pass
return res
@cached_property
def fields_map(self):
res = {}
fields = self._get_fields(forward=False, include_hidden=True)
for field in fields:
res[field.name] = field
            # Due to the way Django's internals work, get_field() should also
            # be able to fetch a field by attname. For a concrete field with a
            # relation, the *_id attname is included too.
try:
res[field.attname] = field
except AttributeError:
pass
return res
def get_field(self, field_name, many_to_many=None):
"""
Returns a field instance given a field name. The field can be either a
forward or reverse field, unless many_to_many is specified; if it is,
only forward fields will be returned.
The many_to_many argument exists for backwards compatibility reasons;
it has been deprecated and will be removed in Django 2.0.
"""
m2m_in_kwargs = many_to_many is not None
if m2m_in_kwargs:
# Always throw a warning if many_to_many is used regardless of
# whether it alters the return type or not.
warnings.warn(
"The 'many_to_many' argument on get_field() is deprecated; "
"use a filter on field.many_to_many instead.",
RemovedInDjango20Warning
)
try:
# In order to avoid premature loading of the relation tree
# (expensive) we prefer checking if the field is a forward field.
field = self._forward_fields_map[field_name]
if many_to_many is False and field.many_to_many:
raise FieldDoesNotExist(
'%s has no field named %r' % (self.object_name, field_name)
)
return field
except KeyError:
# If the app registry is not ready, reverse fields are
# unavailable, therefore we throw a FieldDoesNotExist exception.
if not self.apps.models_ready:
raise FieldDoesNotExist(
"%s has no field named %r. The app cache isn't ready yet, "
"so if this is an auto-created related field, it won't "
"be available yet." % (self.object_name, field_name)
)
try:
if m2m_in_kwargs:
# Previous API does not allow searching reverse fields.
raise FieldDoesNotExist('%s has no field named %r' % (self.object_name, field_name))
# Retrieve field instance by name from cached or just-computed
# field map.
return self.fields_map[field_name]
except KeyError:
raise FieldDoesNotExist('%s has no field named %r' % (self.object_name, field_name))
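    # Usage sketch (hypothetical model): Book._meta.get_field('author')
    # returns the forward field named 'author'; an unknown name raises
    # FieldDoesNotExist.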
@raise_deprecation(suggested_alternative="get_field()")
def get_field_by_name(self, name):
return self._map_model_details(self.get_field(name))
@raise_deprecation(suggested_alternative="get_fields()")
def get_all_field_names(self):
names = set()
fields = self.get_fields()
for field in fields:
# For backwards compatibility GenericForeignKey should not be
# included in the results.
if field.is_relation and field.many_to_one and field.related_model is None:
continue
# Relations to child proxy models should not be included.
if (field.model != self.model and
field.model._meta.concrete_model == self.concrete_model):
continue
names.add(field.name)
if hasattr(field, 'attname'):
names.add(field.attname)
return list(names)
@raise_deprecation(suggested_alternative="get_fields()")
def get_all_related_objects(self, local_only=False, include_hidden=False,
include_proxy_eq=False):
include_parents = True if local_only is False else PROXY_PARENTS
fields = self._get_fields(
forward=False, reverse=True,
include_parents=include_parents,
include_hidden=include_hidden,
)
fields = (obj for obj in fields if not isinstance(obj.field, ManyToManyField))
if include_proxy_eq:
children = chain.from_iterable(c._relation_tree
for c in self.concrete_model._meta.proxied_children
if c is not self)
relations = (f.remote_field for f in children
if include_hidden or not f.remote_field.field.remote_field.is_hidden())
fields = chain(fields, relations)
return list(fields)
@raise_deprecation(suggested_alternative="get_fields()")
def get_all_related_objects_with_model(self, local_only=False, include_hidden=False,
include_proxy_eq=False):
return [
self._map_model(f) for f in self.get_all_related_objects(
local_only=local_only,
include_hidden=include_hidden,
include_proxy_eq=include_proxy_eq,
)
]
@raise_deprecation(suggested_alternative="get_fields()")
def get_all_related_many_to_many_objects(self, local_only=False):
include_parents = True if local_only is not True else PROXY_PARENTS
fields = self._get_fields(
forward=False, reverse=True,
include_parents=include_parents, include_hidden=True
)
return [obj for obj in fields if isinstance(obj.field, ManyToManyField)]
@raise_deprecation(suggested_alternative="get_fields()")
def get_all_related_m2m_objects_with_model(self):
fields = self._get_fields(forward=False, reverse=True, include_hidden=True)
return [self._map_model(obj) for obj in fields if isinstance(obj.field, ManyToManyField)]
def get_base_chain(self, model):
"""
        Returns a list of parent classes leading to 'model' (ordered from
        closest to most distant ancestor). This has to handle the case where
        'model' is a grandparent or even more distant relation.
"""
if not self.parents:
return None
if model in self.parents:
return [model]
for parent in self.parents:
res = parent._meta.get_base_chain(model)
if res:
res.insert(0, parent)
return res
return None
def get_parent_list(self):
"""
Returns all the ancestors of this model as a list ordered by MRO.
Useful for determining if something is an ancestor, regardless of lineage.
"""
result = OrderedSet(self.parents)
for parent in self.parents:
for ancestor in parent._meta.get_parent_list():
result.add(ancestor)
return list(result)
def get_ancestor_link(self, ancestor):
"""
Returns the field on the current model which points to the given
"ancestor". This is possible an indirect link (a pointer to a parent
model, which points, eventually, to the ancestor). Used when
constructing table joins for model inheritance.
Returns None if the model isn't an ancestor of this one.
"""
if ancestor in self.parents:
return self.parents[ancestor]
for parent in self.parents:
# Tries to get a link field from the immediate parent
parent_link = parent._meta.get_ancestor_link(ancestor)
if parent_link:
                # In case of a proxied model, the first link
                # of the chain to the ancestor is that parent's
                # link
return self.parents[parent] or parent_link
def _populate_directed_relation_graph(self):
"""
This method is used by each model to find its reverse objects. As this
method is very expensive and is accessed frequently (it looks up every
field in a model, in every app), it is computed on first access and then
is set as a property on every model.
"""
related_objects_graph = defaultdict(list)
all_models = self.apps.get_models(include_auto_created=True)
for model in all_models:
# Abstract model's fields are copied to child models, hence we will
# see the fields from the child models.
if model._meta.abstract:
continue
fields_with_relations = (
f for f in model._meta._get_fields(reverse=False, include_parents=False)
if f.is_relation and f.related_model is not None
)
for f in fields_with_relations:
if not isinstance(f.remote_field.model, six.string_types):
related_objects_graph[f.remote_field.model._meta].append(f)
for model in all_models:
# Set the relation_tree using the internal __dict__. In this way
# we avoid calling the cached property. In attribute lookup,
# __dict__ takes precedence over a data descriptor (such as
# @cached_property). This means that the _meta._relation_tree is
# only called if related_objects is not in __dict__.
related_objects = related_objects_graph[model._meta]
model._meta.__dict__['_relation_tree'] = related_objects
        # It seems it is possible that self is not in all_models, so guard
        # against that with a default for get().
return self.__dict__.get('_relation_tree', EMPTY_RELATION_TREE)
@cached_property
def _relation_tree(self):
return self._populate_directed_relation_graph()
def _expire_cache(self, forward=True, reverse=True):
# This method is usually called by apps.cache_clear(), when the
# registry is finalized, or when a new field is added.
properties_to_expire = []
if forward:
properties_to_expire.extend(self.FORWARD_PROPERTIES)
if reverse and not self.abstract:
properties_to_expire.extend(self.REVERSE_PROPERTIES)
for cache_key in properties_to_expire:
try:
delattr(self, cache_key)
except AttributeError:
pass
self._get_fields_cache = {}
def get_fields(self, include_parents=True, include_hidden=False):
"""
        Returns a list of fields associated with the model. By default this
        includes forward fields and reverse relations; the result can be
        adjusted using the parameters:
- include_parents: include fields derived from inheritance
- include_hidden: include fields that have a related_name that
starts with a "+"
"""
if include_parents is False:
include_parents = PROXY_PARENTS
return self._get_fields(include_parents=include_parents, include_hidden=include_hidden)
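    # Hypothetical usage against any installed model class:
    #   names = [f.name for f in SomeModel._meta.get_fields(include_hidden=True)]
    # which also includes reverse relations whose related_name starts with '+'.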
def _get_fields(self, forward=True, reverse=True, include_parents=True, include_hidden=False,
seen_models=None):
"""
Internal helper function to return fields of the model.
* If forward=True, then fields defined on this model are returned.
* If reverse=True, then relations pointing to this model are returned.
* If include_hidden=True, then fields with is_hidden=True are returned.
* The include_parents argument toggles if fields from parent models
should be included. It has three values: True, False, and
PROXY_PARENTS. When set to PROXY_PARENTS, the call will return all
fields defined for the current model or any of its parents in the
parent chain to the model's concrete model.
"""
if include_parents not in (True, False, PROXY_PARENTS):
raise TypeError("Invalid argument for include_parents: %s" % (include_parents,))
# This helper function is used to allow recursion in ``get_fields()``
# implementation and to provide a fast way for Django's internals to
# access specific subsets of fields.
# We must keep track of which models we have already seen. Otherwise we
# could include the same field multiple times from different models.
topmost_call = False
if seen_models is None:
seen_models = set()
topmost_call = True
seen_models.add(self.model)
# Creates a cache key composed of all arguments
cache_key = (forward, reverse, include_parents, include_hidden, topmost_call)
try:
# In order to avoid list manipulation. Always return a shallow copy
# of the results.
return self._get_fields_cache[cache_key]
except KeyError:
pass
fields = []
# Recursively call _get_fields() on each parent, with the same
# options provided in this call.
if include_parents is not False:
for parent in self.parents:
# In diamond inheritance it is possible that we see the same
# model from two different routes. In that case, avoid adding
# fields from the same parent again.
if parent in seen_models:
continue
if (parent._meta.concrete_model != self.concrete_model and
include_parents == PROXY_PARENTS):
continue
for obj in parent._meta._get_fields(
forward=forward, reverse=reverse, include_parents=include_parents,
include_hidden=include_hidden, seen_models=seen_models):
if hasattr(obj, 'parent_link') and obj.parent_link:
continue
fields.append(obj)
if reverse:
# Tree is computed once and cached until the app cache is expired.
# It is composed of a list of fields pointing to the current model
# from other models.
all_fields = self._relation_tree
for field in all_fields:
# If hidden fields should be included or the relation is not
# intentionally hidden, add to the fields dict.
if include_hidden or not field.remote_field.hidden:
fields.append(field.remote_field)
if forward:
fields.extend(
field for field in chain(self.local_fields, self.local_many_to_many)
)
# Virtual fields are recopied to each child model, and they get a
# different model as field.model in each child. Hence we have to
# add the virtual fields separately from the topmost call. If we
# did this recursively similar to local_fields, we would get field
# instances with field.model != self.model.
if topmost_call:
fields.extend(
f for f in self.virtual_fields
)
# In order to avoid list manipulation. Always
# return a shallow copy of the results
fields = make_immutable_fields_list("get_fields()", fields)
# Store result into cache for later access
self._get_fields_cache[cache_key] = fields
return fields
| bsd-3-clause |
andykimpe/chromium-test-npapi | tools/json_schema_compiler/preview.py | 44 | 12765 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Server for viewing the compiled C++ code from tools/json_schema_compiler.
"""
import cc_generator
import code
import cpp_type_generator
import cpp_util
import h_generator
import idl_schema
import json_schema
import model
import optparse
import os
import schema_loader
import urlparse
from highlighters import (
pygments_highlighter, none_highlighter, hilite_me_highlighter)
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
class CompilerHandler(BaseHTTPRequestHandler):
"""A HTTPRequestHandler that outputs the result of tools/json_schema_compiler.
"""
def do_GET(self):
parsed_url = urlparse.urlparse(self.path)
request_path = self._GetRequestPath(parsed_url)
chromium_favicon = 'http://codereview.chromium.org/static/favicon.ico'
head = code.Code()
head.Append('<link rel="icon" href="%s">' % chromium_favicon)
head.Append('<link rel="shortcut icon" href="%s">' % chromium_favicon)
body = code.Code()
try:
if os.path.isdir(request_path):
self._ShowPanels(parsed_url, head, body)
else:
self._ShowCompiledFile(parsed_url, head, body)
finally:
self.wfile.write('<html><head>')
self.wfile.write(head.Render())
self.wfile.write('</head><body>')
self.wfile.write(body.Render())
self.wfile.write('</body></html>')
def _GetRequestPath(self, parsed_url, strip_nav=False):
"""Get the relative path from the current directory to the requested file.
"""
path = parsed_url.path
if strip_nav:
path = parsed_url.path.replace('/nav', '')
return os.path.normpath(os.curdir + path)
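  # Example: a request for '/nav/foo/bar.json' with strip_nav=True yields
  # os.path.normpath('./foo/bar.json'), i.e. 'foo/bar.json' relative to the
  # directory the server was started from.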
def _ShowPanels(self, parsed_url, head, body):
"""Show the previewer frame structure.
Code panes are populated via XHR after links in the nav pane are clicked.
"""
(head.Append('<style>')
.Append('body {')
.Append(' margin: 0;')
.Append('}')
.Append('.pane {')
.Append(' height: 100%;')
.Append(' overflow-x: auto;')
.Append(' overflow-y: scroll;')
.Append(' display: inline-block;')
.Append('}')
.Append('#nav_pane {')
.Append(' width: 20%;')
.Append('}')
.Append('#nav_pane ul {')
.Append(' list-style-type: none;')
.Append(' padding: 0 0 0 1em;')
.Append('}')
.Append('#cc_pane {')
.Append(' width: 40%;')
.Append('}')
.Append('#h_pane {')
.Append(' width: 40%;')
.Append('}')
.Append('</style>')
)
body.Append(
'<div class="pane" id="nav_pane">%s</div>'
'<div class="pane" id="h_pane"></div>'
'<div class="pane" id="cc_pane"></div>' %
self._RenderNavPane(parsed_url.path[1:])
)
# The Javascript that interacts with the nav pane and panes to show the
# compiled files as the URL or highlighting options change.
body.Append('''<script type="text/javascript">
// Calls a function for each highlighter style <select> element.
function forEachHighlighterStyle(callback) {
var highlighterStyles =
document.getElementsByClassName('highlighter_styles');
for (var i = 0; i < highlighterStyles.length; ++i)
callback(highlighterStyles[i]);
}
// Called when anything changes, such as the highlighter or hashtag.
function updateEverything() {
var highlighters = document.getElementById('highlighters');
var highlighterName = highlighters.value;
// Cache in localStorage for when the page loads next.
localStorage.highlightersValue = highlighterName;
// Show/hide the highlighter styles.
var highlighterStyleName = '';
forEachHighlighterStyle(function(highlighterStyle) {
if (highlighterStyle.id === highlighterName + '_styles') {
highlighterStyle.removeAttribute('style')
highlighterStyleName = highlighterStyle.value;
} else {
highlighterStyle.setAttribute('style', 'display:none')
}
// Cache in localStorage for when the page next loads.
localStorage[highlighterStyle.id + 'Value'] = highlighterStyle.value;
});
// Populate the code panes.
function populateViaXHR(elementId, requestPath) {
var xhr = new XMLHttpRequest();
xhr.onreadystatechange = function() {
if (xhr.readyState != 4)
return;
if (xhr.status != 200) {
alert('XHR error to ' + requestPath);
return;
}
document.getElementById(elementId).innerHTML = xhr.responseText;
};
xhr.open('GET', requestPath, true);
xhr.send();
}
var targetName = window.location.hash;
targetName = targetName.substring('#'.length);
targetName = targetName.split('.', 1)[0]
if (targetName !== '') {
var basePath = window.location.pathname;
var query = 'highlighter=' + highlighterName + '&' +
'style=' + highlighterStyleName;
populateViaXHR('h_pane', basePath + '/' + targetName + '.h?' + query);
populateViaXHR('cc_pane', basePath + '/' + targetName + '.cc?' + query);
}
}
// Initial load: set the values of highlighter and highlighterStyles from
// localStorage.
(function() {
var cachedValue = localStorage.highlightersValue;
if (cachedValue)
document.getElementById('highlighters').value = cachedValue;
forEachHighlighterStyle(function(highlighterStyle) {
var cachedValue = localStorage[highlighterStyle.id + 'Value'];
if (cachedValue)
highlighterStyle.value = cachedValue;
});
})();
window.addEventListener('hashchange', updateEverything, false);
updateEverything();
</script>''')
def _LoadModel(self, basedir, name):
"""Loads and returns the model for the |name| API from either its JSON or
IDL file, e.g.
name=contextMenus will be loaded from |basedir|/context_menus.json,
name=alarms will be loaded from |basedir|/alarms.idl.
"""
loaders = {
'json': json_schema.Load,
'idl': idl_schema.Load
}
# APIs are referred to like "webRequest" but that's in a file
# "web_request.json" so we need to unixify the name.
unix_name = model.UnixName(name)
for loader_ext, loader_fn in loaders.items():
file_path = '%s/%s.%s' % (basedir, unix_name, loader_ext)
if os.path.exists(file_path):
# For historical reasons these files contain a singleton list with the
# model, so just return that single object.
return (loader_fn(file_path)[0], file_path)
raise ValueError('File for model "%s" not found' % name)
def _ShowCompiledFile(self, parsed_url, head, body):
"""Show the compiled version of a json or idl file given the path to the
compiled file.
"""
api_model = model.Model()
request_path = self._GetRequestPath(parsed_url)
(file_root, file_ext) = os.path.splitext(request_path)
(filedir, filename) = os.path.split(file_root)
try:
# Get main file.
(api_def, file_path) = self._LoadModel(filedir, filename)
namespace = api_model.AddNamespace(api_def, file_path)
type_generator = cpp_type_generator.CppTypeGenerator(
api_model,
schema_loader.SchemaLoader(filedir),
namespace)
# Get the model's dependencies.
for dependency in api_def.get('dependencies', []):
        # Dependencies can contain ':', in which case they refer to
        # permissions or manifest keys rather than APIs.
if ':' in dependency:
continue
(api_def, file_path) = self._LoadModel(filedir, dependency)
referenced_namespace = api_model.AddNamespace(api_def, file_path)
if referenced_namespace:
type_generator.AddNamespace(referenced_namespace,
cpp_util.Classname(referenced_namespace.name).lower())
# Generate code
cpp_namespace = 'generated_api_schemas'
if file_ext == '.h':
cpp_code = (h_generator.HGenerator(type_generator, cpp_namespace)
.Generate(namespace).Render())
elif file_ext == '.cc':
cpp_code = (cc_generator.CCGenerator(type_generator, cpp_namespace)
.Generate(namespace).Render())
else:
self.send_error(404, "File not found: %s" % request_path)
return
# Do highlighting on the generated code
(highlighter_param, style_param) = self._GetHighlighterParams(parsed_url)
head.Append('<style>' +
self.server.highlighters[highlighter_param].GetCSS(style_param) +
'</style>')
body.Append(self.server.highlighters[highlighter_param]
.GetCodeElement(cpp_code, style_param))
except IOError:
self.send_error(404, "File not found: %s" % request_path)
return
except (TypeError, KeyError, AttributeError,
AssertionError, NotImplementedError) as error:
body.Append('<pre>')
body.Append('compiler error: %s' % error)
body.Append('Check server log for more details')
body.Append('</pre>')
raise
def _GetHighlighterParams(self, parsed_url):
"""Get the highlighting parameters from a parsed url.
"""
query_dict = urlparse.parse_qs(parsed_url.query)
return (query_dict.get('highlighter', ['pygments'])[0],
query_dict.get('style', ['colorful'])[0])
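  # e.g. a query string of 'highlighter=pygments&style=colorful' yields
  # ('pygments', 'colorful'); the same pair is used as the default when the
  # parameters are absent.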
def _RenderNavPane(self, path):
"""Renders an HTML nav pane.
This consists of a select element to set highlight style, and a list of all
files at |path| with the appropriate onclick handlers to open either
subdirectories or JSON files.
"""
html = code.Code()
# Highlighter chooser.
html.Append('<select id="highlighters" onChange="updateEverything()">')
for name, highlighter in self.server.highlighters.items():
html.Append('<option value="%s">%s</option>' %
(name, highlighter.DisplayName()))
html.Append('</select>')
html.Append('<br/>')
# Style for each highlighter.
# The correct highlighting will be shown by Javascript.
for name, highlighter in self.server.highlighters.items():
styles = sorted(highlighter.GetStyles())
if not styles:
continue
html.Append('<select class="highlighter_styles" id="%s_styles" '
'onChange="updateEverything()">' % name)
for style in styles:
html.Append('<option>%s</option>' % style)
html.Append('</select>')
html.Append('<br/>')
# The files, with appropriate handlers.
html.Append('<ul>')
    # If path is empty, fall back to the current directory. This can happen
    # when a URL like http://localhost:8000 is navigated to.
if path == '':
path = os.curdir
# Firstly, a .. link if this isn't the root.
if not os.path.samefile(os.curdir, path):
normpath = os.path.normpath(os.path.join(path, os.pardir))
html.Append('<li><a href="/%s">%s/</a>' % (normpath, os.pardir))
# Each file under path/
for filename in sorted(os.listdir(path)):
full_path = os.path.join(path, filename)
(file_root, file_ext) = os.path.splitext(full_path)
if os.path.isdir(full_path) and not full_path.endswith('.xcodeproj'):
html.Append('<li><a href="/%s/">%s/</a>' % (full_path, filename))
elif file_ext in ['.json', '.idl']:
# cc/h panes will automatically update via the hash change event.
html.Append('<li><a href="#%s">%s</a>' %
(filename, filename))
html.Append('</ul>')
return html.Render()
class PreviewHTTPServer(HTTPServer, object):
def __init__(self, server_address, handler, highlighters):
super(PreviewHTTPServer, self).__init__(server_address, handler)
self.highlighters = highlighters
if __name__ == '__main__':
parser = optparse.OptionParser(
description='Runs a server to preview the json_schema_compiler output.',
usage='usage: %prog [option]...')
parser.add_option('-p', '--port', default='8000',
help='port to run the server on')
(opts, argv) = parser.parse_args()
try:
    print('Starting preview server on port %s' % opts.port)
print('The extension documentation can be found at:')
print('')
print(' http://localhost:%s/chrome/common/extensions/api' % opts.port)
print('')
highlighters = {
'hilite': hilite_me_highlighter.HiliteMeHighlighter(),
'none': none_highlighter.NoneHighlighter()
}
try:
highlighters['pygments'] = pygments_highlighter.PygmentsHighlighter()
except ImportError as e:
pass
server = PreviewHTTPServer(('', int(opts.port)),
CompilerHandler,
highlighters)
server.serve_forever()
except KeyboardInterrupt:
server.socket.close()
| bsd-3-clause |
NathanW2/QGIS | python/plugins/processing/algs/qgis/TinInterpolation.py | 7 | 8273 | # -*- coding: utf-8 -*-
"""
***************************************************************************
TinInterpolation.py
---------------------
Date : October 2016
Copyright : (C) 2016 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'October 2016'
__copyright__ = '(C) 2016, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.PyQt.QtGui import QIcon
from qgis.core import (QgsProcessingUtils,
QgsProcessing,
QgsProcessingParameterDefinition,
QgsProcessingParameterEnum,
QgsProcessingParameterNumber,
QgsProcessingParameterExtent,
QgsProcessingParameterRasterDestination,
QgsWkbTypes,
QgsProcessingParameterFeatureSink,
QgsProcessingException,
QgsCoordinateReferenceSystem)
from qgis.analysis import (QgsInterpolator,
QgsTinInterpolator,
QgsGridFileWriter)
from processing.algs.qgis.QgisAlgorithm import QgisAlgorithm
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class ParameterInterpolationData(QgsProcessingParameterDefinition):
def __init__(self, name='', description=''):
super().__init__(name, description)
self.setMetadata({
'widget_wrapper': 'processing.algs.qgis.ui.InterpolationDataWidget.InterpolationDataWidgetWrapper'
})
def type(self):
return 'tin_interpolation_data'
def clone(self):
return ParameterInterpolationData(self.name(), self.description())
@staticmethod
def parseValue(value):
if value is None:
return None
if value == '':
return None
if isinstance(value, str):
return value if value != '' else None
else:
return ParameterInterpolationData.dataToString(value)
@staticmethod
def dataToString(data):
s = ''
for c in data:
s += '{}, {}, {:d}, {:d};'.format(c[0],
c[1],
c[2],
c[3])
return s[:-1]
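# Sketch of the serialized form (one 'source, valueSource, attribute, type'
# entry per layer, joined by ';'):
#   ParameterInterpolationData.dataToString([('layer_a', 0, 2, 1)])
#   -> 'layer_a, 0, 2, 1'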
class TinInterpolation(QgisAlgorithm):
INTERPOLATION_DATA = 'INTERPOLATION_DATA'
METHOD = 'METHOD'
COLUMNS = 'COLUMNS'
ROWS = 'ROWS'
EXTENT = 'EXTENT'
OUTPUT = 'OUTPUT'
TRIANGULATION = 'TRIANGULATION'
def icon(self):
return QIcon(os.path.join(pluginPath, 'images', 'interpolation.png'))
def group(self):
return self.tr('Interpolation')
def groupId(self):
return 'interpolation'
def __init__(self):
super().__init__()
def initAlgorithm(self, config=None):
self.METHODS = [self.tr('Linear'),
                        self.tr('Clough-Tocher (cubic)')
]
self.addParameter(ParameterInterpolationData(self.INTERPOLATION_DATA,
self.tr('Input layer(s)')))
self.addParameter(QgsProcessingParameterEnum(self.METHOD,
self.tr('Interpolation method'),
options=self.METHODS,
defaultValue=0))
self.addParameter(QgsProcessingParameterNumber(self.COLUMNS,
self.tr('Number of columns'),
minValue=0, maxValue=10000000, defaultValue=300))
self.addParameter(QgsProcessingParameterNumber(self.ROWS,
self.tr('Number of rows'),
minValue=0, maxValue=10000000, defaultValue=300))
self.addParameter(QgsProcessingParameterExtent(self.EXTENT,
self.tr('Extent'),
optional=False))
self.addParameter(QgsProcessingParameterRasterDestination(self.OUTPUT,
self.tr('Interpolated')))
triangulation_file_param = QgsProcessingParameterFeatureSink(self.TRIANGULATION,
self.tr('Triangulation'),
type=QgsProcessing.TypeVectorLine,
optional=True)
triangulation_file_param.setCreateByDefault(False)
self.addParameter(triangulation_file_param)
def name(self):
return 'tininterpolation'
def displayName(self):
return self.tr('TIN interpolation')
def processAlgorithm(self, parameters, context, feedback):
interpolationData = ParameterInterpolationData.parseValue(parameters[self.INTERPOLATION_DATA])
method = self.parameterAsEnum(parameters, self.METHOD, context)
columns = self.parameterAsInt(parameters, self.COLUMNS, context)
rows = self.parameterAsInt(parameters, self.ROWS, context)
bbox = self.parameterAsExtent(parameters, self.EXTENT, context)
output = self.parameterAsOutputLayer(parameters, self.OUTPUT, context)
if interpolationData is None:
raise QgsProcessingException(
self.tr('You need to specify at least one input layer.'))
layerData = []
layers = []
crs = QgsCoordinateReferenceSystem()
for row in interpolationData.split(';'):
v = row.split(',')
data = QgsInterpolator.LayerData()
# need to keep a reference until interpolation is complete
layer = QgsProcessingUtils.variantToSource(v[0], context)
data.source = layer
layers.append(layer)
if not crs.isValid():
crs = layer.sourceCrs()
data.valueSource = int(v[1])
data.interpolationAttribute = int(v[2])
if v[3] == '0':
data.sourceType = QgsInterpolator.SourcePoints
elif v[3] == '1':
data.sourceType = QgsInterpolator.SourceStructureLines
else:
data.sourceType = QgsInterpolator.SourceBreakLines
layerData.append(data)
if method == 0:
interpolationMethod = QgsTinInterpolator.Linear
else:
interpolationMethod = QgsTinInterpolator.CloughTocher
(triangulation_sink, triangulation_dest_id) = self.parameterAsSink(parameters, self.TRIANGULATION, context,
QgsTinInterpolator.triangulationFields(), QgsWkbTypes.LineString, crs)
interpolator = QgsTinInterpolator(layerData, interpolationMethod, feedback)
if triangulation_sink is not None:
interpolator.setTriangulationSink(triangulation_sink)
writer = QgsGridFileWriter(interpolator,
output,
bbox,
columns,
rows)
writer.writeFile(feedback)
return {self.OUTPUT: output, self.TRIANGULATION: triangulation_dest_id}
| gpl-2.0 |