repo_name | path | copies | size | content | license
stringlengths 5-100 | stringlengths 4-375 | stringclasses 991 values | stringlengths 4-7 | stringlengths 666-1M | stringclasses 15 values
---|---|---|---|---|---
marcelomiky/PythonCodes | Intro ML Semcomp/semcomp17_ml/venv/lib/python3.5/site-packages/pip/_vendor/requests/compat.py | 327 | 1687 |
# -*- coding: utf-8 -*-
"""
requests.compat
~~~~~~~~~~~~~~~
This module handles import compatibility issues between Python 2 and
Python 3.
"""
from .packages import chardet
import sys
# -------
# Pythons
# -------
# Syntax sugar.
_ver = sys.version_info
#: Python 2.x?
is_py2 = (_ver[0] == 2)
#: Python 3.x?
is_py3 = (_ver[0] == 3)
# Note: We've patched out simplejson support in pip because it prevents
# upgrading simplejson on Windows.
# try:
# import simplejson as json
# except (ImportError, SyntaxError):
# # simplejson does not support Python 3.2, it throws a SyntaxError
# # because of u'...' Unicode literals.
import json
# ---------
# Specifics
# ---------
if is_py2:
from urllib import quote, unquote, quote_plus, unquote_plus, urlencode, getproxies, proxy_bypass
from urlparse import urlparse, urlunparse, urljoin, urlsplit, urldefrag
from urllib2 import parse_http_list
import cookielib
from Cookie import Morsel
from StringIO import StringIO
from .packages.urllib3.packages.ordered_dict import OrderedDict
builtin_str = str
bytes = str
str = unicode
basestring = basestring
numeric_types = (int, long, float)
elif is_py3:
from urllib.parse import urlparse, urlunparse, urljoin, urlsplit, urlencode, quote, unquote, quote_plus, unquote_plus, urldefrag
from urllib.request import parse_http_list, getproxies, proxy_bypass
from http import cookiejar as cookielib
from http.cookies import Morsel
from io import StringIO
from collections import OrderedDict
builtin_str = str
str = str
bytes = bytes
basestring = (str, bytes)
numeric_types = (int, float)
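# Usage sketch (hypothetical caller, not part of this module): downstream
# code imports these shims instead of branching on sys.version_info itself:
#
#   from .compat import urlparse, quote
#   parts = urlparse('http://example.com/a b')
#   print(quote(parts.path))  # '/a%20b' on both Python 2 and Python 3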
| mit |
indera/titanium_mobile | support/module/iphone/templates/build.py | 33 | 8783 |
#!/usr/bin/env python
#
# Appcelerator Titanium Module Packager
#
#
import os, subprocess, sys, glob, string, optparse
import zipfile
from datetime import date
cwd = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
os.chdir(cwd)
required_module_keys = ['name','version','moduleid','description','copyright','license','platform','minsdk']
module_defaults = {
'description':'My module',
'author': 'Your Name',
'license' : 'Specify your license',
'copyright' : 'Copyright (c) %s by Your Company' % str(date.today().year),
}
module_license_default = "TODO: place your license here and we'll include it in the module distribution"
def find_sdk(config):
sdk = config['TITANIUM_SDK']
return os.path.expandvars(os.path.expanduser(sdk))
def replace_vars(config,token):
idx = token.find('$(')
while idx != -1:
idx2 = token.find(')',idx+2)
if idx2 == -1: break
key = token[idx+2:idx2]
if not config.has_key(key): break
token = token.replace('$(%s)' % key, config[key])
idx = token.find('$(')
return token
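# For example, with a hypothetical config {'TITANIUM_SDK': '/opt/titanium'},
# replace_vars(config, 'SDKROOT=$(TITANIUM_SDK)/iphone') returns
# 'SDKROOT=/opt/titanium/iphone'. A reference to a key missing from config
# breaks the loop and is left verbatim in the token.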
def read_ti_xcconfig():
contents = open(os.path.join(cwd,'titanium.xcconfig')).read()
config = {}
for line in contents.splitlines(False):
line = line.strip()
if line[0:2]=='//': continue
idx = line.find('=')
if idx > 0:
key = line[0:idx].strip()
value = line[idx+1:].strip()
config[key] = replace_vars(config,value)
return config
def generate_doc(config):
docdir = os.path.join(cwd,'documentation')
if not os.path.exists(docdir):
warn("Couldn't find documentation file at: %s" % docdir)
return None
try:
import markdown2 as markdown
except ImportError:
import markdown
documentation = []
for file in os.listdir(docdir):
if file in ignoreFiles or os.path.isdir(os.path.join(docdir, file)):
continue
md = open(os.path.join(docdir,file)).read()
html = markdown.markdown(md)
documentation.append({file: html})
return documentation
def compile_js(manifest,config):
js_file = os.path.join(cwd,'assets','__MODULE_ID__.js')
if not os.path.exists(js_file): return
from compiler import Compiler
try:
import json
except:
import simplejson as json
compiler = Compiler(cwd, manifest['moduleid'], manifest['name'], 'commonjs')
root_asset, module_assets = compiler.compile_module()
root_asset_content = """
%s
return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[0]);
""" % root_asset
module_asset_content = """
%s
NSNumber *index = [map objectForKey:path];
if (index == nil) {
return nil;
}
return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[index.integerValue]);
""" % module_assets
from tools import splice_code
assets_router = os.path.join(cwd,'Classes','___PROJECTNAMEASIDENTIFIER___ModuleAssets.m')
splice_code(assets_router, 'asset', root_asset_content)
splice_code(assets_router, 'resolve_asset', module_asset_content)
# Generate the exports after crawling all of the available JS source
exports = open('metadata.json','w')
json.dump({'exports':compiler.exports }, exports)
exports.close()
def die(msg):
print msg
sys.exit(1)
def info(msg):
print "[INFO] %s" % msg
def warn(msg):
print "[WARN] %s" % msg
def validate_license():
c = open(os.path.join(cwd,'LICENSE')).read()
if c.find(module_license_default)!=-1:
warn('please update the LICENSE file with your license text before distributing')
def validate_manifest():
path = os.path.join(cwd,'manifest')
if not os.path.exists(path): die("missing %s" % path)
f = open(path)
manifest = {}
for line in f.readlines():
line = line.strip()
if line[0:1]=='#': continue
if line.find(':') < 0: continue
key,value = line.split(':',1)
manifest[key.strip()]=value.strip()
for key in required_module_keys:
if not manifest.has_key(key): die("missing required manifest key '%s'" % key)
if module_defaults.has_key(key):
defvalue = module_defaults[key]
curvalue = manifest[key]
if curvalue==defvalue: warn("please update the manifest key: '%s' to a non-default value" % key)
return manifest,path
ignoreFiles = ['.DS_Store','.gitignore','libTitanium.a','titanium.jar','README']
ignoreDirs = ['.DS_Store','.svn','.git','CVSROOT']
def zip_dir(zf,dir,basepath,ignoreExt=[]):
if not os.path.exists(dir): return
for root, dirs, files in os.walk(dir):
for name in ignoreDirs:
if name in dirs:
dirs.remove(name) # don't visit ignored directories
for file in files:
if file in ignoreFiles: continue
e = os.path.splitext(file)
if len(e) == 2 and e[1] in ignoreExt: continue
from_ = os.path.join(root, file)
to_ = from_.replace(dir, '%s/%s'%(basepath,dir), 1)
zf.write(from_, to_)
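# Illustration (hypothetical paths): with basepath 'modules/iphone/my.id/1.0'
# and dir 'assets', the entry 'assets/app.js' is written into the archive as
# 'modules/iphone/my.id/1.0/assets/app.js', with ignored directories,
# filenames, and extensions skipped.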
def glob_libfiles():
files = []
for libfile in glob.glob('build/**/*.a'):
if libfile.find('Release-')!=-1:
files.append(libfile)
return files
def build_module(manifest,config):
from tools import ensure_dev_path
ensure_dev_path()
rc = os.system("xcodebuild -sdk iphoneos -configuration Release")
if rc != 0:
die("xcodebuild failed")
rc = os.system("xcodebuild -sdk iphonesimulator -configuration Release")
if rc != 0:
die("xcodebuild failed")
# build the merged library using lipo
moduleid = manifest['moduleid']
libpaths = ''
for libfile in glob_libfiles():
libpaths+='%s ' % libfile
os.system("lipo %s -create -output build/lib%s.a" %(libpaths,moduleid))
def generate_apidoc(apidoc_build_path):
global options
if options.skip_docs:
info("Skipping documentation generation.")
return False
else:
info("Module apidoc generation can be skipped using --skip-docs")
apidoc_path = os.path.join(cwd, "apidoc")
if not os.path.exists(apidoc_path):
warn("Skipping apidoc generation. No apidoc folder found at: %s" % apidoc_path)
return False
if not os.path.exists(apidoc_build_path):
os.makedirs(apidoc_build_path)
ti_root = string.strip(subprocess.check_output(["echo $TI_ROOT"], shell=True))
if not len(ti_root) > 0:
warn("Not generating documentation from the apidoc folder. The titanium_mobile repo could not be found.")
warn("Set the TI_ROOT environment variable to the parent folder where the titanium_mobile repo resides (eg.'export TI_ROOT=/Path').")
return False
docgen = os.path.join(ti_root, "titanium_mobile", "apidoc", "docgen.py")
if not os.path.exists(docgen):
warn("Not generating documentation from the apidoc folder. Couldn't find docgen.py at: %s" % docgen)
return False
info("Generating documentation from the apidoc folder.")
rc = os.system("\"%s\" --format=jsca,modulehtml --css=styles.css -o \"%s\" -e \"%s\"" % (docgen, apidoc_build_path, apidoc_path))
if rc != 0:
die("docgen failed")
return True
def package_module(manifest,mf,config):
name = manifest['name'].lower()
moduleid = manifest['moduleid'].lower()
version = manifest['version']
modulezip = '%s-iphone-%s.zip' % (moduleid,version)
if os.path.exists(modulezip): os.remove(modulezip)
zf = zipfile.ZipFile(modulezip, 'w', zipfile.ZIP_DEFLATED)
modulepath = 'modules/iphone/%s/%s' % (moduleid,version)
zf.write(mf,'%s/manifest' % modulepath)
libname = 'lib%s.a' % moduleid
zf.write('build/%s' % libname, '%s/%s' % (modulepath,libname))
docs = generate_doc(config)
if docs!=None:
for doc in docs:
for file, html in doc.iteritems():
filename = string.replace(file,'.md','.html')
zf.writestr('%s/documentation/%s'%(modulepath,filename),html)
apidoc_build_path = os.path.join(cwd, "build", "apidoc")
if generate_apidoc(apidoc_build_path):
for file in os.listdir(apidoc_build_path):
if file in ignoreFiles or os.path.isdir(os.path.join(apidoc_build_path, file)):
continue
zf.write(os.path.join(apidoc_build_path, file), '%s/documentation/apidoc/%s' % (modulepath, file))
zip_dir(zf,'assets',modulepath,['.pyc','.js'])
zip_dir(zf,'example',modulepath,['.pyc'])
zip_dir(zf,'platform',modulepath,['.pyc','.js'])
zf.write('LICENSE','%s/LICENSE' % modulepath)
zf.write('module.xcconfig','%s/module.xcconfig' % modulepath)
exports_file = 'metadata.json'
if os.path.exists(exports_file):
zf.write(exports_file, '%s/%s' % (modulepath, exports_file))
zf.close()
if __name__ == '__main__':
global options
parser = optparse.OptionParser()
parser.add_option("-s", "--skip-docs",
dest="skip_docs",
action="store_true",
help="Will skip building documentation in apidoc folder",
default=False)
(options, args) = parser.parse_args()
manifest,mf = validate_manifest()
validate_license()
config = read_ti_xcconfig()
sdk = find_sdk(config)
sys.path.insert(0,os.path.join(sdk,'iphone'))
sys.path.append(os.path.join(sdk, "common"))
compile_js(manifest,config)
build_module(manifest,config)
package_module(manifest,mf,config)
sys.exit(0)
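# Typical invocations (run from the module project root, next to the
# manifest and titanium.xcconfig files):
#
#   python build.py              # build, document, and package the module
#   python build.py --skip-docs  # same, but skip apidoc generation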
| apache-2.0 |
sernst/cauldron | cauldron/test/cli/interaction/test_interaction_query.py | 1 | 1337 |
from unittest import mock
from cauldron.cli.interaction import query
from cauldron.test.support import scaffolds
class TestRenderTexts(scaffolds.ResultsTest):
"""..."""
def test_choice(self):
"""
:return:
"""
with mock.patch('builtins.input', return_value=''):
index, value = query.choice(
title='Some input',
prompt='Here are your choices',
choices=['a', 'b', 'c', 'd'],
default_index=2
)
self.assertEqual(index, 2)
self.assertEqual(value, 'c')
def test_confirm(self):
"""
:return:
"""
with mock.patch('builtins.input', return_value='y'):
result = query.confirm(
question='Ja oder Nein',
default=False
)
self.assertTrue(result)
with mock.patch('builtins.input', return_value='no'):
result = query.confirm(
question='Ja oder Nein',
default=False
)
self.assertFalse(result)
with mock.patch('builtins.input', return_value=''):
result = query.confirm(
question='Ja oder Nein',
default=False
)
self.assertFalse(result)
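# The same mocking pattern extends to other canned responses; a sketch,
# assuming query.confirm also accepts a 'yes' answer:
#
#   with mock.patch('builtins.input', return_value='yes'):
#       self.assertTrue(query.confirm(question='Ja oder Nein', default=False))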
| mit |
huiyiqun/check_mk | tests/pylint/test_pylint_web.py | 1 | 2549 |
#!/usr/bin/python
# encoding: utf-8
import os
import sys
import glob
import tempfile
from testlib import cmk_path, cmc_path, cme_path
import testlib.pylint_cmk as pylint_cmk
def get_web_plugin_dirs():
plugin_dirs = sorted(list(set(os.listdir(cmk_path() + "/web/plugins")
+ os.listdir(cmc_path() + "/web/plugins")
+ os.listdir(cme_path() + "/web/plugins"))))
# Icons are included from a plugin of the views module. Move them to the
# end so they are imported after the views plugins; the same applies to
# perfometers.
plugin_dirs.remove("icons")
plugin_dirs.append("icons")
plugin_dirs.remove("perfometer")
plugin_dirs.append("perfometer")
return plugin_dirs
def get_plugin_files(plugin_dir):
files = []
for path in [ cmk_path() + "/web/plugins/" + plugin_dir,
cmc_path() + "/web/plugins/" + plugin_dir,
cme_path() + "/web/plugins/" + plugin_dir ]:
if os.path.exists(path):
files += [ (f, path) for f in os.listdir(path) ]
return sorted(files)
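# get_plugin_files returns (filename, base_path) pairs, e.g. (hypothetical)
# ('builtin.py', '/.../check_mk/web/plugins/views'), so callers rebuild the
# full path as base_path + "/" + filename.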
def test_pylint_web(pylint_test_dir):
# Make compiled files import each other by default
sys.path.insert(0, pylint_test_dir)
modules = glob.glob(cmk_path() + "/web/htdocs/*.py") \
+ glob.glob(cmc_path() + "/web/htdocs/*.py") \
+ glob.glob(cme_path() + "/web/htdocs/*.py")
for module in modules:
print("Copy %s to test directory" % module)
f = open(pylint_test_dir + "/" + os.path.basename(module), "w")
pylint_cmk.add_file(f, module)
f.close()
# Move the whole plugins code to their modules, then
# run pylint only on the modules
for plugin_dir in get_web_plugin_dirs():
files = get_plugin_files(plugin_dir)
for plugin_file, plugin_base in files:
plugin_path = plugin_base +"/"+plugin_file
if plugin_file.startswith('.'):
continue
elif plugin_dir in ["icons","perfometer"]:
module_name = "views"
elif plugin_dir == "pages":
module_name = "modules"
else:
module_name = plugin_dir
print("[%s] add %s" % (module_name, plugin_path))
module = file(pylint_test_dir + "/" + module_name + ".py", "a")
pylint_cmk.add_file(module, plugin_path)
module.close()
exit_code = pylint_cmk.run_pylint(pylint_test_dir)
assert exit_code == 0, "PyLint found an error in the web code"
| gpl-2.0 |
michaelpacer/pyhawkes | pyhawkes/models.py | 2 | 65600 |
"""
Top level classes for the Hawkes process model.
"""
import abc
import copy
import numpy as np
from scipy.special import gammaln
from scipy.optimize import minimize
from pybasicbayes.models import ModelGibbsSampling, ModelMeanField
from pybasicbayes.util.text import progprint_xrange
from pyhawkes.internals.bias import GammaBias
from pyhawkes.internals.weights import SpikeAndSlabGammaWeights, GammaMixtureWeights
from pyhawkes.internals.impulses import DirichletImpulseResponses
from pyhawkes.internals.parents import DiscreteTimeParents
from pyhawkes.internals.network import StochasticBlockModel, StochasticBlockModelFixedSparsity, ErdosRenyiFixedSparsity
from pyhawkes.utils.basis import CosineBasis
# TODO: Add a simple HomogeneousPoissonProcessModel
class DiscreteTimeStandardHawkesModel(object):
"""
Discrete time standard Hawkes process model with support for
regularized (stochastic) gradient descent.
"""
def __init__(self, K, dt=1.0, dt_max=10.0,
B=5, basis=None,
alpha=1.0, beta=1.0,
allow_instantaneous=False,
W_max=None,
allow_self_connections=True):
"""
Initialize a discrete time network Hawkes model with K processes.
:param K: Number of processes
:param dt: Time bin size
:param dt_max: Maximum duration (time lag) of the impulse responses
"""
self.K = K
self.dt = dt
self.dt_max = dt_max
self.allow_self_connections = allow_self_connections
self.W_max = W_max
# Initialize the basis
if basis is None:
self.B = B
self.allow_instantaneous = allow_instantaneous
self.basis = CosineBasis(self.B, self.dt, self.dt_max, norm=True,
allow_instantaneous=allow_instantaneous)
else:
self.basis = basis
self.allow_instantaneous = basis.allow_instantaneous
self.B = basis.B
assert not (self.allow_instantaneous and self.allow_self_connections), \
"Cannot allow instantaneous self connections"
# Save the gamma prior
assert alpha >= 1.0, "Alpha must be at least 1.0 to ensure log concavity"
self.alpha = alpha
self.beta = beta
# Initialize with sample from Gamma(alpha, beta)
# self.weights = np.random.gamma(self.alpha, 1.0/self.beta, size=(self.K, 1 + self.K*self.B))
# self.weights = self.alpha/self.beta * np.ones((self.K, 1 + self.K*self.B))
self.weights = 1e-3 * np.ones((self.K, 1 + self.K*self.B))
if not self.allow_self_connections:
self._remove_self_weights()
# Initialize the data list to empty
self.data_list = []
def _remove_self_weights(self):
for k in xrange(self.K):
self.weights[k,1+(k*self.B):1+(k+1)*self.B] = 1e-32
def initialize_with_gibbs_model(self, gibbs_model):
"""
Initialize with a sample from the network Hawkes model
:param W:
:param g:
:return:
"""
assert isinstance(gibbs_model, _DiscreteTimeNetworkHawkesModelBase)
assert gibbs_model.K == self.K
assert gibbs_model.B == self.B
lambda0 = gibbs_model.bias_model.lambda0,
Weff = gibbs_model.weight_model.W_effective
g = gibbs_model.impulse_model.g
for k in xrange(self.K):
self.weights[k,0] = lambda0[k]
self.weights[k,1:] = (Weff[:,k][:,None] * g[:,k,:]).ravel()
if not self.allow_self_connections:
self._remove_self_weights()
def initialize_to_background_rate(self):
if len(self.data_list) > 0:
N = 0
T = 0
for S,_ in self.data_list:
N += S.sum(axis=0)
T += S.shape[0] * self.dt
lambda0 = N / float(T)
self.weights[:,0] = lambda0
@property
def W(self):
WB = self.weights[:,1:].reshape((self.K,self.K, self.B))
# DEBUG
assert WB[0,0,self.B-1] == self.weights[0,1+self.B-1]
assert WB[0,self.K-1,0] == self.weights[0,1+(self.K-1)*self.B]
if self.B > 2:
assert WB[self.K-1,self.K-1,self.B-2] == self.weights[self.K-1,-2]
# Weight matrix is summed over impulse response functions
WT = WB.sum(axis=2)
# Then we transpose so that the weight matrix is (outgoing x incoming)
W = WT.T
return W
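# Shape sketch (hypothetical sizes): with K=2 processes and B=3 basis
# functions, self.weights is (2, 1 + 2*3) = (2, 7); column 0 holds the biases
# and the remaining 6 columns reshape to (2, 2, 3), which sums over basis
# functions and transposes to the (2, 2) matrix W[sender, receiver].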
@property
def bias(self):
return self.weights[:,0]
def add_data(self, S, F=None, minibatchsize=None):
"""
Add a data set to the list of observations.
First, filter the data with the impulse response basis,
then instantiate a set of parents for this data set.
:param S: a TxK matrix of event counts for each time bin
and each process.
"""
assert isinstance(S, np.ndarray) and S.ndim == 2 and S.shape[1] == self.K \
and np.amin(S) >= 0 and S.dtype == np.int, \
"Data must be a TxK array of event counts"
T = S.shape[0]
if F is None:
# Filter the data into a TxKxB array
Ftens = self.basis.convolve_with_basis(S)
# Flatten this into a T x (KxB) matrix
# [F00, F01, F02, F10, F11, ... F(K-1)0, F(K-1)(B-1)]
F = Ftens.reshape((T, self.K * self.B))
assert np.allclose(F[:,0], Ftens[:,0,0])
if self.B > 1:
assert np.allclose(F[:,1], Ftens[:,0,1])
if self.K > 1:
assert np.allclose(F[:,self.B], Ftens[:,1,0])
# Prepend a column of ones
F = np.concatenate((np.ones((T,1)), F), axis=1)
# If minibatchsize is not None, add minibatches of data
if minibatchsize is not None:
for offset in np.arange(T, step=minibatchsize):
end = min(offset+minibatchsize, T)
S_mb = S[offset:end,:]
F_mb = F[offset:end,:]
# Add minibatch to the data list
self.data_list.append((S_mb, F_mb))
else:
self.data_list.append((S,F))
def check_stability(self):
"""
Check that the weight matrix is stable
:return:
"""
# Compute the effective weight matrix (self.weights is 2D, so use the
# W property, which sums the reshaped (K,K,B) weights over basis functions)
W_eff = self.W
eigs = np.linalg.eigvals(W_eff)
maxeig = np.amax(np.real(eigs))
# print "Max eigenvalue: ", maxeig
return maxeig < 1.0
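# Equivalent check for reference: the process is stable when the spectral
# radius of the effective weight matrix is below one, i.e.
#
#   stable = np.amax(np.real(np.linalg.eigvals(model.W))) < 1.0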
def copy_sample(self):
"""
Return a copy of the parameters of the model
:return: The parameters of the model (A,W,\lambda_0, \beta)
"""
# return copy.deepcopy(self.get_parameters())
# Shallow copy the data
data_list = copy.copy(self.data_list)
self.data_list = []
# Make a deep copy without the data
model_copy = copy.deepcopy(self)
# Reset the data and return the data-less copy
self.data_list = data_list
return model_copy
def compute_rate(self, index=None, ks=None):
"""
Compute the rate of the k-th process.
:param index: Which dataset to compute the rate of
:param k: Which process to compute the rate of
:return:
"""
if index is None:
index = 0
_,F = self.data_list[index]
if ks is None:
ks = np.arange(self.K)
if isinstance(ks, int):
R = F.dot(self.weights[ks,:])
return R
elif isinstance(ks, np.ndarray):
Rs = []
for k in ks:
Rs.append(F.dot(self.weights[k,:])[:,None])
return np.concatenate(Rs, axis=1)
else:
raise Exception("ks must be int or array of indices in 0..K-1")
def log_prior(self, ks=None):
"""
Compute the log prior probability of log W
:param ks:
:return:
"""
lp = 0
if ks is None:
ks = np.arange(self.K)
for k in ks:
# lp += (self.alpha * np.log(self.weights[k,1:])).sum()
# lp += (-self.beta * self.weights[k,1:]).sum()
if self.alpha > 1:
lp += (self.alpha -1) * np.log(self.weights[k,1:]).sum()
lp += (-self.beta * self.weights[k,1:]).sum()
return lp
def log_likelihood(self, indices=None, ks=None):
"""
Compute the log likelihood
:return:
"""
ll = 0
if indices is None:
indices = np.arange(len(self.data_list))
if isinstance(indices, int):
indices = [indices]
for index in indices:
S,F = self.data_list[index]
R = self.compute_rate(index, ks=ks)
if ks is not None:
ll += (S[:,ks] * np.log(R) -R*self.dt).sum()
else:
ll += (S * np.log(R) -R*self.dt).sum()
return ll
def log_posterior(self, indices=None, ks=None):
if ks is None:
ks = np.arange(self.K)
lp = self.log_likelihood(indices, ks)
lp += self.log_prior(ks)
return lp
def heldout_log_likelihood(self, S):
self.add_data(S)
hll = self.log_likelihood(indices=-1)
self.data_list.pop()
return hll
def compute_gradient(self, k, indices=None):
"""
Compute the gradient of the log likelihood with respect
to the log biases and log weights
:param k: Which process to compute gradients for.
If none, return a list of gradients for each process.
"""
grad = np.zeros(1 + self.K * self.B)
if indices is None:
indices = np.arange(len(self.data_list))
# d_W_d_log_W = self._d_W_d_logW(k)
for index in indices:
d_rate_d_W = self._d_rate_d_W(index, k)
# d_rate_d_log_W = d_rate_d_W.dot(d_W_d_log_W)
d_ll_d_rate = self._d_ll_d_rate(index, k)
# d_ll_d_log_W = d_ll_d_rate.dot(d_rate_d_log_W)
d_ll_d_W = d_ll_d_rate.dot(d_rate_d_W)
# grad += d_ll_d_log_W
grad += d_ll_d_W
# Add the prior
# d_log_prior_d_log_W = self._d_log_prior_d_log_W(k)
# grad += d_log_prior_d_log_W
d_log_prior_d_W = self._d_log_prior_d_W(k)
assert np.allclose(d_log_prior_d_W[0], 0.0)
# grad += d_log_prior_d_W.dot(d_W_d_log_W)
grad += d_log_prior_d_W
# Zero out the gradient if
if not self.allow_self_connections:
assert np.allclose(self.weights[k,1+k*self.B:1+(k+1)*self.B], 0.0)
grad[1+k*self.B:1+(k+1)*self.B] = 0
return grad
def _d_ll_d_rate(self, index, k):
S,_ = self.data_list[index]
T = S.shape[0]
rate = self.compute_rate(index, k)
# d/dR S*ln(R) -R*dt
grad = S[:,k] / rate - self.dt * np.ones(T)
return grad
def _d_rate_d_W(self, index, k):
_,F = self.data_list[index]
grad = F
return grad
def _d_W_d_logW(self, k):
"""
Let u = logW
d{e^u}/du = e^u
= W
"""
return np.diag(self.weights[k,:])
def _d_log_prior_d_log_W(self, k):
"""
Use a gamma prior on W (it is log concave for alpha >= 1)
By change of variables this implies that
LN p(LN W) = const + \alpha LN W - \beta W
and
d/d (LN W) (LN p(LN W)) = \alpha - \beta W
TODO: Is this still concave? It is a concave function of W,
but what about of LN W? As a function of u=LN(W) it is
linear plus a -\beta e^u which is concave for beta > 0,
so yes, it is still concave.
So why does BFGS not converge monotonically?
"""
d_log_prior_d_log_W = np.zeros_like(self.weights[k,:])
d_log_prior_d_log_W[1:] = self.alpha - self.beta * self.weights[k,1:]
return d_log_prior_d_log_W
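# Quick finite-difference check of this derivative (hypothetical scalars
# alpha=2.0, beta=1.0, W=0.5, u = log W):
#
#   f = lambda u: 2.0*u - 1.0*np.exp(u)  # LN p(LN W) up to a constant
#   analytic = 2.0 - 1.0*0.5             # alpha - beta*W = 1.5
#   numeric = (f(np.log(0.5)+1e-6) - f(np.log(0.5)-1e-6)) / 2e-6  # ~1.5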
def _d_log_prior_d_W(self, k):
"""
Use a gamma prior on W (it is log concave for alpha >= 1)
and
LN p(W) = (\alpha-1)LN W - \beta W
d/dW LN p(W) = (\alpha - 1)/W - \beta
"""
d_log_prior_d_W = np.zeros_like(self.weights[k,:])
if self.alpha > 1.0:
d_log_prior_d_W[1:] += (self.alpha-1) / self.weights[k,1:]
d_log_prior_d_W[1:] += -self.beta
return d_log_prior_d_W
def fit_with_bfgs_logspace(self):
"""
Fit the model with BFGS
"""
# If W_max is specified, set this as a bound
if self.W_max is not None:
bnds = [(None, None)] + [(None, np.log(self.W_max))] * (self.K * self.B)
else:
bnds = None
def objective(x, k):
self.weights[k,:] = np.exp(x)
self.weights[k,:] = np.nan_to_num(self.weights[k,:])
return np.nan_to_num(-self.log_posterior(ks=np.array([k])))
def gradient(x, k):
self.weights[k,:] = np.exp(x)
self.weights[k,:] = np.nan_to_num(self.weights[k,:])
dll_dW = -self.compute_gradient(k)
d_W_d_log_W = self._d_W_d_logW(k)
return np.nan_to_num(dll_dW.dot(d_W_d_log_W))
itr = [0]
def callback(x):
if itr[0] % 10 == 0:
print "Iteration: %03d\t LP: %.1f" % (itr[0], self.log_posterior())
itr[0] = itr[0] + 1
for k in xrange(self.K):
print "Optimizing process ", k
itr[0] = 0
x0 = np.log(self.weights[k,:])
res = minimize(objective, # Objective function
x0, # Initial value
jac=gradient, # Gradient of the objective
args=(k,), # Arguments to the objective and gradient fns
bounds=bnds, # Bounds on x
callback=callback)
self.weights[k,:] = np.exp(res.x)
def fit_with_bfgs(self):
"""
Fit the model with BFGS
"""
# If W_max is specified, set this as a bound
if self.W_max is not None:
bnds = [(1e-16, None)] + [(1e-16, self.W_max)] * (self.K * self.B)
else:
bnds = [(1e-16, None)] * (1 + self.K * self.B)
def objective(x, k):
self.weights[k,:] = x
return np.nan_to_num(-self.log_posterior(ks=np.array([k])))
def gradient(x, k):
self.weights[k,:] = x
return np.nan_to_num(-self.compute_gradient(k))
itr = [0]
def callback(x):
if itr[0] % 10 == 0:
print "Iteration: %03d\t LP: %.1f" % (itr[0], self.log_posterior())
itr[0] = itr[0] + 1
for k in xrange(self.K):
print "Optimizing process ", k
itr[0] = 0
x0 = self.weights[k,:]
res = minimize(objective, # Objective function
x0, # Initial value
jac=gradient, # Gradient of the objective
args=(k,), # Arguments to the objective and gradient fns
bounds=bnds, # Bounds on x
callback=callback)
self.weights[k,:] = res.x
def gradient_descent_step(self, stepsz=0.01):
grad = np.zeros((self.K, 1+self.K*self.B))
# Compute gradient and take a step for each process
for k in xrange(self.K):
d_W_d_log_W = self._d_W_d_logW(k)
grad[k,:] = self.compute_gradient(k).dot(d_W_d_log_W)
self.weights[k,:] = np.exp(np.log(self.weights[k,:]) + stepsz * grad[k,:])
# Compute the current objective
ll = self.log_likelihood()
return self.weights, ll, grad
def sgd_step(self, prev_velocity, learning_rate, momentum):
"""
Take a step of the stochastic gradient descent algorithm
"""
if prev_velocity is None:
prev_velocity = np.zeros((self.K, 1+self.K*self.B))
# Compute this gradient row by row
grad = np.zeros((self.K, 1+self.K*self.B))
velocity = np.zeros((self.K, 1+self.K*self.B))
# Get a minibatch
mb = np.random.choice(len(self.data_list))
T = self.data_list[mb][0].shape[0]
# Compute gradient and take a step for each process
for k in xrange(self.K):
d_W_d_log_W = self._d_W_d_logW(k)
grad[k,:] = self.compute_gradient(k, indices=[mb]).dot(d_W_d_log_W) / T
velocity[k,:] = momentum * prev_velocity[k,:] + learning_rate * grad[k,:]
# Gradient steps are taken in log weight space
log_weightsk = np.log(self.weights[k,:]) + velocity[k,:]
# The weights themselves are stored in linear space, so exponentiate back
self.weights[k,:] = np.exp(log_weightsk)
# Compute the current objective
ll = self.log_likelihood()
return self.weights, ll, velocity
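# Sketch of a training loop built on sgd_step (hypothetical hyperparameters):
#
#   velocity = None
#   for itr in xrange(1000):
#       W, ll, velocity = model.sgd_step(velocity, learning_rate=1e-3,
#                                        momentum=0.9)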
class _DiscreteTimeNetworkHawkesModelBase(object):
"""
Discrete time network Hawkes process model with support for
Gibbs sampling inference, variational inference (TODO), and
stochastic variational inference (TODO).
"""
__metaclass__ = abc.ABCMeta
# Define the model components and their default hyperparameters
_basis_class = CosineBasis
_default_basis_hypers = {'norm': True, 'allow_instantaneous': False}
_bkgd_class = GammaBias
_default_bkgd_hypers = {'alpha': 1.0, 'beta': 10.0}
_impulse_class = DirichletImpulseResponses
_default_impulse_hypers = {'gamma' : 1.0}
# Weight, parent, and network class must be specified by subclasses
_weight_class = None
_default_weight_hypers = {}
_parent_class = DiscreteTimeParents
_network_class = None
_default_network_hypers = {}
def __init__(self, K, dt=1.0, dt_max=10.0, B=5,
basis=None, basis_hypers={},
bkgd=None, bkgd_hypers={},
impulse=None, impulse_hypers={},
weights=None, weight_hypers={},
network=None, network_hypers={}):
"""
Initialize a discrete time network Hawkes model with K processes.
:param K: Number of processes
"""
self.K = K
self.dt = dt
self.dt_max = dt_max
self.B = B
# Initialize the data list to empty
self.data_list = []
# Initialize the basis
if basis is not None:
# assert basis.B == B
self.basis = basis
self.B = basis.B
else:
# Use the given basis hyperparameters
self.basis_hypers = copy.deepcopy(self._default_basis_hypers)
self.basis_hypers.update(basis_hypers)
self.basis = self._basis_class(self.B, self.dt, self.dt_max,
**self.basis_hypers)
# Initialize the bias
if bkgd is not None:
self.bias_model = bkgd
else:
# Use the given bias hyperparameters
self.bkgd_hypers = copy.deepcopy(self._default_bkgd_hypers)
self.bkgd_hypers.update(bkgd_hypers)
self.bias_model = self._bkgd_class(self, **self.bkgd_hypers)
# Initialize the impulse response model
if impulse is not None:
assert impulse.B == self.B
assert impulse.K == self.K
self.impulse_model = impulse
else:
# Use the given impulse hyperparameters
self.impulse_hypers = copy.deepcopy(self._default_impulse_hypers)
self.impulse_hypers.update(impulse_hypers)
self.impulse_model = self._impulse_class(self, **self.impulse_hypers)
# Initialize the network model
if network is not None:
assert network.K == self.K
self.network = network
else:
# Use the given network hyperparameters
self.network_hypers = copy.deepcopy(self._default_network_hypers)
self.network_hypers.update(network_hypers)
self.network = self._network_class(K=self.K,
**self.network_hypers)
# Check that the model doesn't allow instantaneous self connections
assert not (self.basis.allow_instantaneous and
self.network.allow_self_connections), \
"Cannot allow instantaneous self connections"
# Initialize the weight model
if weights is not None:
assert weights.K == self.K
self.weight_model = weights
else:
self.weight_hypers = copy.deepcopy(self._default_weight_hypers)
self.weight_hypers.update(weight_hypers)
self.weight_model = self._weight_class(self, **self.weight_hypers)
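# Construction sketch (hypothetical hyperparameters): concrete subclasses
# fill in the weight and network classes, so e.g.
#
#   model = DiscreteTimeNetworkHawkesModelSpikeAndSlab(
#       K=10, dt=1.0, dt_max=10.0, B=5, network_hypers={'p': 0.1})
#
# merges {'p': 0.1} over the class defaults before building the network.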
# Expose basic variables
@property
def A(self):
return self.weight_model.A
@property
def W(self):
return self.weight_model.W
@property
def W_effective(self):
return self.weight_model.W_effective
@property
def lambda0(self):
return self.bias_model.lambda0
@property
def impulses(self):
return self.impulse_model.impulses
def initialize_with_standard_model(self, standard_model=None):
"""
Initialize with a standard Hawkes model. Typically this will have
been fit by gradient descent or BFGS, and we just want to copy
over the parameters to get a good starting point for MCMC or VB.
:param W:
:param g:
:return:
"""
if standard_model is None:
standard_model = DiscreteTimeStandardHawkesModel(
K=self.K, dt=self.dt, dt_max=self.dt_max, B=self.B)
for data in self.data_list:
standard_model.add_data(data.S)
standard_model.initialize_to_background_rate()
standard_model.fit_with_bfgs()
else:
from pyhawkes.standard_models import StandardHawkesProcess
assert isinstance(standard_model, StandardHawkesProcess)
assert standard_model.K == self.K
assert standard_model.B == self.B
lambda0 = standard_model.bias
# Get the connection weights
# Wg = standard_model.weights[:,1:].reshape((self.K, self.K, self.B))
# # Permute to out x in x basis
# Wg = np.transpose(Wg, [1,0,2])
# # Sum to get the total weight
# W = Wg.sum(axis=2) + 1e-6
W = standard_model.W + 1e-6
# The impulse responses are normalized weights
# g = Wg / W[:,:,None]
# for k1 in xrange(self.K):
# for k2 in xrange(self.K):
# if g[k1,k2,:].sum() < 1e-2:
# g[k1,k2,:] = 1.0/self.B
g = standard_model.G
# Clip g to make sure it is stable for MF updates
g = np.clip(g, 1e-2, np.inf)
# Make sure g is normalized
g = g / g.sum(axis=2)[:,:,None]
# We need to decide how to set A.
# The simplest is to initialize it to all ones, but
# A = np.ones((self.K, self.K))
# Alternatively, we can start with a sparse matrix
# of only strong connections. What sparsity? How about the
# mean under the network model
# sparsity = self.network.tau1 / (self.network.tau0 + self.network.tau1)
sparsity = self.network.p
A = W > np.percentile(W, (1.0 - sparsity) * 100)
# Set the model parameters
self.bias_model.lambda0 = lambda0.copy('C')
self.weight_model.A = A.copy('C')
self.weight_model.W = W.copy('C')
self.impulse_model.g = g.copy('C')
# if isinstance(self.network, StochasticBlockModel) and not self.network.fixed:
# # Cluster the standard model with kmeans in order to initialize the network
# from sklearn.cluster import KMeans
#
# features = []
# for k in xrange(self.K):
# features.append(np.concatenate((W[:,k], W[k,:])))
#
# self.network.c = KMeans(n_clusters=self.C).fit(np.array(features)).labels_
#
# # print "DEBUG: Do not set p and v in init from standard model"
# self.network.resample_p(self.weight_model.A)
# self.network.resample_v(self.weight_model.A, self.weight_model.W)
# self.network.resample_m()
def add_data(self, S, F=None, minibatchsize=None):
"""
Add a data set to the list of observations.
First, filter the data with the impulse response basis,
then instantiate a set of parents for this data set.
:param S: a TxK matrix of event counts for each time bin
and each process.
"""
assert isinstance(S, np.ndarray) and S.ndim == 2 and S.shape[1] == self.K \
and np.amin(S) >= 0 and S.dtype == np.int, \
"Data must be a TxK array of event counts"
T = S.shape[0]
# Filter the data into a TxKxB array
if F is not None:
assert isinstance(F, np.ndarray) and F.shape == (T, self.K, self.B), \
"F must be a filtered event count matrix"
else:
F = self.basis.convolve_with_basis(S)
# If minibatchsize is not None, add minibatches of data
if minibatchsize is not None:
for offset in np.arange(T, step=minibatchsize):
end = min(offset+minibatchsize, T)
T_mb = end - offset
S_mb = S[offset:end,:]
F_mb = F[offset:end,:]
# Instantiate parent object for this minibatch
parents = self._parent_class(self, T_mb, S_mb, F_mb)
# Add minibatch to the data list
self.data_list.append(parents)
else:
# Instantiate corresponding parent object
parents = self._parent_class(self, T, S, F)
# Add to the data list
self.data_list.append(parents)
def check_stability(self, verbose=False):
"""
Check that the weight matrix is stable
:return:
"""
if self.K < 100:
eigs = np.linalg.eigvals(self.weight_model.W_effective)
maxeig = np.amax(np.real(eigs))
else:
from scipy.sparse.linalg import eigs
maxeig = eigs(self.weight_model.W_effective, k=1)[0]
if verbose:
print "Max eigenvalue: ", maxeig
return maxeig < 1.0
def copy_sample(self):
"""
Return a copy of the parameters of the model
:return: The parameters of the model (A,W,\lambda_0, \beta)
"""
# return copy.deepcopy(self.get_parameters())
# Shallow copy the data
data_list = copy.copy(self.data_list)
self.data_list = []
# Make a deep copy without the data
model_copy = copy.deepcopy(self)
# Reset the data and return the data-less copy
self.data_list = data_list
return model_copy
def generate(self, keep=True, T=100, print_interval=25, verbose=False):
"""
Generate a new data set with the sampled parameters
:param keep: If True, add the generated data to the data list.
:param T: Number of time bins to simulate.
:return: A TxK matrix of sampled event counts and the corresponding TxK rate matrix
"""
assert isinstance(T, int), "T must be an integer number of time bins"
# Test stability
self.check_stability()
# Initialize the output
S = np.zeros((T, self.K))
# Precompute the impulse responses (LxKxK array)
G = np.tensordot(self.basis.basis, self.impulse_model.g, axes=([1], [2]))
L = self.basis.L
assert G.shape == (L,self.K, self.K)
H = self.weight_model.W_effective[None,:,:] * G
# Transpose H so that it is faster for tensor mult
H = np.transpose(H, axes=[0,2,1])
# Compute the rate matrix R
R = np.zeros((T+L, self.K))
# Add the background rate
R += self.bias_model.lambda0[None,:]
iterator = progprint_xrange(T, perline=print_interval) if verbose else xrange(T)
# Iterate over time bins
for t in iterator:
# Sample a Poisson number of events for each process
S[t,:] = np.random.poisson(R[t,:] * self.dt)
# Compute change in rate via tensor product
dR = np.tensordot( H, S[t,:], axes=([2, 0]))
R[t:t+L,:] += dR
# For each sampled event, add a weighted impulse response to the rate
# for k in xrange(self.K):
# if S[t,k] > 0:
# R[t+1:t+L+1,:] += S[t,k] * H[:,k,:]
# Check Spike limit
if np.any(S[t,:] >= 1000):
print "More than 1000 events in one time bin!"
import pdb; pdb.set_trace()
# Only keep the first T time bins
S = S[:T,:].astype(np.int)
R = R[:T,:]
if keep:
# Xs = [X[:T,:] for X in Xs]
# data = np.hstack(Xs + [S])
self.add_data(S)
return S, R
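# Usage sketch: sample T=1000 bins from the current parameters, keeping the
# data for subsequent inference:
#
#   S, R = model.generate(keep=True, T=1000)
#   print S.shape, R.shape  # (1000, K) event counts and (1000, K) rates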
def get_parameters(self):
"""
Get a copy of the parameters of the model
:return:
"""
return self.weight_model.A, \
self.weight_model.W, \
self.impulse_model.g, \
self.bias_model.lambda0, \
self.network.p, \
self.network.v
def set_parameters(self, params):
"""
Set the parameters of the model
:param params:
:return:
"""
A, W, beta, lambda0, c, p, v, m = params
K, B = self.K, self.basis.B
assert isinstance(A, np.ndarray) and A.shape == (K,K), \
"A must be a KxK adjacency matrix"
assert isinstance(W, np.ndarray) and W.shape == (K,K) \
and np.amin(W) >= 0, \
"W must be a KxK weight matrix"
assert isinstance(beta, np.ndarray) and beta.shape == (K,K,B) and \
np.allclose(beta.sum(axis=2), 1.0), \
"beta must be a KxKxB impulse response array"
assert isinstance(lambda0, np.ndarray) and lambda0.shape == (K,) \
and np.amin(lambda0) >=0, \
"lambda0 must be a K-vector of background rates"
self.weight_model.A = A
self.weight_model.W = W
self.impulse_model.g = beta
self.bias_model.lambda0 = lambda0
self.network.c = c
self.network.p = p
self.network.v = v
self.network.m = m
def compute_rate(self, index=0, proc=None, S=None, F=None):
"""
Compute the rate function for a given data set
:param index: An integer specifying which dataset (if S is None)
:param S: TxK array of event counts for which we would like to
compute the model's rate
:return: TxK array of rates
"""
# TODO: Write a Cython function to evaluate this
if S is not None:
assert isinstance(S, np.ndarray) and S.ndim == 2, "S must be a TxK array."
T,K = S.shape
# Filter the data into a TxKxB array
if F is not None:
assert F.shape == (T,K, self.B)
else:
F = self.basis.convolve_with_basis(S)
else:
assert len(self.data_list) > index, "Dataset %d does not exist!" % index
data = self.data_list[index]
T,K,S,F = data.T, data.K, data.S, data.F
if proc is None:
# Compute the rate
R = np.zeros((T,K))
# Background rate
R += self.bias_model.lambda0[None,:]
# Compute the sum of weighted sum of impulse responses
H = self.weight_model.W_effective[:,:,None] * \
self.impulse_model.g
H = np.transpose(H, [2,0,1])
for k2 in xrange(self.K):
R[:,k2] += np.tensordot(F, H[:,:,k2], axes=([2,1], [0,1]))
return R
else:
assert isinstance(proc, int) and proc < self.K, "Proc must be an int"
# Compute the rate
R = np.zeros((T,))
# Background rate
R += self.bias_model.lambda0[proc]
# Compute the sum of weighted sum of impulse responses
H = self.weight_model.W_effective[:,proc,None] * \
self.impulse_model.g[:,proc,:]
R += np.tensordot(F, H, axes=([1,2], [0,1]))
return R
def _poisson_log_likelihood(self, S, R):
"""
Compute the log likelihood of a Poisson matrix with rates R
:param S: Count matrix
:param R: Rate matrix
:return: log likelihood
"""
return (S * np.log(R) - R*self.dt).sum()
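# This is the Poisson likelihood per bin with mean R*dt, dropping terms that
# are constant in the parameters: for count s and rate r,
# log p(s) = s*log(r*dt) - r*dt - log(s!), and the s*log(dt) and log(s!)
# terms do not depend on the model.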
def heldout_log_likelihood(self, S, F=None):
"""
Compute the held out log likelihood of a data matrix S.
:param S: TxK matrix of event counts
:return: log likelihood of those counts under the current model
"""
R = self.compute_rate(S=S, F=F)
return self._poisson_log_likelihood(S, R)
# def heldout_log_likelihood(self, S, F=None):
# self.add_data(S, F=F)
# hll = self.log_likelihood(indices=-1)
# self.data_list.pop()
# return hll
def log_prior(self):
# Get the parameter priors
lp = 0
# lp += self.bias_model.log_probability()
lp += self.weight_model.log_probability()
# lp += self.impulse_model.log_probability()
# lp += self.network.log_probability()
return lp
def log_likelihood(self, indices=None):
"""
Compute the log likelihood of the data
:return:
"""
ll = 0
if indices is None:
indices = np.arange(len(self.data_list))
if isinstance(indices, int):
indices = [indices]
# Get the likelihood of the datasets
for ind in indices:
ll += self.data_list[ind].log_likelihood()
return ll
def log_probability(self):
"""
Compute the joint log probability of the data and the parameters
:return:
"""
lp = self.log_likelihood()
lp += self.log_prior()
return lp
def plot(self, fig=None, handles=None, figsize=(6,4), color="#377eb8",
data_index=0, T_slice=None):
"""
Plot the rates, events, and weights
:param fig:
:return:
"""
import matplotlib.pyplot as plt
if handles is None:
if fig is None:
fig = plt.figure(figsize=figsize)
# Plot network on left
rate_width = 3
ax_net = plt.subplot2grid((self.K, 1+rate_width), (0,0), rowspan=self.K, colspan=1)
# im = self.plot_adjacency_matrix(ax=ax_net)
net_lns = self.plot_network(ax=ax_net, color=color)
# Plot the rates on the right
axs_rate = [plt.subplot2grid((self.K,4), (k,1), rowspan=1, colspan=rate_width)
for k in xrange(self.K)]
rate_lns = self.plot_rates(axs=axs_rate, data_index=data_index, T_slice=T_slice, color=color)
plt.subplots_adjust(wspace=1.0)
else:
# Update given handles
net_lns, rate_lns = handles
# self.plot_adjacency_matrix(im=im)
self.plot_network(lns=net_lns)
self.plot_rates(lns=rate_lns, data_index=data_index)
plt.pause(0.001)
return fig, (net_lns, rate_lns)
def plot_adjacency_matrix(self, im=None, ax=None, cmap="Reds", vmax=None):
import matplotlib.pyplot as plt
if vmax is None:
vmax = np.max(self.W_effective)
if im is None:
if ax is None:
ax = plt.gca()
im = ax.imshow(self.W_effective, interpolation="none", cmap=cmap, vmin=0, vmax=vmax)
ax.set_ylabel('k')
ax.set_xlabel('k\'')
ax.set_title('W_{k \\to k\'}')
else:
# Update given image
im.set_data(self.W_effective)
return im
def plot_network(self, lns=None, ax=None, rad=10, color="#377eb8"):
import matplotlib.pyplot as plt
W_eff = self.W_effective
ths = np.linspace(0, 2*np.pi, num=self.K, endpoint=False)
irad = 0.8*rad
if lns is None:
if ax is None:
ax = plt.gca()
# Hide the x and y axes
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
# Layout the nodes in a circle
for k in xrange(self.K):
ax.text(rad*np.cos(ths[k]), rad*np.sin(ths[k]), "%d" % (k+1))
ax.set_xlim(-1.25*rad, 1.35*rad)
ax.set_ylim(-1.25*rad, 1.35*rad)
ax.set_aspect("equal")
# Draw lines connecting nodes
lns = []
for k1 in xrange(self.K):
for k2 in xrange(self.K):
if k1 == k2:
continue
ln = ax.arrow(irad*np.cos(ths[k1]), irad*np.sin(ths[k1]),
.8*irad*(np.cos(ths[k2])-np.cos(ths[k1])),
.8*irad*(np.sin(ths[k2])-np.sin(ths[k1])),
width=0.3,
head_width=2.,
color=color,
length_includes_head=True)
ln.set_linewidth(3*W_eff[k1,k2])
# Arrow is only visible if there is a connection
ln.set_visible(self.A[k1,k2])
lns.append(ln)
else:
# Update given lns
ind = 0
for k1 in xrange(self.K):
for k2 in xrange(self.K):
if k1 == k2:
continue
# Get the corresponding line
ln = lns[ind]
ind += 1
if self.A[k1,k2]:
ln.set_linewidth(3*W_eff[k1,k2])
ln.set_visible(True)
else:
ln.set_linewidth(0)
ln.set_visible(False)
return lns
def plot_rates(self, lns=None, axs=None, draw_events=True, data_index=0, T_slice=None, color="#377eb8"):
import matplotlib.pyplot as plt
assert len(self.data_list) > data_index
data = self.data_list[data_index]
rates = self.compute_rate(data_index)
S = data.S
T_slice = T_slice if T_slice is not None else (0,data.T)
ymax = np.max(S[T_slice[0]:T_slice[1],:])
if lns is None:
lns = []
if axs is None:
axs = plt.subplots(self.K, 1, sharex=True)
else:
assert len(axs) == self.K
for k in xrange(self.K):
ln = axs[k].plot(self.dt * np.arange(data.T),
rates[:,k],
color=color, lw=2)
axs[k].set_ylabel('$\\lambda_{%d}$' % (k+1))
if k == self.K-1:
axs[k].set_xlabel('time $t$')
else:
axs[k].set_xticks([])
axs[k].set_xlim(T_slice)
axs[k].set_ylim(0,1.1*ymax)
lns.append(ln)
if draw_events:
for k in xrange(self.K):
# Get event times and counts
tk = np.nonzero(data.S[:,k])[0]
ck = data.S[tk,k]
# Stem plot
axs[k].stem(tk+self.dt/2., ck, '-k', markerfmt="ko", lw=2)
else:
# Update given rate lns
for k in xrange(self.K):
lns[k][0].set_data((self.dt * np.arange(data.T), rates[:,k]))
return lns
class DiscreteTimeNetworkHawkesModelSpikeAndSlab(_DiscreteTimeNetworkHawkesModelBase, ModelGibbsSampling):
_weight_class = SpikeAndSlabGammaWeights
_default_weight_hypers = {}
_network_class = ErdosRenyiFixedSparsity
_default_network_hypers = {'p': 0.5,
'allow_self_connections': True,
'kappa': 1.0,
'v': None, 'alpha': None, 'beta': None,}
def resample_model(self):
"""
Perform one iteration of the Gibbs sampling algorithm.
:return:
"""
# Update the parents.
# THIS MUST BE DONE IMMEDIATELY FOLLOWING WEIGHT UPDATES!
for p in self.data_list:
p.resample()
# Update the bias model given the parents assigned to the background
self.bias_model.resample(self.data_list)
# Update the impulse model given the parents assignments
self.impulse_model.resample(self.data_list)
# Update the network model
self.network.resample(data=(self.weight_model.A, self.weight_model.W))
# Update the weight model given the parents assignments
self.weight_model.resample(self.data_list)
def initialize_with_standard_model(self, standard_model):
super(DiscreteTimeNetworkHawkesModelSpikeAndSlab, self).\
initialize_with_standard_model(standard_model)
# Update the parents.
for d in self.data_list:
d.resample()
class DiscreteTimeNetworkHawkesModelSpikeAndSlabSBM(DiscreteTimeNetworkHawkesModelSpikeAndSlab):
_network_class = StochasticBlockModel
_default_network_hypers = {'C': 1, 'c': None,
'p': None, 'tau1': 1.0, 'tau0': 1.0,
'allow_self_connections': True,
'kappa': 1.0,
'v': None, 'alpha': 5.0, 'beta': 1.0,
'pi': 1.0}
class DiscreteTimeNetworkHawkesModelGammaMixture(
_DiscreteTimeNetworkHawkesModelBase, ModelGibbsSampling, ModelMeanField):
_weight_class = GammaMixtureWeights
_default_weight_hypers = {'kappa_0': 0.1, 'nu_0': 1000.0}
_network_class = ErdosRenyiFixedSparsity
_default_network_hypers = {'p': 0.5,
'allow_self_connections': True,
'kappa': 1.0,
'v': None, 'alpha': None, 'beta': None,}
def resample_model(self, resample_network=True):
"""
Perform one iteration of the Gibbs sampling algorithm.
:return:
"""
# Update the parents.
for p in self.data_list:
p.resample()
# Update the bias model given the parents assigned to the background
self.bias_model.resample(self.data_list)
# Update the impulse model given the parents assignments
self.impulse_model.resample(self.data_list)
# Update the weight model given the parents assignments
self.weight_model.resample(self.data_list)
# Update the network model
if resample_network:
self.network.resample(data=(self.weight_model.A, self.weight_model.W))
def initialize_with_standard_model(self, standard_model):
super(DiscreteTimeNetworkHawkesModelGammaMixture, self).\
initialize_with_standard_model(standard_model)
# Set the mean field parameters
self.bias_model.mf_alpha = np.clip(100 * self.bias_model.lambda0, 1e-8, np.inf)
self.bias_model.mf_beta = 100 * np.ones(self.K)
# Weight model
self.weight_model.mf_kappa_0 = self.weight_model.nu_0 * self.weight_model.W.copy()
self.weight_model.mf_v_0 = self.weight_model.nu_0 * np.ones((self.K, self.K))
self.weight_model.mf_kappa_1 = 100 * self.weight_model.W.copy()
self.weight_model.mf_v_1 = 100 * np.ones((self.K, self.K))
self.weight_model.mf_p = 0.8 * self.weight_model.A + 0.2 * (1-self.weight_model.A)
# Set mean field parameters of the impulse model
self.impulse_model.mf_gamma = 100 * self.impulse_model.g.copy('C')
# Set network mean field parameters
# if self.C > 1:
# self.network.mf_m = 0.2 / (self.C-1) * np.ones((self.K, self.C))
# for c in xrange(self.C):
# self.network.mf_m[self.network.c == c, c] = 0.8
# else:
# self.network.mf_m = np.ones((self.K, self.C))
# Update the parents.
# for _,_,_,p in self.data_list:
# p.resample(self.bias_model, self.weight_model, self.impulse_model)
# p.meanfieldupdate(self.bias_model, self.weight_model, self.impulse_model)
def meanfield_coordinate_descent_step(self):
# Update the parents.
for p in self.data_list:
p.meanfieldupdate()
# Update the bias model given the parents assigned to the background
self.bias_model.meanfieldupdate(self.data_list)
# Update the impulse model given the parents assignments
self.impulse_model.meanfieldupdate(self.data_list)
# Update the weight model given the parents assignments
self.weight_model.meanfieldupdate(self.data_list)
# Update the network model
self.network.meanfieldupdate(self.weight_model)
return self.get_vlb()
def get_vlb(self):
# Compute the variational lower bound
vlb = 0
for d in self.data_list:
vlb += d.get_vlb()
vlb += self.bias_model.get_vlb()
vlb += self.impulse_model.get_vlb()
vlb += self.weight_model.get_vlb()
vlb += self.network.get_vlb()
return vlb
def sgd_step(self, minibatchsize, stepsize):
# Sample a minibatch of data
assert len(self.data_list) == 1, "We only sample from the first data set"
S, F, T = self.data_list[0].S, self.data_list[0].F, self.data_list[0].T
if not hasattr(self, 'sgd_offset'):
self.sgd_offset = 0
else:
self.sgd_offset += minibatchsize
if self.sgd_offset >= T:
self.sgd_offset = 0
# Grab a slice of S
sgd_end = min(self.sgd_offset+minibatchsize, T)
S_minibatch = S[self.sgd_offset:sgd_end, :]
F_minibatch = F[self.sgd_offset:sgd_end, :, :]
T_minibatch = S_minibatch.shape[0]
minibatchfrac = float(T_minibatch) / T
# Create a parent object for this minibatch
p = self._parent_class(self, T_minibatch, S_minibatch, F_minibatch)
# TODO: Grab one dataset from the data_list and assume
# it has been added in minibatches
# Update the parents using a standard mean field update
p.meanfieldupdate()
# Update the bias model given the parents assigned to the background
self.bias_model.meanfield_sgdstep([p],
minibatchfrac=minibatchfrac,
stepsize=stepsize)
# Update the impulse model given the parents assignments
self.impulse_model.meanfield_sgdstep([p],
minibatchfrac=minibatchfrac,
stepsize=stepsize)
# Update the weight model given the parents assignments
# Compute the number of events in the minibatch
self.weight_model.meanfield_sgdstep([p],
minibatchfrac=minibatchfrac,
stepsize=stepsize)
# Update the network model. This only depends on the global weight model,
# so we can just do a standard mean field update
self.network.meanfield_sgdstep(self.weight_model,
minibatchfrac=minibatchfrac,
stepsize=stepsize)
# Clear the parent buffer for this minibatch
del p
def resample_from_mf(self):
self.bias_model.resample_from_mf()
self.weight_model.resample_from_mf()
self.impulse_model.resample_from_mf()
self.network.resample_from_mf()
class DiscreteTimeNetworkHawkesModelGammaMixtureSBM(DiscreteTimeNetworkHawkesModelGammaMixture):
# This model uses an SBM with beta-distributed sparsity levels
_network_class = StochasticBlockModel
_default_network_hypers = {'C': 1, 'c': None,
'p': None, 'tau1': 1.0, 'tau0': 1.0,
'allow_self_connections': True,
'kappa': 1.0,
'v': None, 'alpha': 1.0, 'beta': 1.0,
'pi': 1.0}
class ContinuousTimeNetworkHawkesModel(ModelGibbsSampling):
_default_bkgd_hypers = {"alpha" : 1.0, "beta" : 1.0}
_default_impulse_hypers = {"mu_0": 0., "lmbda_0": 1.0, "alpha_0": 1.0, "beta_0" : 1.0}
_default_weight_hypers = {}
_network_class = ErdosRenyiFixedSparsity
_default_network_hypers = {'p': 0.5,
'allow_self_connections': True,
'kappa': 1.0,
'v': None, 'alpha': None, 'beta': None,}
def __init__(self, K, dt_max=10.0,
bkgd_hypers={},
impulse_hypers={},
weight_hypers={},
network=None, network_hypers={}):
"""
Initialize a continuous time network Hawkes model with K processes.
:param K: Number of processes
"""
self.K = K
self.dt_max = dt_max
# Initialize the bias
# Use the given basis hyperparameters
self.bkgd_hypers = copy.deepcopy(self._default_bkgd_hypers)
self.bkgd_hypers.update(bkgd_hypers)
from pyhawkes.internals.bias import ContinuousTimeGammaBias
self.bias_model = ContinuousTimeGammaBias(self, self.K, **self.bkgd_hypers)
# Initialize the impulse response model
self.impulse_hypers = copy.deepcopy(self._default_impulse_hypers)
self.impulse_hypers.update(impulse_hypers)
from pyhawkes.internals.impulses import ContinuousTimeImpulseResponses
self.impulse_model = \
ContinuousTimeImpulseResponses(self, **self.impulse_hypers)
# Initialize the network model
if network is not None:
assert network.K == self.K
self.network = network
else:
# Use the given network hyperparameters
self.network_hypers = copy.deepcopy(self._default_network_hypers)
self.network_hypers.update(network_hypers)
self.network = \
self._network_class(K=self.K, **self.network_hypers)
# Initialize the weight model
from pyhawkes.internals.weights import SpikeAndSlabContinuousTimeGammaWeights
self.weight_hypers = copy.deepcopy(self._default_weight_hypers)
self.weight_hypers.update(weight_hypers)
self.weight_model = \
SpikeAndSlabContinuousTimeGammaWeights(self, **self.weight_hypers)
# Initialize the data list to empty
self.data_list = []
# Expose basic variables
@property
def A(self):
return self.weight_model.A
@property
def W(self):
return self.weight_model.W
@property
def W_effective(self):
return self.weight_model.W_effective
@property
def lambda0(self):
return self.bias_model.lambda0
@property
def impulses(self):
return self.impulse_model.impulses
def get_parameters(self):
"""
Get a copy of the parameters of the model
:return:
"""
return self.A, self.W, self.lambda0, self.impulses
def initialize_with_standard_model(self, standard_model):
"""
Initialize with a standard Hawkes model. Typically this will have
been fit by gradient descent or BFGS, and we just want to copy
over the parameters to get a good starting point for MCMC or VB.
:param W:
:param g:
:return:
"""
K = self.K
from pyhawkes.standard_models import StandardHawkesProcess
assert isinstance(standard_model, StandardHawkesProcess)
assert standard_model.K == K
# lambda0 = standard_model.weights[:,0]
lambda0 = standard_model.bias
# Get the connection weights
W = np.clip(standard_model.W, 1e-16, np.inf)
# Get the impulse response parameters
G = standard_model.G
t_basis = standard_model.basis.dt * np.arange(standard_model.basis.L)
t_basis = np.clip(t_basis, 1e-6, self.dt_max-1e-6)
for k1 in xrange(K):
for k2 in xrange(K):
std_ir = standard_model.basis.basis.dot(G[k1,k2,:])
def loss(mutau):
self.impulse_model.mu[k1,k2] = mutau[0]
self.impulse_model.tau[k1,k2] = mutau[1]
ct_ir = self.impulse_model.impulse(t_basis, k1, k2)
return ct_ir - std_ir
from scipy.optimize import leastsq
mutau0 = np.array([self.impulse_model.mu[k1,k2],
self.impulse_model.tau[k1,k2]])
mutau, _ = leastsq(loss, mutau0)
self.impulse_model.mu[k1,k2] = mutau[0]
self.impulse_model.tau[k1,k2] = mutau[1]
# We need to decide how to set A.
# The simplest is to initialize it to all ones, but
# A = np.ones((self.K, self.K))
# Alternatively, we can start with a sparse matrix
# of only strong connections. What sparsity? How about the
# mean under the network model
# sparsity = self.network.tau1 / (self.network.tau0 + self.network.tau1)
sparsity = self.network.p
A = W > np.percentile(W, (1.0 - sparsity) * 100)
# Set the model parameters
self.bias_model.lambda0 = lambda0.copy('C')
self.weight_model.A = A.copy('C')
self.weight_model.W = W.copy('C')
def add_data(self, S, C, T):
"""
Add a data set to the list of observations.
First, filter the data with the impulse response basis,
then instantiate a set of parents for this data set.
:param S: length N array of event times
:param C: length N array of process id's for each event
:param T: max time of the observation window; all events must occur in [0, T)
"""
assert isinstance(T, float), "T must be a float"
if len(S) > 0:
assert isinstance(S, np.ndarray) and S.ndim == 1 \
and S.min() >= 0 and S.max() < T and \
S.dtype == np.float, \
"S must be a N array of event times"
# Make sure S is sorted
assert (np.diff(S) >= 0).all(), "S must be sorted!"
if len(C) > 0:
assert isinstance(C, np.ndarray) and C.shape == S.shape \
and C.min() >= 0 and C.max() < self.K and \
C.dtype == np.int, \
"C must be a N array of parent indices"
# Instantiate corresponding parent object
from pyhawkes.internals.parents import ContinuousTimeParents
parents = ContinuousTimeParents(self, S, C, T, self.K, self.dt_max)
# Add to the data list
self.data_list.append(parents)
def generate(self, keep=True, T=100.0, max_round=100, **kwargs):
from pyhawkes.utils.utils import logistic
K, dt_max = self.K, self.dt_max
lambda0 = self.bias_model.lambda0
W, A = self.weight_model.W, self.weight_model.A
g_mu, g_tau = self.impulse_model.mu, self.impulse_model.tau
def _generate_helper(S, C, s_pa, c_pa, round=0):
# Recursively generate new generations of spikes with
# given impulse response parameters. Takes in a single spike
# as the parent and recursively calls itself on all children
# spikes
assert round < max_round, "Exceeded maximum recursion depth of %d" % max_round
for c_ch in np.arange(K):
w = W[c_pa, c_ch]
a = A[c_pa, c_ch]
if w==0 or a==0:
continue
# The total area under the impulse response curve (the rate) is w,
# so the number of child spikes on this target process is Poisson(w),
# and their offsets are distributed under a logistic-normal impulse
# response.
n_ch = np.random.poisson(w)
# Sample normal RVs and take the logistic of them; this transforms
# them into logistic-normal samples on (0, dt_max).
x_ch = g_mu[c_pa, c_ch] + np.sqrt(1./g_tau[c_pa, c_ch])*np.random.randn(n_ch)
# Spike times are logistic transformation of x
s_ch = s_pa + dt_max * logistic(x_ch)
# Only keep spikes within the simulation time interval
s_ch = s_ch[s_ch < T]
n_ch = len(s_ch)
S.append(s_ch)
C.append(c_ch * np.ones(n_ch, dtype=np.int))
# Generate offspring from child spikes
for s in s_ch:
_generate_helper(S, C, s, c_ch, round=round+1)
# Initialize output arrays, a dictionary of numpy arrays
S = []
C = []
# Sample background spikes
for k in np.arange(K):
N = np.random.poisson(lambda0[k]*T)
S_bkgd = np.random.rand(N)*T
C_bkgd = k*np.ones(N, dtype=np.int)
S.append(S_bkgd)
C.append(C_bkgd)
# Each background spike spawns a cascade
for s,c in zip(S_bkgd, C_bkgd):
_generate_helper(S, C, s, c)
# Concatenate arrays
S = np.concatenate(S)
C = np.concatenate(C)
# Sort
perm = np.argsort(S)
S = S[perm]
C = C[perm]
if keep:
self.add_data(S, C, T)
return S, C
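    # Sketch of one child-spike draw inside _generate_helper above
    # (illustrative, with logistic(x) = 1/(1+exp(-x))):
    #
    #   n_ch ~ Poisson(w)                   # expected number of children is w
    #   x    ~ Normal(mu, 1/tau)            # n_ch independent draws
    #   s_ch = s_pa + dt_max * logistic(x)  # children land in (s_pa, s_pa + dt_max)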
def check_stability(self):
"""
Check that the weight matrix is stable
:return:
"""
if self.K < 100:
eigs = np.linalg.eigvals(self.weight_model.W_effective)
maxeig = np.amax(np.real(eigs))
else:
from scipy.sparse.linalg import eigs
maxeig = eigs(self.weight_model.W_effective, k=1)[0]
print "Max eigenvalue: ", maxeig
        return maxeig < 1.0
def copy_sample(self):
"""
Return a copy of the parameters of the model
:return: The parameters of the model (A,W,\lambda_0, \beta)
"""
        # return copy.deepcopy(self.get_parameters())
        import copy  # assumed not already imported at module level
        # Shallow copy the data
        data_list = copy.copy(self.data_list)
self.data_list = []
# Make a deep copy without the data
model_copy = copy.deepcopy(self)
# Reset the data and return the data-less copy
self.data_list = data_list
return model_copy
def compute_rate_at_events(self, data):
# Compute the instantaneous rate at the individual events
# Sum over potential parents.
# Compute it manually
S, C, dt_max = data.S, data.C, self.dt_max
N = S.shape[0]
lambda0 = self.bias_model.lambda0
W = self.weight_model.W_effective
mu, tau = self.impulse_model.mu, self.impulse_model.tau
# lmbda_manual = np.zeros(N)
# impulse = self.impulse_model.impulse
# # Resample parents
# for n in xrange(N):
# # First parent is just the background rate of this process
# lmbda_manual[n] += lambda0[C[n]]
#
# # Iterate backward from the most recent to compute probabilities of each parent spike
# for par in xrange(n-1, -1, -1):
# dt = S[n] - S[par]
#
# # Since the spikes are sorted, we can stop if we reach a potential
# # parent that occurred greater than dt_max in the past
# if dt > dt_max:
# break
#
# Wparn = W[C[par], C[n]]
# if Wparn > 0:
# lmbda_manual[n] += Wparn * impulse(dt, C[par], C[n])
# Call cython function to evaluate instantaneous rate
from pyhawkes.internals.continuous_time_helpers import compute_rate_at_events
lmbda = np.zeros(N)
compute_rate_at_events(S, C, dt_max, lambda0, W, mu, tau, lmbda)
# assert np.allclose(lmbda_manual, lmbda)
return lmbda
def compute_integrated_rate(self, data, proc=None):
"""
We can approximate this by ignoring events within dt_max of the end.
Since each event induces an impulse response with area W, we
simply need to count events
:param index:
:param proc:
:return:
"""
T, Ns = data.T, data.Ns
W = self.weight_model.W_effective
lmbda0 = self.bias_model.lambda0
# Compute the integral (W is send x recv)
int_lmbda = lmbda0 * T
int_lmbda += W.T.dot(Ns)
assert int_lmbda.shape == (self.K,)
# TODO: Only compute for proc (probably negligible savings)
if proc is None:
return int_lmbda
else:
return int_lmbda[proc]
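    # Worked sketch of the integral approximation above (made-up numbers):
    # with K=2, T=10, lambda0=[0.5, 0.1], event counts Ns=[3, 7], and
    # W_effective = [[0.2, 0.1], [0.0, 0.3]],
    #
    #   int_lmbda = lambda0 * T + W.T.dot(Ns)
    #             = [5.0, 1.0] + [0.2*3 + 0.0*7, 0.1*3 + 0.3*7]
    #             = [5.6, 3.4]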
def log_prior(self):
# Get the parameter priors
lp = 0
lp += self.bias_model.log_probability()
lp += self.weight_model.log_probability()
lp += self.impulse_model.log_probability()
# lp += self.network.log_probability()
return lp
def log_likelihood(self, data=None):
"""
Compute the joint log probability of the data and the parameters
:return:
"""
ll = 0
if data is None:
data = self.data_list
if not isinstance(data, list):
data = [data]
# Get the likelihood of the datasets
for d in data:
# ll += -gammaln(d.N+1)
ll -= self.compute_integrated_rate(d).sum()
ll += np.log(self.compute_rate_at_events(d)).sum()
return ll
def log_probability(self):
"""
Compute the joint log probability of the data and the parameters
:return:
"""
lp = self.log_likelihood()
lp += self.log_prior()
return lp
def heldout_log_likelihood(self, S, C, T):
self.add_data(S, C, T)
data = self.data_list.pop()
return self.log_likelihood(data)
def compute_rate(self, S, C, T, dt=1.0):
# TODO: Write a cythonized version of this
# Compute rate for each process at intervals of dt
t = np.concatenate([np.arange(0, T, step=dt), [T]])
rate = np.zeros((t.size, self.K))
for k in xrange(self.K):
rate[:,k] += self.bias_model.lambda0[k]
# Get the deltas between the time points and the spikes
# Warning: this can be huge!
deltas = t[:,None]-S[None,:]
t_deltas, n_deltas = np.where((deltas>0) & (deltas < self.dt_max))
N_deltas = t_deltas.size
# Find the process the impulse came from
senders = C[n_deltas]
# Compute the impulse responses onto process k for each delta
imps = self.impulse_model.impulse(deltas[t_deltas, n_deltas],
senders,
k
)
rate[t_deltas, k] += imps
return rate, t
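    # Note on the broadcasting trick in compute_rate: deltas[i, j] = t[i] - S[j]
    # forms a (num time bins x num events) matrix in one shot, and
    # (deltas > 0) & (deltas < dt_max) selects exactly the (bin, event) pairs
    # where event j can still influence bin i. Memory grows as
    # O(len(t) * len(S)), which is why the comment above warns it can be huge.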
def compute_impulses(self, dt=1.0):
dt = np.concatenate([np.arange(0, self.dt_max, step=dt), [self.dt_max]])
ir = np.zeros((dt.size, self.K, self.K))
for k1 in xrange(self.K):
for k2 in xrange(self.K):
ir[:,k1,k2] = self.impulse_model.impulse(dt, k1, k2)
return ir, dt
### Inference
def resample_model(self):
"""
Perform one iteration of the Gibbs sampling algorithm.
:return:
"""
# Update the parents.
# THIS MUST BE DONE IMMEDIATELY FOLLOWING WEIGHT UPDATES!
for p in self.data_list:
p.resample()
# Update the bias model given the parents assigned to the background
self.bias_model.resample(self.data_list)
# # Update the impulse model given the parents assignments
self.impulse_model.resample(self.data_list)
# Update the network model
self.network.resample(data=(self.weight_model.A, self.weight_model.W))
# Update the weight model given the parents assignments
self.weight_model.resample(self.data_list)
|
mit
|
paypal/support
|
support/connection_mgr.py
|
1
|
24065
|
'''
This module provides the capability to go from an abstract PayPal name,
such as "paymentserv" or "occ-conf", to an open connection.
The main entry point is the ConnectionManager.get_connection().
This function will promptly either:
1. Raise an Exception which is a subclass of socket.error
2. Return a socket
ConnectionManagers provide the following services:
1. Name resolution ("paymentserv" to actual ip/port from topos)
2. Transient markdown (keeping track of connection failures)
3. Socket throttling (keeping track of total open sockets)
4. Timeouts (connection and read timeouts from opscfg)
5. Protecteds
In addition, by routing all connections through ConnectionManager,
future refactorings/modifications will be easier. For example,
fallbacks or IP multiplexing.
'''
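# Illustrative usage sketch (hypothetical names; assumes a configured
# support context with address groups covering "paymentserv"):
#
#   cm = ConnectionManager(address_groups=groups, ssl_context=ssl_ctx)
#   sock = cm.get_connection("paymentserv", ssl=True)
#   try:
#       sock.sendall("ping")
#   finally:
#       cm.release_connection(sock)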
import time
import datetime
import socket
import random
import weakref
import collections
import gevent.socket
import gevent.ssl
import gevent.resolver_thread
import gevent
import async
import context
import socket_pool
from crypto import SSLContext
import ll
ml = ll.LLogger()
Address = collections.namedtuple('Address', 'ip port')
KNOWN_KEYS = ("connect_timeout_ms", "response_timeout_ms", "max_connect_retry",
"transient_markdown_enabled", "markdown")
ConnectInfo = collections.namedtuple("ConnectInfo", KNOWN_KEYS)
DEFAULT_CONNECT_INFO = ConnectInfo(5000, 30000, 1, False, False)
class ConnectionManager(object):
def __init__(self,
address_groups=None,
address_aliases=None,
ssl_context=None):
self.sockpools = weakref.WeakKeyDictionary() # one socket pool per ssl
# self.sockpools = {weakref(ssl_ctx): {socket_type: [list of sockets]}}
self.address_groups = address_groups
self.address_aliases = address_aliases
self.ssl_context = ssl_context
self.server_models = ServerModelDirectory()
# map of user-level socket objects to MonitoredSocket instances
self.user_socket_map = weakref.WeakKeyDictionary()
# we need to use gevent.spawn instead of async.spawn because
# at the time the connection manager is constructed, the support
# context is not yet fully initialized
self.culler = gevent.spawn(self.cull_loop)
def get_connection(
self, name_or_addr, ssl=False, sock_type=None, read_timeout=None):
'''
:param name_or_addr: The logical name to connect to, e.g. "db-r"
:param ssl: If set to True, wrap socket with context.protected; if set
to an SSL context, wrap socket with that
:param sock_type: A type to wrap the socket in; the intention here is
for protocols that want to run asynchronous keep-alives, or higher
level handshaking (strictly speaking, this is just a callable which
accepts socket and returns the thing that should be pooled, but for
           most uses it will probably be a class)
'''
ctx = context.get_context()
address_aliases = self.address_aliases or ctx.address_aliases
#ops_config = self.ops_config or ctx.ops_config
#### POTENTIAL ISSUE: OPS CONFIG IS MORE SPECIFIC THAN ADDRESS (owch)
if isinstance(gevent.get_hub().resolver, gevent.resolver_thread.Resolver):
gevent.get_hub().resolver = _Resolver() # avoid pointless thread dispatches
if name_or_addr in address_aliases:
name_or_addr = address_aliases[name_or_addr]
if isinstance(name_or_addr, basestring): # string means a name
name = name_or_addr
address_list = self.get_all_addrs(name)
else:
address_list = [name_or_addr]
# default to a string-ification of ip for the name
name = address_list[0][0].replace('.', '-')
#if name:
# sock_config = ops_config.get_endpoint_config(name)
#else:
# sock_config = ops_config.get_endpoint_config()
sock_config = DEFAULT_CONNECT_INFO
# ensure all DNS resolution is completed; past this point
# everything is in terms of ips
def get_gai(e):
name = e[0].replace(".","-")
with ctx.log.info('DNS', name) as _log:
gai = gevent.socket.getaddrinfo(*e, family=gevent.socket.AF_INET)[0][4]
context.get_context().name_cache[e] = (time.time(), gai)
return gai
def cache_gai(e):
            if e in context.get_context().name_cache:
age, value = context.get_context().name_cache[e]
if time.time() - age > 600:
async.spawn(get_gai, e)
return value
else:
return get_gai(e)
with ctx.log.get_logger('DNS.CACHE').info(name) as _log:
_log['len'] = len(address_list)
address_list = [cache_gai(e) for e in address_list]
with ctx.log.get_logger('COMPACT').info(name):
self._compact(address_list, name)
errors = []
for address in address_list:
try:
log_name = '%s:%s' % (name, address[0])
with ctx.log.get_logger('CONNECT').info(log_name) as _log:
s = self._connect_to_address(
name, ssl, sock_config, address, sock_type, read_timeout)
if hasattr(s, 'getsockname'):
_log["lport"] = s.getsockname()[1]
elif hasattr(s, '_sock'):
_log["lport"] = s._sock.getsockname()[1]
return s
except socket.error as err:
if len(address_list) == 1:
raise
ml.ld("Connection err {0!r}, {1}, {2!r}", address, name, err)
errors.append((address, err))
raise MultiConnectFailure(errors)
def get_all_addrs(self, name):
'''
Returns the all addresses which the logical name would resolve to,
or raises NameNotFound if there is no known address for the given name.
'''
ctx = context.get_context()
address_groups = self.address_groups or ctx.address_groups
try:
address_list = list(address_groups[name])
except KeyError:
err_str = "no address found for name {0}".format(name)
if ctx.stage_ip is None:
err_str += " (no stage communication configured; did you forget?)"
raise NameNotFound(err_str)
return address_list
def get_addr(self, name):
'''
Returns the first address which the logical name would resolve to,
equivalent to get_all_addrs(name)[0]
'''
return self.get_all_addrs(name)[0]
def _connect_to_address(
self, name, ssl, sock_config, address, sock_type, read_timeout):
'''
Internal helper function that does all the complex bits of establishing
a connection, keeping statistics on connections, handling markdowns.
'''
ctx = context.get_context()
if address not in self.server_models:
self.server_models[address] = ServerModel(address)
server_model = self.server_models[address]
if ssl:
            if ssl is True:
                ssl_context = self.ssl_context or ctx.ssl_context
                if ssl_context is None:
                    raise EnvironmentError("Unable to make protected connection to " +
                        repr(name or "unknown") + " at " + repr(address) +
                        " with no SSLContext loaded.")
                # use the loaded context as the protected (pool key + handshake)
                protected = ssl_context
elif isinstance(ssl, SSLContext):
protected = ssl
elif ssl == PLAIN_SSL:
protected = PLAIN_SSL_PROTECTED
else:
protected = NULL_PROTECTED # something falsey and weak-refable
if protected not in self.sockpools:
self.sockpools[protected] = {}
if sock_type not in self.sockpools[protected]:
idle_timeout = getattr(sock_type, "idle_timeout", 0.25)
self.sockpools[protected][sock_type] = socket_pool.SocketPool(timeout=idle_timeout)
sock = self.sockpools[protected][sock_type].acquire(address)
msock = None
new_sock = False
if not sock:
if sock_config.transient_markdown_enabled:
last_error = server_model.last_error
if last_error and time.time() - last_error < TRANSIENT_MARKDOWN_DURATION:
raise MarkedDownError()
failed = 0
sock_state = None
# is the connection within the data-center?
# use tighter timeouts if so; using the presence of a
# protected connection as a rough heuristic for now
internal = (ssl and ssl != PLAIN_SSL) or 'mayfly' in name
while True:
try:
ml.ld("CONNECTING...")
sock_state = ctx.markov_stats['socket.state.' + str(address)].make_transitor('connecting')
log_name = str(address[0]) + ":" + str(address[1])
with ctx.log.get_logger('CONNECT.TCP').info(log_name) as _log:
timeout = sock_config.connect_timeout_ms / 1000.0
if internal: # connect timeout of 50ms inside the data center
timeout = min(timeout, ctx.datacenter_connect_timeout)
sock = gevent.socket.create_connection(address, timeout)
sock.setsockopt(gevent.socket.IPPROTO_TCP, gevent.socket.TCP_NODELAY, 1)
_log['timeout'] = timeout
sock_state.transition('connected')
new_sock = True
ml.ld("CONNECTED local port {0!r}/FD {1}", sock.getsockname(), sock.fileno())
if ssl: # TODO: how should SSL failures interact with markdown & connect count?
sock_state.transition('ssl_handshaking')
with ctx.log.get_logger('CONNECT.SSL').info(log_name) as _log:
if ssl == PLAIN_SSL:
sock = gevent.ssl.wrap_socket(sock)
else:
sock = async.wrap_socket_context(sock, protected.ssl_client_context)
sock_state.transition('ssl_established')
break
except socket.error as err:
if sock_state:
sock_state.transition('closed_error')
if failed >= sock_config.max_connect_retry:
server_model.last_error = time.time()
if sock_config.transient_markdown_enabled:
ctx = context.get_context()
ctx.intervals['net.markdowns.' + str(name) + '.' +
str(address[0]) + ':' + str(address[1])].tick()
ctx.intervals['net.markdowns'].tick()
ctx.log.get_logger('error').critical('TMARKDOWN').error(name=str(name),
addr=str(address))
# was event: ('ERROR', 'TMARKDOWN', '2', 'name=' + str(name) + '&addr=' + str(address))
ml.ld("Connection err {0!r}, {1}, {2!r}", address, name, err)
raise
failed += 1
msock = MonitoredSocket(sock, server_model.active_connections, protected,
name, sock_type, sock_state)
server_model.sock_in_use(msock)
if sock_type:
if getattr(sock_type, "wants_protected", False):
sock = sock_type(msock, protected)
else:
sock = sock_type(msock)
else:
sock = msock
if read_timeout is None:
sock.settimeout(sock_config.response_timeout_ms / 1000.0)
else:
sock.settimeout(read_timeout)
if msock and sock is not msock:
# if sock == msock, collection will not work
self.user_socket_map[sock] = weakref.proxy(msock)
self.user_socket_map.get(sock, sock).state.transition('in_use')
sock.new_sock = new_sock
return sock
def release_connection(self, sock):
# fetch MonitoredSocket
msock = self.user_socket_map.get(sock, sock)
# check the connection for updating of SSL cert (?)
msock.state.transition('pooled')
if context.get_context().sockpool_enabled:
self.sockpools[msock._protected][msock._type].release(sock)
else:
async.killsock(sock)
def cull_loop(self):
while 1:
for pool in sum([e.values() for e in self.sockpools.values()], []):
async.sleep(CULL_INTERVAL)
pool.cull()
async.sleep(CULL_INTERVAL)
def _compact(self, address_list, name):
'''
try to compact and make room for a new socket connection to one of
address_list raises OutOfSockets() if unable to make room
'''
ctx = context.get_context()
sock_log = ctx.log.get_logger('NET.SOCKET')
all_pools = sum([e.values() for e in self.sockpools.values()], [])
with ctx.log.get_logger('CULL').info(name) as _log:
_log['len'] = len(all_pools)
for pool in all_pools:
pool.cull()
total_num_in_use = sum([len(model.active_connections)
for model in self.server_models.values()])
if total_num_in_use >= GLOBAL_MAX_CONNECTIONS:
sock_log.critical('GLOBAL_MAX').success('culling sockets',
limit=GLOBAL_MAX_CONNECTIONS,
in_use=total_num_in_use)
# try to cull sockets to make room
made_room = False
for pool in all_pools:
if pool.total_sockets:
made_room = True
gevent.joinall(pool.reduce_size(pool.total_sockets / 2))
if not made_room:
ctx.intervals['net.out_of_sockets'].tick()
raise OutOfSockets("maximum global socket limit {0} hit: {1}".format(
GLOBAL_MAX_CONNECTIONS, total_num_in_use))
num_in_use = sum([len(self.server_models[address].active_connections)
for address in address_list])
if num_in_use >= MAX_CONNECTIONS:
            sock_log.critical('ADDR_MAX').success('culling {addr} sockets',
                                                  limit=MAX_CONNECTIONS,
                                                  in_use=num_in_use,
                                                  addr=repr(address_list))
# try to cull sockets
made_room = False
for pool in all_pools:
for address in address_list:
num_pooled = pool.socks_pooled_for_addr(address)
if num_pooled:
gevent.joinall(pool.reduce_addr_size(address, num_pooled / 2))
made_room = True
if not made_room:
ctx.intervals['net.out_of_sockets'].tick()
ctx.intervals['net.out_of_sockets.' + str(name)].tick()
raise OutOfSockets("maximum sockets for {0} already in use: {1}".format(
name, num_in_use))
return
CULL_INTERVAL = 1.0
# something falsey, and weak-ref-able
NULL_PROTECTED = type("NullProtected", (object,), {'__nonzero__': lambda self: False})()
# a marker for doing plain ssl with no protected
PLAIN_SSL = "PLAIN_SSL"
PLAIN_SSL_PROTECTED = type("PlainSslProtected", (object,), {})()
# TODO: better sources for this?
TRANSIENT_MARKDOWN_DURATION = 10.0 # seconds
try:
import resource
MAX_CONNECTIONS = int(0.8 * resource.getrlimit(resource.RLIMIT_NOFILE)[0])
GLOBAL_MAX_CONNECTIONS = MAX_CONNECTIONS
except:
MAX_CONNECTIONS = 800
GLOBAL_MAX_CONNECTIONS = 800
# TODO: move these constants to the context object
class _Resolver(gevent.resolver_thread.Resolver):
'''
See gevent.resolver_thread module. This is a way to avoid thread
dispatch for getaddrinfo called on (ip, port) tuples, since that is
    such a common case and the thread dispatch seems to occasionally go
off the rails in high-load environments like stage2.
'''
def getaddrinfo(self, *args, **kwargs):
'''
only short-cut for one very specific case which is extremely
        common in our code; don't worry about short-cutting the thread
dispatch for all possible cases
'''
if len(args) == 2 and isinstance(args[1], (int, long)):
try:
socket.inet_aton(args[0])
except socket.error:
pass
else: # args is of form (ip_string, integer)
return socket.getaddrinfo(*args)
return super(_Resolver, self).getaddrinfo(*args, **kwargs)
class ServerModelDirectory(dict):
def __missing__(self, key):
self[key] = ServerModel(key)
return self[key]
class ServerModel(object):
'''
This class represents an estimate of the state of a given "server".
"Server" is defined here by whatever accepts the socket
connections, which in practice may be an entire pool of server
machines/VMS, each of which has multiple worker thread/procs.
For example, estimate how many connections are currently open
(note: only an estimate, since the exact server-side state of the
sockets is unknown)
'''
def __init__(self, address):
self.last_error = 0
self.active_connections = weakref.WeakKeyDictionary()
self.address = address
def sock_in_use(self, sock):
self.active_connections[sock] = time.time()
def __repr__(self):
if self.last_error:
dt = datetime.datetime.fromtimestamp(int(self.last_error))
last_error = dt.strftime('%Y-%m-%d %H:%M:%S')
else:
last_error = "(None)"
return "<ServerModel {0} last_error={1} nconns={2}>".format(
repr(self.address), last_error, len(self.active_connections))
class MonitoredSocket(object):
'''
A socket proxy which allows socket lifetime to be monitored.
'''
def __init__(self, sock, registry, protected, name=None, type=None, state=None):
self._msock = sock
self._registry = registry # TODO: better name for this
self._spawned = time.time()
self._protected = protected
self._type = type
self._peername = self._msock.getpeername()
key_prefix = 'sock.recv.' + str(self._peername)
self._stats_key_duration = key_prefix + '.duration(ms)'
self._stats_key_size = key_prefix + '.size(bytes)'
# alias some functions through for improved performance
# (__getattr__ is pretty slow compared to normal attribute access)
self.name = name
self.state = state
def send(self, data, flags=0):
ret = self._msock.send(data, flags)
context.get_context().store_network_data(
(self.name, self._msock.getpeername()),
self.fileno(), "OUT", data)
return ret
def sendall(self, data, flags=0):
ret = self._msock.sendall(data, flags)
context.get_context().store_network_data(
(self.name, self._peername),
self.fileno(), "OUT", data)
return ret
def recv(self, bufsize, flags=0):
ctx = context.get_context()
start = time.time()
data = self._msock.recv(bufsize, flags)
duration = time.time() - start
ctx.stats[self._stats_key_duration].add(duration)
ctx.stats[self._stats_key_size].add(len(data))
ctx.store_network_data(
(self.name, self._peername), self.fileno(), "IN", data)
return data
def close(self):
if self in self._registry:
del self._registry[self]
if self.state:
self.state.transition('closed')
return self._msock.close()
def shutdown(self, how): # not going to bother tracking half-open sockets
if self in self._registry: # (unlikely they will ever be used)
del self._registry[self]
return self._msock.shutdown(how)
def __repr__(self):
return "<MonitoredSocket " + repr(self._msock) + ">"
def __getattr__(self, attr):
return getattr(self._msock, attr)
class AddressGroup(object):
'''
An address group represents the set of addresses known by a
specific name to a client at runtime. That is, in a specific
environment (stage, live, etc), an address group represents the
set of <ip, port> pairs to try.
An address group consists of tiers. Each tier should be fully
exhausted before moving on to the next; tiers are "fallbacks". A
tier consists of prioritized addresses. Within a tier, the
addresses should be tried in a priority weighted random order.
The simplest way to use an address group is just to iterate over
it, and try each address in the order returned.
::
tiers: [ [(weight, (ip, port)), (weight, (ip, port)) ... ] ... ]
'''
def __init__(self, tiers):
if not any(tiers):
raise ValueError("no addresses provided for address group")
self.tiers = tiers
def connect_ordering(self):
plist = []
for tier in self.tiers:
# Kodos says: "if you can think of a simpler way of
# achieving a weighted random ordering, I'd like to hear
# it" (http://en.wikipedia.org/wiki/Kang_and_Kodos)
tlist = [(random.random() * e[0], e[1]) for e in tier]
tlist.sort()
plist.extend([e[1] for e in tlist])
return plist
def __iter__(self):
return iter(self.connect_ordering())
def __repr__(self):
return "<AddressGroup " + repr(self.tiers) + ">"
class AddressGroupMap(dict):
'''
For dev mode, will lazily pull in additional addresses.
'''
def __missing__(self, key):
ctx = context.get_context()
if ctx.stage_ip and ctx.topos:
newkey = None
for k in (key, key + "_r1", key + "_ca", key + "_r1_ca"):
if k in ctx.topos.apps:
newkey = k
break
if newkey is not None:
# TODO: maybe do r1 / r2 fallback; however, given this
# is stage only that use case is pretty slim
ports = [int(ctx.topos.get_port(newkey))]
val = AddressGroup(([(1, (ctx.stage_ip, p)) for p in ports],))
self.__dict__.setdefault("warnings", {})
self.setdefault("inferred_addresses", []).append((key, val))
self[key] = val
if key != newkey:
self.warnings["inferred_addresses"].append((newkey, val))
self[newkey] = val
return val
self.__dict__.setdefault("errors", {})
self.errors.setdefault("unknown_addresses", set()).add(key)
ctx.intervals["error.address.missing." + repr(key)].tick()
ctx.intervals["error.address.missing"].tick()
raise KeyError("unknown address requested " + repr(key))
_ADDRESS_SUFFIXES = ["_r" + str(i) for i in range(10)]
_ADDRESS_SUFFIXES = ("_ca",) + tuple(["_r" + str(i) for i in range(10)])
class MarkedDownError(socket.error):
pass
class OutOfSockets(socket.error):
pass
class NameNotFound(socket.error):
pass
class MultiConnectFailure(socket.error):
pass
|
bsd-3-clause
|
neharejanjeva/techstitution
|
venv/lib/python2.7/site-packages/setuptools/dist.py
|
16
|
37459
|
__all__ = ['Distribution']
import re
import os
import warnings
import numbers
import distutils.log
import distutils.core
import distutils.cmd
import distutils.dist
from distutils.errors import (DistutilsOptionError, DistutilsPlatformError,
DistutilsSetupError)
from distutils.util import rfc822_escape
from setuptools.extern import six
from setuptools.extern.six.moves import map
from pkg_resources.extern import packaging
from setuptools.depends import Require
from setuptools import windows_support
from setuptools.monkey import get_unpatched
from setuptools.config import parse_configuration
import pkg_resources
def _get_unpatched(cls):
warnings.warn("Do not call this function", DeprecationWarning)
return get_unpatched(cls)
# Based on Python 3.5 version
def write_pkg_file(self, file):
"""Write the PKG-INFO format data to a file object.
"""
version = '1.0'
if (self.provides or self.requires or self.obsoletes or
self.classifiers or self.download_url):
version = '1.1'
# Setuptools specific for PEP 345
if hasattr(self, 'python_requires'):
version = '1.2'
file.write('Metadata-Version: %s\n' % version)
file.write('Name: %s\n' % self.get_name())
file.write('Version: %s\n' % self.get_version())
file.write('Summary: %s\n' % self.get_description())
file.write('Home-page: %s\n' % self.get_url())
file.write('Author: %s\n' % self.get_contact())
file.write('Author-email: %s\n' % self.get_contact_email())
file.write('License: %s\n' % self.get_license())
if self.download_url:
file.write('Download-URL: %s\n' % self.download_url)
long_desc = rfc822_escape(self.get_long_description())
file.write('Description: %s\n' % long_desc)
keywords = ','.join(self.get_keywords())
if keywords:
file.write('Keywords: %s\n' % keywords)
self._write_list(file, 'Platform', self.get_platforms())
self._write_list(file, 'Classifier', self.get_classifiers())
# PEP 314
self._write_list(file, 'Requires', self.get_requires())
self._write_list(file, 'Provides', self.get_provides())
self._write_list(file, 'Obsoletes', self.get_obsoletes())
# Setuptools specific for PEP 345
if hasattr(self, 'python_requires'):
file.write('Requires-Python: %s\n' % self.python_requires)
# from Python 3.4
def write_pkg_info(self, base_dir):
"""Write the PKG-INFO file into the release tree.
"""
with open(os.path.join(base_dir, 'PKG-INFO'), 'w',
encoding='UTF-8') as pkg_info:
self.write_pkg_file(pkg_info)
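# For a minimal distribution, write_pkg_file emits metadata along these
# lines (illustrative output only):
#
#   Metadata-Version: 1.0
#   Name: example
#   Version: 1.0
#   Summary: An example package
#   ...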
sequence = tuple, list
def check_importable(dist, attr, value):
try:
ep = pkg_resources.EntryPoint.parse('x=' + value)
assert not ep.extras
except (TypeError, ValueError, AttributeError, AssertionError):
raise DistutilsSetupError(
"%r must be importable 'module:attrs' string (got %r)"
% (attr, value)
)
def assert_string_list(dist, attr, value):
"""Verify that value is a string list or None"""
try:
assert ''.join(value) != value
except (TypeError, ValueError, AttributeError, AssertionError):
raise DistutilsSetupError(
"%r must be a list of strings (got %r)" % (attr, value)
)
def check_nsp(dist, attr, value):
"""Verify that namespace packages are valid"""
ns_packages = value
assert_string_list(dist, attr, ns_packages)
for nsp in ns_packages:
if not dist.has_contents_for(nsp):
raise DistutilsSetupError(
"Distribution contains no modules or packages for " +
"namespace package %r" % nsp
)
parent, sep, child = nsp.rpartition('.')
if parent and parent not in ns_packages:
distutils.log.warn(
"WARNING: %r is declared as a package namespace, but %r"
" is not: please correct this in setup.py", nsp, parent
)
def check_extras(dist, attr, value):
"""Verify that extras_require mapping is valid"""
try:
for k, v in value.items():
if ':' in k:
k, m = k.split(':', 1)
if pkg_resources.invalid_marker(m):
raise DistutilsSetupError("Invalid environment marker: " + m)
list(pkg_resources.parse_requirements(v))
except (TypeError, ValueError, AttributeError):
raise DistutilsSetupError(
"'extras_require' must be a dictionary whose values are "
"strings or lists of strings containing valid project/version "
"requirement specifiers."
)
def assert_bool(dist, attr, value):
"""Verify that value is True, False, 0, or 1"""
if bool(value) != value:
tmpl = "{attr!r} must be a boolean value (got {value!r})"
raise DistutilsSetupError(tmpl.format(attr=attr, value=value))
def check_requirements(dist, attr, value):
"""Verify that install_requires is a valid requirements list"""
try:
list(pkg_resources.parse_requirements(value))
except (TypeError, ValueError) as error:
tmpl = (
"{attr!r} must be a string or list of strings "
"containing valid project/version requirement specifiers; {error}"
)
raise DistutilsSetupError(tmpl.format(attr=attr, error=error))
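# A value that satisfies check_requirements (illustrative):
#
#   install_requires = ['requests>=2.0,<3', 'six']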
def check_specifier(dist, attr, value):
"""Verify that value is a valid version specifier"""
try:
packaging.specifiers.SpecifierSet(value)
except packaging.specifiers.InvalidSpecifier as error:
tmpl = (
"{attr!r} must be a string or list of strings "
"containing valid version specifiers; {error}"
)
raise DistutilsSetupError(tmpl.format(attr=attr, error=error))
def check_entry_points(dist, attr, value):
"""Verify that entry_points map is parseable"""
try:
pkg_resources.EntryPoint.parse_map(value)
except ValueError as e:
raise DistutilsSetupError(e)
def check_test_suite(dist, attr, value):
if not isinstance(value, six.string_types):
raise DistutilsSetupError("test_suite must be a string")
def check_package_data(dist, attr, value):
"""Verify that value is a dictionary of package names to glob lists"""
if isinstance(value, dict):
for k, v in value.items():
if not isinstance(k, str):
break
try:
iter(v)
except TypeError:
break
else:
return
raise DistutilsSetupError(
attr + " must be a dictionary mapping package names to lists of "
"wildcard patterns"
)
def check_packages(dist, attr, value):
for pkgname in value:
if not re.match(r'\w+(\.\w+)*', pkgname):
distutils.log.warn(
"WARNING: %r not a valid package name; please use only "
".-separated package names in setup.py", pkgname
)
_Distribution = get_unpatched(distutils.core.Distribution)
class Distribution(_Distribution):
"""Distribution with support for features, tests, and package data
This is an enhanced version of 'distutils.dist.Distribution' that
effectively adds the following new optional keyword arguments to 'setup()':
'install_requires' -- a string or sequence of strings specifying project
versions that the distribution requires when installed, in the format
used by 'pkg_resources.require()'. They will be installed
automatically when the package is installed. If you wish to use
packages that are not available in PyPI, or want to give your users an
alternate download location, you can add a 'find_links' option to the
'[easy_install]' section of your project's 'setup.cfg' file, and then
setuptools will scan the listed web pages for links that satisfy the
requirements.
'extras_require' -- a dictionary mapping names of optional "extras" to the
additional requirement(s) that using those extras incurs. For example,
this::
extras_require = dict(reST = ["docutils>=0.3", "reSTedit"])
indicates that the distribution can optionally provide an extra
capability called "reST", but it can only be used if docutils and
reSTedit are installed. If the user installs your package using
EasyInstall and requests one of your extras, the corresponding
additional requirements will be installed if needed.
'features' **deprecated** -- a dictionary mapping option names to
'setuptools.Feature'
objects. Features are a portion of the distribution that can be
included or excluded based on user options, inter-feature dependencies,
and availability on the current system. Excluded features are omitted
from all setup commands, including source and binary distributions, so
you can create multiple distributions from the same source tree.
Feature names should be valid Python identifiers, except that they may
contain the '-' (minus) sign. Features can be included or excluded
via the command line options '--with-X' and '--without-X', where 'X' is
the name of the feature. Whether a feature is included by default, and
whether you are allowed to control this from the command line, is
determined by the Feature object. See the 'Feature' class for more
information.
'test_suite' -- the name of a test suite to run for the 'test' command.
If the user runs 'python setup.py test', the package will be installed,
and the named test suite will be run. The format is the same as
would be used on a 'unittest.py' command line. That is, it is the
dotted name of an object to import and call to generate a test suite.
'package_data' -- a dictionary mapping package names to lists of filenames
or globs to use to find data files contained in the named packages.
If the dictionary has filenames or globs listed under '""' (the empty
string), those names will be searched for in every package, in addition
to any names for the specific package. Data files found using these
names/globs will be installed along with the package, in the same
location as the package. Note that globs are allowed to reference
the contents of non-package subdirectories, as long as you use '/' as
a path separator. (Globs are automatically converted to
platform-specific paths at runtime.)
In addition to these new keywords, this class also has several new methods
for manipulating the distribution's contents. For example, the 'include()'
and 'exclude()' methods can be thought of as in-place add and subtract
commands that add or remove packages, modules, extensions, and so on from
the distribution. They are used by the feature subsystem to configure the
distribution for the included and excluded features.
"""
_patched_dist = None
def patch_missing_pkg_info(self, attrs):
# Fake up a replacement for the data that would normally come from
# PKG-INFO, but which might not yet be built if this is a fresh
# checkout.
#
if not attrs or 'name' not in attrs or 'version' not in attrs:
return
key = pkg_resources.safe_name(str(attrs['name'])).lower()
dist = pkg_resources.working_set.by_key.get(key)
if dist is not None and not dist.has_metadata('PKG-INFO'):
dist._version = pkg_resources.safe_version(str(attrs['version']))
self._patched_dist = dist
def __init__(self, attrs=None):
have_package_data = hasattr(self, "package_data")
if not have_package_data:
self.package_data = {}
_attrs_dict = attrs or {}
if 'features' in _attrs_dict or 'require_features' in _attrs_dict:
Feature.warn_deprecated()
self.require_features = []
self.features = {}
self.dist_files = []
self.src_root = attrs and attrs.pop("src_root", None)
self.patch_missing_pkg_info(attrs)
# Make sure we have any eggs needed to interpret 'attrs'
if attrs is not None:
self.dependency_links = attrs.pop('dependency_links', [])
assert_string_list(self, 'dependency_links', self.dependency_links)
if attrs and 'setup_requires' in attrs:
self.fetch_build_eggs(attrs['setup_requires'])
for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):
vars(self).setdefault(ep.name, None)
_Distribution.__init__(self, attrs)
if isinstance(self.metadata.version, numbers.Number):
# Some people apparently take "version number" too literally :)
self.metadata.version = str(self.metadata.version)
if self.metadata.version is not None:
try:
ver = packaging.version.Version(self.metadata.version)
normalized_version = str(ver)
if self.metadata.version != normalized_version:
warnings.warn(
"Normalizing '%s' to '%s'" % (
self.metadata.version,
normalized_version,
)
)
self.metadata.version = normalized_version
except (packaging.version.InvalidVersion, TypeError):
warnings.warn(
"The version specified (%r) is an invalid version, this "
"may not work as expected with newer versions of "
"setuptools, pip, and PyPI. Please see PEP 440 for more "
"details." % self.metadata.version
)
if getattr(self, 'python_requires', None):
self.metadata.python_requires = self.python_requires
def parse_config_files(self, filenames=None):
"""Parses configuration files from various levels
and loads configuration.
"""
_Distribution.parse_config_files(self, filenames=filenames)
parse_configuration(self, self.command_options)
def parse_command_line(self):
"""Process features after parsing command line options"""
result = _Distribution.parse_command_line(self)
if self.features:
self._finalize_features()
return result
def _feature_attrname(self, name):
"""Convert feature name to corresponding option attribute name"""
return 'with_' + name.replace('-', '_')
def fetch_build_eggs(self, requires):
"""Resolve pre-setup requirements"""
resolved_dists = pkg_resources.working_set.resolve(
pkg_resources.parse_requirements(requires),
installer=self.fetch_build_egg,
replace_conflicting=True,
)
for dist in resolved_dists:
pkg_resources.working_set.add(dist, replace=True)
return resolved_dists
def finalize_options(self):
_Distribution.finalize_options(self)
if self.features:
self._set_global_opts_from_features()
for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):
value = getattr(self, ep.name, None)
if value is not None:
ep.require(installer=self.fetch_build_egg)
ep.load()(self, ep.name, value)
if getattr(self, 'convert_2to3_doctests', None):
# XXX may convert to set here when we can rely on set being builtin
self.convert_2to3_doctests = [os.path.abspath(p) for p in self.convert_2to3_doctests]
else:
self.convert_2to3_doctests = []
def get_egg_cache_dir(self):
egg_cache_dir = os.path.join(os.curdir, '.eggs')
if not os.path.exists(egg_cache_dir):
os.mkdir(egg_cache_dir)
windows_support.hide_file(egg_cache_dir)
readme_txt_filename = os.path.join(egg_cache_dir, 'README.txt')
with open(readme_txt_filename, 'w') as f:
f.write('This directory contains eggs that were downloaded '
'by setuptools to build, test, and run plug-ins.\n\n')
f.write('This directory caches those eggs to prevent '
'repeated downloads.\n\n')
f.write('However, it is safe to delete this directory.\n\n')
return egg_cache_dir
def fetch_build_egg(self, req):
"""Fetch an egg needed for building"""
try:
cmd = self._egg_fetcher
cmd.package_index.to_scan = []
except AttributeError:
from setuptools.command.easy_install import easy_install
dist = self.__class__({'script_args': ['easy_install']})
dist.parse_config_files()
opts = dist.get_option_dict('easy_install')
keep = (
'find_links', 'site_dirs', 'index_url', 'optimize',
'site_dirs', 'allow_hosts'
)
for key in list(opts):
if key not in keep:
del opts[key] # don't use any other settings
if self.dependency_links:
links = self.dependency_links[:]
if 'find_links' in opts:
links = opts['find_links'][1].split() + links
opts['find_links'] = ('setup', links)
install_dir = self.get_egg_cache_dir()
cmd = easy_install(
dist, args=["x"], install_dir=install_dir, exclude_scripts=True,
always_copy=False, build_directory=None, editable=False,
upgrade=False, multi_version=True, no_report=True, user=False
)
cmd.ensure_finalized()
self._egg_fetcher = cmd
return cmd.easy_install(req)
def _set_global_opts_from_features(self):
"""Add --with-X/--without-X options based on optional features"""
go = []
no = self.negative_opt.copy()
for name, feature in self.features.items():
self._set_feature(name, None)
feature.validate(self)
if feature.optional:
descr = feature.description
incdef = ' (default)'
excdef = ''
if not feature.include_by_default():
excdef, incdef = incdef, excdef
go.append(('with-' + name, None, 'include ' + descr + incdef))
go.append(('without-' + name, None, 'exclude ' + descr + excdef))
no['without-' + name] = 'with-' + name
self.global_options = self.feature_options = go + self.global_options
self.negative_opt = self.feature_negopt = no
def _finalize_features(self):
"""Add/remove features and resolve dependencies between them"""
# First, flag all the enabled items (and thus their dependencies)
for name, feature in self.features.items():
enabled = self.feature_is_included(name)
if enabled or (enabled is None and feature.include_by_default()):
feature.include_in(self)
self._set_feature(name, 1)
# Then disable the rest, so that off-by-default features don't
# get flagged as errors when they're required by an enabled feature
for name, feature in self.features.items():
if not self.feature_is_included(name):
feature.exclude_from(self)
self._set_feature(name, 0)
def get_command_class(self, command):
"""Pluggable version of get_command_class()"""
if command in self.cmdclass:
return self.cmdclass[command]
for ep in pkg_resources.iter_entry_points('distutils.commands', command):
ep.require(installer=self.fetch_build_egg)
self.cmdclass[command] = cmdclass = ep.load()
return cmdclass
else:
return _Distribution.get_command_class(self, command)
def print_commands(self):
for ep in pkg_resources.iter_entry_points('distutils.commands'):
if ep.name not in self.cmdclass:
# don't require extras as the commands won't be invoked
cmdclass = ep.resolve()
self.cmdclass[ep.name] = cmdclass
return _Distribution.print_commands(self)
def get_command_list(self):
for ep in pkg_resources.iter_entry_points('distutils.commands'):
if ep.name not in self.cmdclass:
# don't require extras as the commands won't be invoked
cmdclass = ep.resolve()
self.cmdclass[ep.name] = cmdclass
return _Distribution.get_command_list(self)
def _set_feature(self, name, status):
"""Set feature's inclusion status"""
setattr(self, self._feature_attrname(name), status)
def feature_is_included(self, name):
"""Return 1 if feature is included, 0 if excluded, 'None' if unknown"""
return getattr(self, self._feature_attrname(name))
def include_feature(self, name):
"""Request inclusion of feature named 'name'"""
if self.feature_is_included(name) == 0:
descr = self.features[name].description
raise DistutilsOptionError(
descr + " is required, but was excluded or is not available"
)
self.features[name].include_in(self)
self._set_feature(name, 1)
def include(self, **attrs):
"""Add items to distribution that are named in keyword arguments
For example, 'dist.exclude(py_modules=["x"])' would add 'x' to
the distribution's 'py_modules' attribute, if it was not already
there.
Currently, this method only supports inclusion for attributes that are
lists or tuples. If you need to add support for adding to other
attributes in this or a subclass, you can add an '_include_X' method,
where 'X' is the name of the attribute. The method will be called with
the value passed to 'include()'. So, 'dist.include(foo={"bar":"baz"})'
will try to call 'dist._include_foo({"bar":"baz"})', which can then
handle whatever special inclusion logic is needed.
"""
for k, v in attrs.items():
include = getattr(self, '_include_' + k, None)
if include:
include(v)
else:
self._include_misc(k, v)
def exclude_package(self, package):
"""Remove packages, modules, and extensions in named package"""
pfx = package + '.'
if self.packages:
self.packages = [
p for p in self.packages
if p != package and not p.startswith(pfx)
]
if self.py_modules:
self.py_modules = [
p for p in self.py_modules
if p != package and not p.startswith(pfx)
]
if self.ext_modules:
self.ext_modules = [
p for p in self.ext_modules
if p.name != package and not p.name.startswith(pfx)
]
def has_contents_for(self, package):
"""Return true if 'exclude_package(package)' would do something"""
pfx = package + '.'
for p in self.iter_distribution_names():
if p == package or p.startswith(pfx):
return True
def _exclude_misc(self, name, value):
"""Handle 'exclude()' for list/tuple attrs without a special handler"""
if not isinstance(value, sequence):
raise DistutilsSetupError(
"%s: setting must be a list or tuple (%r)" % (name, value)
)
try:
old = getattr(self, name)
except AttributeError:
raise DistutilsSetupError(
"%s: No such distribution setting" % name
)
if old is not None and not isinstance(old, sequence):
raise DistutilsSetupError(
name + ": this setting cannot be changed via include/exclude"
)
elif old:
setattr(self, name, [item for item in old if item not in value])
def _include_misc(self, name, value):
"""Handle 'include()' for list/tuple attrs without a special handler"""
if not isinstance(value, sequence):
raise DistutilsSetupError(
"%s: setting must be a list (%r)" % (name, value)
)
try:
old = getattr(self, name)
except AttributeError:
raise DistutilsSetupError(
"%s: No such distribution setting" % name
)
if old is None:
setattr(self, name, value)
elif not isinstance(old, sequence):
raise DistutilsSetupError(
name + ": this setting cannot be changed via include/exclude"
)
else:
setattr(self, name, old + [item for item in value if item not in old])
def exclude(self, **attrs):
"""Remove items from distribution that are named in keyword arguments
For example, 'dist.exclude(py_modules=["x"])' would remove 'x' from
the distribution's 'py_modules' attribute. Excluding packages uses
the 'exclude_package()' method, so all of the package's contained
packages, modules, and extensions are also excluded.
Currently, this method only supports exclusion from attributes that are
lists or tuples. If you need to add support for excluding from other
attributes in this or a subclass, you can add an '_exclude_X' method,
where 'X' is the name of the attribute. The method will be called with
the value passed to 'exclude()'. So, 'dist.exclude(foo={"bar":"baz"})'
will try to call 'dist._exclude_foo({"bar":"baz"})', which can then
handle whatever special exclusion logic is needed.
"""
for k, v in attrs.items():
exclude = getattr(self, '_exclude_' + k, None)
if exclude:
exclude(v)
else:
self._exclude_misc(k, v)
def _exclude_packages(self, packages):
if not isinstance(packages, sequence):
raise DistutilsSetupError(
"packages: setting must be a list or tuple (%r)" % (packages,)
)
list(map(self.exclude_package, packages))
def _parse_command_opts(self, parser, args):
# Remove --with-X/--without-X options when processing command args
self.global_options = self.__class__.global_options
self.negative_opt = self.__class__.negative_opt
# First, expand any aliases
command = args[0]
aliases = self.get_option_dict('aliases')
while command in aliases:
src, alias = aliases[command]
del aliases[command] # ensure each alias can expand only once!
import shlex
args[:1] = shlex.split(alias, True)
command = args[0]
nargs = _Distribution._parse_command_opts(self, parser, args)
# Handle commands that want to consume all remaining arguments
cmd_class = self.get_command_class(command)
if getattr(cmd_class, 'command_consumes_arguments', None):
self.get_option_dict(command)['args'] = ("command line", nargs)
if nargs is not None:
return []
return nargs
def get_cmdline_options(self):
"""Return a '{cmd: {opt:val}}' map of all command-line options
Option names are all long, but do not include the leading '--', and
contain dashes rather than underscores. If the option doesn't take
an argument (e.g. '--quiet'), the 'val' is 'None'.
Note that options provided by config files are intentionally excluded.
"""
d = {}
for cmd, opts in self.command_options.items():
for opt, (src, val) in opts.items():
if src != "command line":
continue
opt = opt.replace('_', '-')
if val == 0:
cmdobj = self.get_command_obj(cmd)
neg_opt = self.negative_opt.copy()
neg_opt.update(getattr(cmdobj, 'negative_opt', {}))
for neg, pos in neg_opt.items():
if pos == opt:
opt = neg
val = None
break
else:
raise AssertionError("Shouldn't be able to get here")
elif val == 1:
val = None
d.setdefault(cmd, {})[opt] = val
return d
def iter_distribution_names(self):
"""Yield all packages, modules, and extension names in distribution"""
for pkg in self.packages or ():
yield pkg
for module in self.py_modules or ():
yield module
for ext in self.ext_modules or ():
if isinstance(ext, tuple):
name, buildinfo = ext
else:
name = ext.name
if name.endswith('module'):
name = name[:-6]
yield name
def handle_display_options(self, option_order):
"""If there were any non-global "display-only" options
(--help-commands or the metadata display options) on the command
line, display the requested info and return true; else return
false.
"""
import sys
if six.PY2 or self.help_commands:
return _Distribution.handle_display_options(self, option_order)
# Stdout may be StringIO (e.g. in tests)
import io
if not isinstance(sys.stdout, io.TextIOWrapper):
return _Distribution.handle_display_options(self, option_order)
# Don't wrap stdout if utf-8 is already the encoding. Provides
# workaround for #334.
if sys.stdout.encoding.lower() in ('utf-8', 'utf8'):
return _Distribution.handle_display_options(self, option_order)
# Print metadata in UTF-8 no matter the platform
encoding = sys.stdout.encoding
errors = sys.stdout.errors
newline = sys.platform != 'win32' and '\n' or None
line_buffering = sys.stdout.line_buffering
sys.stdout = io.TextIOWrapper(
sys.stdout.detach(), 'utf-8', errors, newline, line_buffering)
try:
return _Distribution.handle_display_options(self, option_order)
finally:
sys.stdout = io.TextIOWrapper(
sys.stdout.detach(), encoding, errors, newline, line_buffering)
class Feature:
"""
**deprecated** -- The `Feature` facility was never completely implemented
or supported, `has reported issues
<https://github.com/pypa/setuptools/issues/58>`_ and will be removed in
a future version.
A subset of the distribution that can be excluded if unneeded/wanted
Features are created using these keyword arguments:
'description' -- a short, human readable description of the feature, to
be used in error messages, and option help messages.
'standard' -- if true, the feature is included by default if it is
available on the current system. Otherwise, the feature is only
included if requested via a command line '--with-X' option, or if
another included feature requires it. The default setting is 'False'.
'available' -- if true, the feature is available for installation on the
current system. The default setting is 'True'.
'optional' -- if true, the feature's inclusion can be controlled from the
command line, using the '--with-X' or '--without-X' options. If
false, the feature's inclusion status is determined automatically,
      based on 'available', 'standard', and whether any other feature
requires it. The default setting is 'True'.
'require_features' -- a string or sequence of strings naming features
that should also be included if this feature is included. Defaults to
empty list. May also contain 'Require' objects that should be
added/removed from the distribution.
'remove' -- a string or list of strings naming packages to be removed
from the distribution if this feature is *not* included. If the
feature *is* included, this argument is ignored. This argument exists
to support removing features that "crosscut" a distribution, such as
defining a 'tests' feature that removes all the 'tests' subpackages
provided by other features. The default for this argument is an empty
list. (Note: the named package(s) or modules must exist in the base
distribution when the 'setup()' function is initially called.)
other keywords -- any other keyword arguments are saved, and passed to
the distribution's 'include()' and 'exclude()' methods when the
feature is included or excluded, respectively. So, for example, you
could pass 'packages=["a","b"]' to cause packages 'a' and 'b' to be
added or removed from the distribution as appropriate.
A feature must include at least one 'requires', 'remove', or other
keyword argument. Otherwise, it can't affect the distribution in any way.
Note also that you can subclass 'Feature' to create your own specialized
feature types that modify the distribution in other ways when included or
excluded. See the docstrings for the various methods here for more detail.
Aside from the methods, the only feature attributes that distributions look
at are 'description' and 'optional'.
"""
@staticmethod
def warn_deprecated():
warnings.warn(
"Features are deprecated and will be removed in a future "
"version. See https://github.com/pypa/setuptools/issues/65.",
DeprecationWarning,
stacklevel=3,
)
def __init__(self, description, standard=False, available=True,
optional=True, require_features=(), remove=(), **extras):
self.warn_deprecated()
self.description = description
self.standard = standard
self.available = available
self.optional = optional
if isinstance(require_features, (str, Require)):
require_features = require_features,
self.require_features = [
r for r in require_features if isinstance(r, str)
]
er = [r for r in require_features if not isinstance(r, str)]
if er:
extras['require_features'] = er
if isinstance(remove, str):
remove = remove,
self.remove = remove
self.extras = extras
        if not remove and not require_features and not extras:
            raise DistutilsSetupError(
                "Feature %s: must define 'require_features', 'remove', or at"
                " least one of 'packages', 'py_modules', etc." % self.description
            )
def include_by_default(self):
"""Should this feature be included by default?"""
return self.available and self.standard
def include_in(self, dist):
"""Ensure feature and its requirements are included in distribution
You may override this in a subclass to perform additional operations on
the distribution. Note that this method may be called more than once
per feature, and so should be idempotent.
"""
if not self.available:
raise DistutilsPlatformError(
self.description + " is required, "
"but is not available on this platform"
)
dist.include(**self.extras)
for f in self.require_features:
dist.include_feature(f)
def exclude_from(self, dist):
"""Ensure feature is excluded from distribution
You may override this in a subclass to perform additional operations on
the distribution. This method will be called at most once per
feature, and only after all included features have been asked to
include themselves.
"""
dist.exclude(**self.extras)
if self.remove:
for item in self.remove:
dist.exclude_package(item)
def validate(self, dist):
"""Verify that feature makes sense in context of distribution
This method is called by the distribution just before it parses its
command line. It checks to ensure that the 'remove' attribute, if any,
contains only valid package/module names that are present in the base
distribution when 'setup()' is called. You may override it in a
subclass to perform any other required validation of the feature
against a target distribution.
"""
for item in self.remove:
if not dist.has_contents_for(item):
raise DistutilsSetupError(
"%s wants to be able to remove %s, but the distribution"
" doesn't contain any packages or modules under %s"
% (self.description, item, item)
)
|
cc0-1.0
|
steveb/heat
|
heat/engine/resources/openstack/nova/flavor.py
|
2
|
6488
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat.common import exception
from heat.common.i18n import _
from heat.engine import attributes
from heat.engine import properties
from heat.engine import resource
from heat.engine import support
class NovaFlavor(resource.Resource):
"""A resource for creating OpenStack virtual hardware templates.
Due to default nova security policy usage of this resource is limited to
being used by administrators only. The rights may also be delegated to
other users by redefining the access controls on the nova-api server.
    Note that if the name and flavorid properties are not specified, the
    Nova Flavor resource generates them automatically. This avoids potential
    naming collisions upon flavor creation, as all flavors have a global
    scope.
"""
support_status = support.SupportStatus(version='2014.2')
default_client_name = 'nova'
required_service_extension = 'os-flavor-manage'
entity = 'flavors'
PROPERTIES = (
ID, NAME, RAM, VCPUS, DISK, SWAP,
EPHEMERAL, RXTX_FACTOR, EXTRA_SPECS, IS_PUBLIC
) = (
'flavorid', 'name', 'ram', 'vcpus', 'disk', 'swap',
'ephemeral', 'rxtx_factor', 'extra_specs', 'is_public',
)
ATTRIBUTES = (
IS_PUBLIC_ATTR, EXTRA_SPECS_ATTR
) = (
'is_public', 'extra_specs'
)
properties_schema = {
ID: properties.Schema(
properties.Schema.STRING,
            _('Unique ID of the flavor. If not specified, '
              'a UUID will be auto-generated and used.'),
support_status=support.SupportStatus(version='7.0.0')
),
NAME: properties.Schema(
properties.Schema.STRING,
_('Name of the flavor.'),
support_status=support.SupportStatus(version='7.0.0'),
),
RAM: properties.Schema(
properties.Schema.INTEGER,
_('Memory in MB for the flavor.'),
required=True
),
VCPUS: properties.Schema(
properties.Schema.INTEGER,
_('Number of VCPUs for the flavor.'),
required=True
),
DISK: properties.Schema(
properties.Schema.INTEGER,
_('Size of local disk in GB. The "0" size is a special case that '
'uses the native base image size as the size of the ephemeral '
'root volume.'),
default=0
),
SWAP: properties.Schema(
properties.Schema.INTEGER,
_('Swap space in MB.'),
default=0
),
EPHEMERAL: properties.Schema(
properties.Schema.INTEGER,
_('Size of a secondary ephemeral data disk in GB.'),
default=0
),
RXTX_FACTOR: properties.Schema(
properties.Schema.NUMBER,
_('RX/TX factor.'),
default=1.0
),
EXTRA_SPECS: properties.Schema(
properties.Schema.MAP,
_('Key/Value pairs to extend the capabilities of the flavor.'),
update_allowed=True,
),
IS_PUBLIC: properties.Schema(
properties.Schema.BOOLEAN,
            _('Scope of flavor accessibility. Public or private. '
              'The default value is True, meaning public, shared '
              'across all projects.'),
default=True,
support_status=support.SupportStatus(version='6.0.0'),
),
}
attributes_schema = {
IS_PUBLIC_ATTR: attributes.Schema(
_('Whether the flavor is shared across all projects.'),
support_status=support.SupportStatus(version='6.0.0'),
type=attributes.Schema.BOOLEAN
),
EXTRA_SPECS_ATTR: attributes.Schema(
_('Extra specs of the flavor in key-value pairs.'),
support_status=support.SupportStatus(version='7.0.0'),
type=attributes.Schema.MAP
)
}
def handle_create(self):
args = dict(self.properties)
if not args['flavorid']:
args['flavorid'] = 'auto'
if not args['name']:
args['name'] = self.physical_resource_name()
flavor_keys = args.pop(self.EXTRA_SPECS)
flavor = self.client().flavors.create(**args)
self.resource_id_set(flavor.id)
if flavor_keys:
flavor.set_keys(flavor_keys)
tenant = self.stack.context.tenant_id
if not args['is_public']:
            # grant access only to the active project (private flavor)
self.client().flavor_access.add_tenant_access(flavor, tenant)
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
"""Update nova flavor."""
if self.EXTRA_SPECS in prop_diff:
flavor = self.client().flavors.get(self.resource_id)
old_keys = flavor.get_keys()
flavor.unset_keys(old_keys)
new_keys = prop_diff.get(self.EXTRA_SPECS)
if new_keys is not None:
flavor.set_keys(new_keys)
def _resolve_attribute(self, name):
flavor = self.client().flavors.get(self.resource_id)
if name == self.IS_PUBLIC_ATTR:
return getattr(flavor, name)
if name == self.EXTRA_SPECS_ATTR:
return flavor.get_keys()
def get_live_resource_data(self):
try:
flavor = self.client().flavors.get(self.resource_id)
resource_data = {self.EXTRA_SPECS: flavor.get_keys()}
except Exception as ex:
if self.client_plugin().is_not_found(ex):
raise exception.EntityNotFound(entity='Resource',
name=self.name)
raise
return resource_data
def parse_live_resource_data(self, resource_properties, resource_data):
return resource_data
def resource_mapping():
return {
'OS::Nova::Flavor': NovaFlavor
}
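# Hedged sketch (illustrative only, not part of the original module): the
# keyword arguments that handle_create() above would forward to novaclient's
# flavors.create() for a minimal flavor. All property values below are
# hypothetical.
def _example_create_args():
    props = {
        'flavorid': None, 'name': None, 'ram': 512, 'vcpus': 1, 'disk': 1,
        'swap': 0, 'ephemeral': 0, 'rxtx_factor': 1.0,
        'extra_specs': {'hw:cpu_policy': 'shared'}, 'is_public': True,
    }
    args = dict(props)
    if not args['flavorid']:
        args['flavorid'] = 'auto'        # mirror handle_create's default
    if not args['name']:
        args['name'] = 'stack-flavor-x'  # stand-in for physical_resource_name()
    extra_specs = args.pop('extra_specs')
    # handle_create() then calls self.client().flavors.create(**args)
    # followed by flavor.set_keys(extra_specs).
    return args, extra_specs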
|
apache-2.0
|
Milt0n/CRISPR-Exposed
|
pipelines/multi-fasta-crt.py
|
2
|
2021
|
import os
import re
from utils.fastas import *
crt = "../configuration/tools/CRT1.2-CLI.jar"
data = "../data/"
error_file = open("crt_error.txt", 'w')
fasta_re = re.compile(r'.*_genomic\.fna$')
dir_list = os.listdir(data)
# tracking progress
number_of_folders = len(dir_list)
count = 1
## loop over genomes in the data directory
for genome_dir in dir_list:
genome_dir_list = os.listdir(data+genome_dir)
print("Processing folder %i out of %i (%s)" % (count, number_of_folders, genome_dir))
count += 1
## loop in genome directory looking for genome fasta file(s)
for fasta_file in genome_dir_list:
input_file = re.search(fasta_re,fasta_file)
try:
if(input_file):
## set the absolute path to genome dir
path = data + genome_dir + '/'
fastas = Fastas(path + input_file.group())
## loop in (multi)fasta files using Utils.fastas
for fasta in fastas:
if(len(fasta.seq) < 5000):
continue
## creating a temporary fasta file from a fasta in a multifasta
fasta_content = ">" + fasta.header + '\n' + fasta.seq
fasta_output_path = path + fasta.header.split(' ', 1)[0]
fasta_output = open(fasta_output_path + ".fasta.tmp", 'w')
fasta_output.write(fasta_content)
fasta_output.close()
## applying CRT > java -cp /path/to/CRT crt [input] [output]
os.system('java -cp ' + crt + ' crt ' + fasta_output_path + ".fasta.tmp" + ' ' + fasta_output_path + '.crt.report' + '>> crt.log 2>&1')
## removing temporary fasta files
os.system('rm ' + path + '*.tmp')
        except FileNotFoundError as e:
            print("not found: %s" % e)
            error_file.write(str(e) + '\n')
error_file.close()
print("Done!\n")
|
lgpl-3.0
|
nirenzang/Serpent-Pyethereum-Tutorial
|
pyethereum/ethereum/todo_tests/test_solidity.py
|
1
|
8489
|
# -*- coding: utf-8 -*-
from os import path
import pytest
from ethereum.utils import encode_hex
from ethereum import tester
from ethereum import utils
from ethereum import _solidity
from ethereum._solidity import get_solidity
SOLIDITY_AVAILABLE = get_solidity() is not None
CONTRACTS_DIR = path.join(path.dirname(__file__), 'contracts')
def bytecode_is_generated(cinfo, cname):
return 'code' in cinfo[cname] and len(cinfo[cname]['code']) > 10
@pytest.mark.skipif(not SOLIDITY_AVAILABLE, reason='solc compiler not available')
def test_library_from_file():
state = tester.state()
state.env.config['HOMESTEAD_FORK_BLKNUM'] = 0 # enable CALLCODE opcode
library = state.abi_contract(
None,
path=path.join(CONTRACTS_DIR, 'seven_library.sol'),
language='solidity',
)
libraries = {
'SevenLibrary': encode_hex(library.address),
}
contract = state.abi_contract(
None,
path=path.join(CONTRACTS_DIR, 'seven_contract.sol'),
libraries=libraries,
language='solidity',
)
# pylint: disable=no-member
assert library.seven() == 7
assert contract.test() == 7
@pytest.mark.skipif(not SOLIDITY_AVAILABLE, reason='solc compiler not available')
def test_library_from_code():
with open(path.join(CONTRACTS_DIR, 'seven_library.sol')) as handler:
library_code = handler.read()
with open(path.join(CONTRACTS_DIR, 'seven_contract_without_import.sol')) as handler:
contract_code = handler.read()
state = tester.state()
state.env.config['HOMESTEAD_FORK_BLKNUM'] = 0 # enable CALLCODE opcode
library = state.abi_contract(
library_code,
path=None,
language='solidity',
)
libraries = {
'SevenLibrary': encode_hex(library.address),
}
contract = state.abi_contract(
contract_code,
path=None,
libraries=libraries,
language='solidity',
)
# pylint: disable=no-member
assert library.seven() == 7
assert contract.test() == 7
def test_names():
with open(path.join(CONTRACTS_DIR, 'contract_names.sol')) as handler:
code = handler.read()
names_in_order = _solidity.solidity_names(code)
assert ('library', 'InComment') not in names_in_order
assert ('contract', 'InComment') not in names_in_order
assert ('contract', 'WithSpace') in names_in_order
assert ('contract', 'WithLineBreak') in names_in_order
assert names_in_order == [
('contract', 'AContract'),
('library', 'ALibrary'),
('contract', 'WithSpace'),
('contract', 'WithLineBreak'),
]
def test_symbols():
assert _solidity.solidity_library_symbol('a') == '__a_____________________________________'
assert _solidity.solidity_library_symbol('aaa') == '__aaa___________________________________'
assert _solidity.solidity_library_symbol('a' * 40) == '__aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa__'
# the address should be sanitized when it's given to the function
with pytest.raises(Exception):
_solidity.solidity_resolve_address(
'beef__a_____________________________________cafe',
'__a_____________________________________',
'0x1111111111111111111111111111111111111111'
)
# the address needs to be hex encoded
with pytest.raises(Exception):
_solidity.solidity_resolve_address(
'beef__a_____________________________________cafe',
'__a_____________________________________',
'111111111111111111111111111111111111111_'
)
assert _solidity.solidity_resolve_address(
'beef__a_____________________________________cafe',
'__a_____________________________________',
'1111111111111111111111111111111111111111'
) == 'beef1111111111111111111111111111111111111111cafe'
@pytest.mark.skipif(not SOLIDITY_AVAILABLE, reason='solc compiler not available')
def test_interop():
serpent_contract = """
extern solidity: [sub2:[]:i]
def main(a):
return(a.sub2() * 2)
def sub1():
return(5)
"""
solidity_contract = """
contract serpent { function sub1() returns (int256 y) {} }
contract zoo {
function main(address a) returns (int256 y) {
y = serpent(a).sub1() * 2;
}
function sub2() returns (int256 y) {
y = 7;
}
function sub3(address a) returns (address b) {
b = a;
}
}
"""
state = tester.state()
serpent_abi = state.abi_contract(serpent_contract)
solidity_abi = state.abi_contract(solidity_contract, language='solidity') # should be zoo
solidity_address = utils.encode_hex(solidity_abi.address)
# pylint: disable=no-member
assert serpent_abi.sub1() == 5
assert serpent_abi.main(solidity_abi.address) == 14
assert solidity_abi.sub2() == 7
assert solidity_abi.sub3(utils.encode_hex(solidity_abi.address)) == solidity_address
assert solidity_abi.main(serpent_abi.address) == 10
@pytest.mark.skipif(not SOLIDITY_AVAILABLE, reason='solc compiler not available')
def test_constructor():
constructor_contract = """
contract testme {
uint value;
function testme(uint a) {
value = a;
}
function getValue() returns (uint) {
return value;
}
}
"""
state = tester.state()
contract = state.abi_contract(
constructor_contract,
language='solidity',
constructor_parameters=(2, ),
)
# pylint: disable=no-member
assert contract.getValue() == 2
@pytest.mark.skipif(not SOLIDITY_AVAILABLE, reason='solc compiler not available')
def test_solidity_compile_rich():
compile_rich_contract = """
contract contract_add {
function add7(uint a) returns(uint d) { return a + 7; }
function add42(uint a) returns(uint d) { return a + 42; }
}
contract contract_sub {
function subtract7(uint a) returns(uint d) { return a - 7; }
function subtract42(uint a) returns(uint d) { return a - 42; }
}
"""
contract_info = get_solidity().compile_rich(compile_rich_contract)
assert len(contract_info) == 2
assert set(contract_info.keys()) == {'contract_add', 'contract_sub'}
assert set(contract_info['contract_add'].keys()) == {'info', 'code'}
assert set(contract_info['contract_add']['info'].keys()) == {
'language', 'languageVersion', 'abiDefinition', 'source',
'compilerVersion', 'developerDoc', 'userDoc'
}
assert bytecode_is_generated(contract_info, 'contract_add')
assert bytecode_is_generated(contract_info, 'contract_sub')
assert {
defn['name']
for defn
in contract_info['contract_add']['info']['abiDefinition']
} == {'add7', 'add42'}
assert {
defn['name']
for defn
in contract_info['contract_sub']['info']['abiDefinition']
} == {'subtract7', 'subtract42'}
@pytest.mark.skipif(not SOLIDITY_AVAILABLE, reason='solc compiler not available')
def test_abi_contract():
one_contract = """
contract foo {
function seven() returns (int256 y) {
y = 7;
}
function mul2(int256 x) returns (int256 y) {
y = x * 2;
}
}
"""
state = tester.state()
contract = state.abi_contract(one_contract, language='solidity')
# pylint: disable=no-member
assert contract.seven() == 7
assert contract.mul2(2) == 4
assert contract.mul2(-2) == -4
@pytest.mark.skipif(not SOLIDITY_AVAILABLE, reason='solc compiler not available')
def test_extra_args():
src = """
contract foo {
function add7(uint a) returns(uint d) { return a + 7; }
function add42(uint a) returns(uint d) { return a + 42; }
}
"""
contract_info = get_solidity().compile_rich(
src,
extra_args="--optimize-runs 100"
)
assert bytecode_is_generated(contract_info, 'foo')
contract_info = get_solidity().compile_rich(
src,
extra_args=["--optimize-runs", "100"]
)
assert bytecode_is_generated(contract_info, 'foo')
def test_missing_solc(monkeypatch):
monkeypatch.setattr(_solidity, 'get_compiler_path', lambda: None)
assert _solidity.get_compiler_path() is None
sample_sol_code = "contract SampleContract {}"
with pytest.raises(_solidity.SolcMissing):
_solidity.compile_code(sample_sol_code)
|
gpl-3.0
|
proevo/pythondotorg
|
users/tests/test_templatetags.py
|
4
|
1826
|
from pydotorg.tests.test_classes import TemplateTestCase
from ..factories import UserFactory
class UsersTagsTest(TemplateTestCase):
def test_parse_location(self):
user = UserFactory()
template = "{% load users_tags %}{{ user|user_location }}"
rendered = self.render_string(template, {'user': user})
self.assertEqual(rendered, "")
template = "{% load users_tags %}{{ user|user_location }}"
rendered = self.render_string(template, {'user': user})
self.assertEqual(rendered, "")
template = "{% load users_tags %}{{ user|user_location|default:'Not Specified' }}"
user = UserFactory(membership__city='Lawrence')
rendered = self.render_string(template, {'user': user})
self.assertEqual(rendered, "Lawrence")
user = UserFactory(membership__city='Lawrence', membership__region='KS')
rendered = self.render_string(template, {'user': user})
self.assertEqual(rendered, "Lawrence, KS")
user = UserFactory(membership__region='KS', membership__country='USA')
rendered = self.render_string(template, {'user': user})
self.assertEqual(rendered, 'KS USA')
user = UserFactory(
membership__city='Lawrence',
membership__region='KS',
membership__country='US',
)
rendered = self.render_string(template, {'user': user})
self.assertEqual(rendered, "Lawrence, KS US")
user = UserFactory(membership__city='Paris', membership__country='France')
rendered = self.render_string(template, {'user': user})
self.assertEqual(rendered, "Paris, France")
user = UserFactory(membership__country='France')
rendered = self.render_string(template, {'user': user})
self.assertEqual(rendered, "France")
|
apache-2.0
|
Wuguanping/Server_Manage_Plugin
|
Openstack_Plugin/ironic-plugin-pike/ironic/common/profiler.py
|
4
|
2455
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from osprofiler import initializer
from osprofiler import profiler
from ironic.common import context
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def setup(name, host='0.0.0.0'):
"""Setup OSprofiler notifier and enable profiling.
:param name: name of the service that will be profiled
:param host: hostname or host IP address that the service will be
                 running on. By default host is set to 0.0.0.0, but
                 specifying the real host name or address is highly recommended.
:raises TypeError: in case of invalid connection string for
a notifier backend, which is set in
osprofiler.initializer.init_from_conf.
"""
if not CONF.profiler.enabled:
return
admin_context = context.get_admin_context()
initializer.init_from_conf(conf=CONF,
context=admin_context.to_dict(),
project="ironic",
service=name,
host=host)
LOG.info("OSProfiler is enabled. Trace is generated using "
"[profiler]/hmac_keys specified in ironic.conf. "
"To disable, set [profiler]/enabled=false")
def trace_cls(name, **kwargs):
"""Wrap the OSProfiler trace_cls decorator
    Wrap the OSProfiler trace_cls decorator so that it will not try to
    patch the class unless OSProfiler is present and enabled in the config.
    :param name: The name of the action. For example, wsgi, rpc, db, etc.
    :param kwargs: Any other keyword args used by profiler.trace_cls.
"""
def decorator(cls):
if CONF.profiler.enabled:
            trace_decorator = profiler.trace_cls(name, **kwargs)
return trace_decorator(cls)
return cls
return decorator
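# Hedged usage sketch (illustrative, not part of the original module): the
# decorator is applied at class-definition time and returns the class
# unchanged when profiling is disabled. The class name is hypothetical.
def _trace_cls_example():
    @trace_cls("rpc")
    class HypotheticalAPI(object):
        def call(self):
            return "traced only when [profiler]/enabled is true"
    return HypotheticalAPI().call()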
|
apache-2.0
|
Jimmy-Morzaria/scikit-learn
|
sklearn/manifold/tests/test_spectral_embedding.py
|
216
|
8091
|
from nose.tools import assert_true
from nose.tools import assert_equal
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_raises
from nose.plugins.skip import SkipTest
from sklearn.manifold.spectral_embedding_ import SpectralEmbedding
from sklearn.manifold.spectral_embedding_ import _graph_is_connected
from sklearn.manifold import spectral_embedding
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics import normalized_mutual_info_score
from sklearn.cluster import KMeans
from sklearn.datasets.samples_generator import make_blobs
# non centered, sparse centers to check the
centers = np.array([
[0.0, 5.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 1000
n_clusters, n_features = centers.shape
S, true_labels = make_blobs(n_samples=n_samples, centers=centers,
cluster_std=1., random_state=42)
def _check_with_col_sign_flipping(A, B, tol=0.0):
""" Check array A and B are equal with possible sign flipping on
each columns"""
sign = True
for column_idx in range(A.shape[1]):
sign = sign and ((((A[:, column_idx] -
B[:, column_idx]) ** 2).mean() <= tol ** 2) or
(((A[:, column_idx] +
B[:, column_idx]) ** 2).mean() <= tol ** 2))
if not sign:
return False
return True
def test_spectral_embedding_two_components(seed=36):
# Test spectral embedding with two components
random_state = np.random.RandomState(seed)
n_sample = 100
affinity = np.zeros(shape=[n_sample * 2,
n_sample * 2])
# first component
affinity[0:n_sample,
0:n_sample] = np.abs(random_state.randn(n_sample, n_sample)) + 2
# second component
affinity[n_sample::,
n_sample::] = np.abs(random_state.randn(n_sample, n_sample)) + 2
# connection
affinity[0, n_sample + 1] = 1
affinity[n_sample + 1, 0] = 1
affinity.flat[::2 * n_sample + 1] = 0
affinity = 0.5 * (affinity + affinity.T)
true_label = np.zeros(shape=2 * n_sample)
true_label[0:n_sample] = 1
se_precomp = SpectralEmbedding(n_components=1, affinity="precomputed",
random_state=np.random.RandomState(seed))
embedded_coordinate = se_precomp.fit_transform(affinity)
# Some numpy versions are touchy with types
embedded_coordinate = \
se_precomp.fit_transform(affinity.astype(np.float32))
# thresholding on the first components using 0.
label_ = np.array(embedded_coordinate.ravel() < 0, dtype="float")
assert_equal(normalized_mutual_info_score(true_label, label_), 1.0)
def test_spectral_embedding_precomputed_affinity(seed=36):
# Test spectral embedding with precomputed kernel
gamma = 1.0
se_precomp = SpectralEmbedding(n_components=2, affinity="precomputed",
random_state=np.random.RandomState(seed))
se_rbf = SpectralEmbedding(n_components=2, affinity="rbf",
gamma=gamma,
random_state=np.random.RandomState(seed))
embed_precomp = se_precomp.fit_transform(rbf_kernel(S, gamma=gamma))
embed_rbf = se_rbf.fit_transform(S)
assert_array_almost_equal(
se_precomp.affinity_matrix_, se_rbf.affinity_matrix_)
assert_true(_check_with_col_sign_flipping(embed_precomp, embed_rbf, 0.05))
def test_spectral_embedding_callable_affinity(seed=36):
# Test spectral embedding with callable affinity
gamma = 0.9
kern = rbf_kernel(S, gamma=gamma)
se_callable = SpectralEmbedding(n_components=2,
affinity=(
lambda x: rbf_kernel(x, gamma=gamma)),
gamma=gamma,
random_state=np.random.RandomState(seed))
se_rbf = SpectralEmbedding(n_components=2, affinity="rbf",
gamma=gamma,
random_state=np.random.RandomState(seed))
embed_rbf = se_rbf.fit_transform(S)
embed_callable = se_callable.fit_transform(S)
assert_array_almost_equal(
se_callable.affinity_matrix_, se_rbf.affinity_matrix_)
assert_array_almost_equal(kern, se_rbf.affinity_matrix_)
assert_true(
_check_with_col_sign_flipping(embed_rbf, embed_callable, 0.05))
def test_spectral_embedding_amg_solver(seed=36):
# Test spectral embedding with amg solver
try:
from pyamg import smoothed_aggregation_solver
except ImportError:
raise SkipTest("pyamg not available.")
se_amg = SpectralEmbedding(n_components=2, affinity="nearest_neighbors",
eigen_solver="amg", n_neighbors=5,
random_state=np.random.RandomState(seed))
se_arpack = SpectralEmbedding(n_components=2, affinity="nearest_neighbors",
eigen_solver="arpack", n_neighbors=5,
random_state=np.random.RandomState(seed))
embed_amg = se_amg.fit_transform(S)
embed_arpack = se_arpack.fit_transform(S)
assert_true(_check_with_col_sign_flipping(embed_amg, embed_arpack, 0.05))
def test_pipeline_spectral_clustering(seed=36):
# Test using pipeline to do spectral clustering
random_state = np.random.RandomState(seed)
se_rbf = SpectralEmbedding(n_components=n_clusters,
affinity="rbf",
random_state=random_state)
se_knn = SpectralEmbedding(n_components=n_clusters,
affinity="nearest_neighbors",
n_neighbors=5,
random_state=random_state)
for se in [se_rbf, se_knn]:
km = KMeans(n_clusters=n_clusters, random_state=random_state)
km.fit(se.fit_transform(S))
assert_array_almost_equal(
normalized_mutual_info_score(
km.labels_,
true_labels), 1.0, 2)
def test_spectral_embedding_unknown_eigensolver(seed=36):
# Test that SpectralClustering fails with an unknown eigensolver
se = SpectralEmbedding(n_components=1, affinity="precomputed",
random_state=np.random.RandomState(seed),
eigen_solver="<unknown>")
assert_raises(ValueError, se.fit, S)
def test_spectral_embedding_unknown_affinity(seed=36):
# Test that SpectralClustering fails with an unknown affinity type
se = SpectralEmbedding(n_components=1, affinity="<unknown>",
random_state=np.random.RandomState(seed))
assert_raises(ValueError, se.fit, S)
def test_connectivity(seed=36):
# Test that graph connectivity test works as expected
graph = np.array([[1, 0, 0, 0, 0],
[0, 1, 1, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 1, 1, 1],
[0, 0, 0, 1, 1]])
assert_equal(_graph_is_connected(graph), False)
assert_equal(_graph_is_connected(csr_matrix(graph)), False)
assert_equal(_graph_is_connected(csc_matrix(graph)), False)
graph = np.array([[1, 1, 0, 0, 0],
[1, 1, 1, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 1, 1, 1],
[0, 0, 0, 1, 1]])
assert_equal(_graph_is_connected(graph), True)
assert_equal(_graph_is_connected(csr_matrix(graph)), True)
assert_equal(_graph_is_connected(csc_matrix(graph)), True)
def test_spectral_embedding_deterministic():
# Test that Spectral Embedding is deterministic
random_state = np.random.RandomState(36)
data = random_state.randn(10, 30)
sims = rbf_kernel(data)
embedding_1 = spectral_embedding(sims)
embedding_2 = spectral_embedding(sims)
assert_array_almost_equal(embedding_1, embedding_2)
|
bsd-3-clause
|
oz123/bottle
|
test/test_importhook.py
|
11
|
1284
|
# -*- coding: utf-8 -*-
import unittest
import sys, os
import imp
class TestImportHooks(unittest.TestCase):
def make_module(self, name, **args):
mod = sys.modules.setdefault(name, imp.new_module(name))
mod.__file__ = '<virtual %s>' % name
mod.__dict__.update(**args)
return mod
def test_direkt_import(self):
mod = self.make_module('bottle_test')
import bottle.ext.test
self.assertEqual(bottle.ext.test, mod)
def test_from_import(self):
mod = self.make_module('bottle_test')
from bottle.ext import test
self.assertEqual(test, mod)
def test_data_import(self):
mod = self.make_module('bottle_test', item='value')
from bottle.ext.test import item
self.assertEqual(item, 'value')
def test_import_fail(self):
        ''' Importing a missing module from bottle.ext raises ImportError. '''
def test():
import bottle.ext.doesnotexist
self.assertRaises(ImportError, test)
def test_ext_isfile(self):
''' The virtual module needs a valid __file__ attribute.
        If not, the Google App Engine development server crashes on Windows.
'''
from bottle import ext
self.assertTrue(os.path.isfile(ext.__file__))
|
mit
|
vlaufer/sc2reader
|
sc2reader/data/__init__.py
|
2
|
14436
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals, division
import json
import pkgutil
try:
from collections import OrderedDict
except ImportError as e:
from ordereddict import OrderedDict
from sc2reader.log_utils import loggable
ABIL_LOOKUP = dict()
for entry in pkgutil.get_data('sc2reader.data', 'ability_lookup.csv').decode('utf8').split('\n'):
if not entry:
continue
str_id, abilities = entry.split(',', 1)
ABIL_LOOKUP[str_id] = abilities.split(',')
UNIT_LOOKUP = dict()
for entry in pkgutil.get_data('sc2reader.data', 'unit_lookup.csv').decode('utf8').split('\n'):
if not entry:
continue
str_id, title = entry.strip().split(',')
UNIT_LOOKUP[str_id] = title
unit_data = pkgutil.get_data('sc2reader.data', 'unit_info.json').decode('utf8')
unit_lookup = json.loads(unit_data)
command_data = pkgutil.get_data('sc2reader.data', 'train_commands.json').decode('utf8')
train_commands = json.loads(command_data)
class Unit(object):
"""Represents an in-game unit."""
def __init__(self, unit_id):
#: A reference to the player that currently owns this unit. Only available for 2.0.8+ replays.
self.owner = None
#: The frame the unit was started at. Only available for 2.0.8+ replays.
#: Specifically, it is the frame the :class:`~sc2reader.events.tracker.UnitInitEvent` is received. For units
#: that are born and not initiated this will be the same as :attr:`finished_at`.
self.started_at = None
#: The frame the unit was finished at. Only available for 2.0.8+ replays.
#: Specifically, it is the frame that the :class:`~sc2reader.events.tracker.UnitDoneEvent` is received. For units
#: that are born and not initiated this will be the frame that the :class:`~sc2reader.events.tracker.UnitBornEvent`
#: is received.
self.finished_at = None
#: The frame the unit died at. Only available for 2.0.8+ replays.
#: Specifically, it is the frame that the :class:`~sc2reader.events.tracker.UnitDiedEvent` is received.
self.died_at = None
#: Deprecated, see :attr:`self.killing_player`
self.killed_by = None
#: A reference to the player that killed this unit. Only available for 2.0.8+ replays.
#: This value is not set if the killer is unknown or not relevant (morphed into a
#: different unit or used to create a building, etc)
self.killing_player = None
#: A reference to the unit that killed this unit. Only available for 2.1+ replays.
#: This value is not set if the killer is unknown or not relevant (morphed into a
#: different unit or used to create a building, etc). If the killing unit dies before
#: the killed unit dies, a bug may cause the killing unit to be None. This can occur
        #: because of projectile speeds.
self.killing_unit = None
#: A list of units that this unit has killed. Only available for 2.1+ replays.
#: The unit only gets credit for the kills that it gets the final blow on.
self.killed_units = list()
#: The unique in-game id for this unit. The id can sometimes be zero because
#: TargetUnitCommandEvents will create a new unit with id zero when a unit
        #: behind the fog of war is targeted.
self.id = unit_id
#: A reference to the unit type this unit is current in.
        #: e.g. SiegeTank is a different type than SiegeTankSieged
self._type_class = None
#: A history of all the unit types this unit has had stored
#: in order by frame the type was acquired.
self.type_history = OrderedDict()
#: Is this unit type a hallucinated one?
self.hallucinated = False
self.flags = 0
def apply_flags(self, flags):
self.flags = flags
self.hallucinated = flags & 2 == 2
def set_type(self, unit_type, frame):
self._type_class = unit_type
self.type_history[frame] = unit_type
def is_type(self, unit_type, strict=True):
if strict:
if isinstance(unit_type, int):
if self._type_class:
return unit_type == self._type_class.id
else:
return unit_type == 0
elif isinstance(unit_type, Unit):
return self._type_class == unit_type
else:
if self._type_class:
return unit_type == self._type_class.str_id
else:
return unit_type is None
else:
if isinstance(unit_type, int):
if self._type_class:
return unit_type in [utype.id for utype in self.type_history.values()]
else:
return unit_type == 0
elif isinstance(unit_type, Unit):
return unit_type in self.type_history.values()
else:
if self._type_class:
return unit_type in [utype.str_id for utype in self.type_history.values()]
else:
return unit_type is None
@property
def name(self):
"""The name of the unit type currently active. None if no type is assigned"""
return self._type_class.name if self._type_class else None
@property
def title(self):
return self._type_class.title if self._type_class else None
@property
def type(self):
""" The internal type id of the current unit type of this unit. None if no type is assigned"""
return self._type_class.id if self._type_class else None
@property
def race(self):
""" The race of this unit. One of Terran, Protoss, Zerg, Neutral, or None"""
return self._type_class.race if self._type_class else None
@property
def minerals(self):
""" The mineral cost of the unit. None if no type is assigned"""
return self._type_class.minerals if self._type_class else None
@property
def vespene(self):
""" The vespene cost of the unit. None if no type is assigned"""
return self._type_class.vespene if self._type_class else None
@property
def supply(self):
""" The supply used by this unit. Negative for supply providers. None if no type is assigned """
return self._type_class.supply if self._type_class else None
@property
def is_worker(self):
""" Boolean flagging units as worker units. SCV, MULE, Drone, Probe """
return self._type_class.is_worker if self._type_class else False
@property
def is_building(self):
""" Boolean flagging units as buildings. """
return self._type_class.is_building if self._type_class else False
@property
def is_army(self):
""" Boolean flagging units as army units. """
return self._type_class.is_army if self._type_class else False
def __str__(self):
return "{0} [{1:X}]".format(self.name, self.id)
def __cmp__(self, other):
return cmp(self.id, other.id)
def __lt__(self, other):
return self.id < other.id
def __le__(self, other):
return self.id <= other.id
def __eq__(self, other):
return self.id == other.id
def __ne__(self, other):
return self.id != other.id
def __gt__(self, other):
return self.id > other.id
def __ge__(self, other):
return self.id >= other.id
def __hash__(self):
return hash(self.id)
def __repr__(self):
return str(self)
class UnitType(object):
""" Represents an in game unit type """
def __init__(self, type_id, str_id=None, name=None, title=None, race=None, minerals=0,
vespene=0, supply=0, is_building=False, is_worker=False, is_army=False):
#: The internal integer id representing this unit type
self.id = type_id
#: The internal string id representing this unit type
self.str_id = str_id
#: The name of this unit type
self.name = name
#: The printable title of this unit type; has spaces and possibly punctuation
self.title = title
#: The race this unit type belongs to
self.race = race
#: The mineral cost of this unit type
self.minerals = minerals
#: The vespene cost of this unit type
self.vespene = vespene
#: The supply cost of this unit type
self.supply = supply
#: Boolean flagging this unit type as a building
self.is_building = is_building
#: Boolean flagging this unit type as a worker
self.is_worker = is_worker
#: Boolean flagging this unit type as an army unit
self.is_army = is_army
class Ability(object):
""" Represents an in-game ability """
def __init__(self, id, name=None, title=None, is_build=False, build_time=0, build_unit=None):
#: The internal integer id representing this ability.
self.id = id
#: The name of this ability
self.name = name
#: The printable title of this ability; has spaces and possibly punctuation.
self.title = title
#: Boolean flagging this ability as creating a new unit.
self.is_build = is_build
#: The number of seconds required to build this unit. 0 if not ``is_build``.
self.build_time = build_time
#: A reference to the :class:`UnitType` type built by this ability. Default to None.
self.build_unit = build_unit
@loggable
class Build(object):
"""
:param build_id: The build number identifying this dataset.
    The datapack for a particular group of builds. Maps internal integer ids
to :class:`Unit` and :class:`Ability` types. Also contains builder methods
for creating new units and changing their types.
    All build data is valid for standard games only. For arcade maps mileage
may vary.
"""
def __init__(self, build_id):
#: The integer id of the build
self.id = build_id
#: A dictionary mapping integer ids to available unit types.
self.units = dict()
#: A dictionary mapping integer ids to available abilities.
self.abilities = dict()
def create_unit(self, unit_id, unit_type, frame):
"""
:param unit_id: The unique id of this unit.
:param unit_type: The unit type to assign to the new unit
Creates a new unit and assigns it to the specified type.
"""
unit = Unit(unit_id)
self.change_type(unit, unit_type, frame)
return unit
def change_type(self, unit, new_type, frame):
"""
        :param unit: The unit changing types.
        :param new_type: The unit type to assign to this unit.
Assigns the given type to a unit.
"""
if new_type in self.units:
unit_type = self.units[new_type]
unit.set_type(unit_type, frame)
else:
self.logger.error("Unable to change type of {0} to {1} [frame {2}]; unit type not found in build {3}".format(unit, new_type, frame, self.id))
def add_ability(self, ability_id, name, title=None, is_build=False, build_time=None, build_unit=None):
ability = Ability(
ability_id,
name=name,
title=title or name,
is_build=is_build,
build_time=build_time,
build_unit=build_unit
)
setattr(self, name, ability)
self.abilities[ability_id] = ability
def add_unit_type(self, type_id, str_id, name, title=None, race='Neutral', minerals=0, vespene=0, supply=0, is_building=False, is_worker=False, is_army=False):
unit = UnitType(
type_id,
str_id=str_id,
name=name,
title=title or name,
race=race,
minerals=minerals,
vespene=vespene,
supply=supply,
is_building=is_building,
is_worker=is_worker,
is_army=is_army,
)
setattr(self, name, unit)
self.units[type_id] = unit
self.units[str_id] = unit
def load_build(expansion, version):
build = Build(version)
unit_file = '{0}/{1}_units.csv'.format(expansion, version)
for entry in pkgutil.get_data('sc2reader.data', unit_file).decode('utf8').split('\n'):
if not entry:
continue
int_id, str_id = entry.strip().split(',')
unit_type = int(int_id, 10)
title = UNIT_LOOKUP[str_id]
values = dict(type_id=unit_type, str_id=str_id, name=title)
for race in ('Protoss', 'Terran', 'Zerg'):
if title.lower() in unit_lookup[race]:
values.update(unit_lookup[race][title.lower()])
values['race'] = race
break
build.add_unit_type(**values)
abil_file = '{0}/{1}_abilities.csv'.format(expansion, version)
build.add_ability(ability_id=0, name='RightClick', title='Right Click')
for entry in pkgutil.get_data('sc2reader.data', abil_file).decode('utf8').split('\n'):
if not entry:
continue
int_id_base, str_id = entry.strip().split(',')
int_id_base = int(int_id_base, 10) << 5
abils = ABIL_LOOKUP[str_id]
real_abils = [(i, abil) for i, abil in enumerate(abils) if abil.strip() != '']
if len(real_abils) == 0:
real_abils = [(0, str_id)]
for index, ability_name in real_abils:
unit_name, build_time = train_commands.get(ability_name, ('', 0))
if 'Hallucinated' in unit_name: # Not really sure how to handle hallucinations
unit_name = unit_name[12:]
build.add_ability(
ability_id=int_id_base | index,
name=ability_name,
is_build=bool(unit_name),
build_unit=getattr(build, unit_name, None),
build_time=build_time
)
return build
# Load the WoL Data
wol_builds = dict()
for version in ('16117', '17326', '18092', '19458', '22612', '24944'):
wol_builds[version] = load_build('WoL', version)
# Load HotS Data
hots_builds = dict()
for version in ('base', '23925', '24247', '24764'):
hots_builds[version] = load_build('HotS', version)
datapacks = builds = {'WoL': wol_builds, 'HotS': hots_builds}
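# Hedged usage sketch (illustrative, not part of the original module): pick a
# datapack by expansion and build number, then create a unit from it. The
# build number comes from the tables loaded above; the str_id 'Probe', the
# unit id, and the frame are assumptions for illustration.
def _datapack_example():
    build = datapacks['WoL']['24944']
    unit = build.create_unit(unit_id=0x1234, unit_type='Probe', frame=0)
    return unit.name, unit.race, unit.is_worker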
|
mit
|
kaedroho/django
|
tests/forms_tests/tests/tests.py
|
54
|
15816
|
import datetime
from django.core.files.uploadedfile import SimpleUploadedFile
from django.db import models
from django.forms import CharField, FileField, Form, ModelForm
from django.forms.models import ModelFormMetaclass
from django.test import SimpleTestCase, TestCase
from ..models import (
BoundaryModel, ChoiceFieldModel, ChoiceModel, ChoiceOptionModel, Defaults,
FileModel, OptionalMultiChoiceModel,
)
class ChoiceFieldForm(ModelForm):
class Meta:
model = ChoiceFieldModel
fields = '__all__'
class OptionalMultiChoiceModelForm(ModelForm):
class Meta:
model = OptionalMultiChoiceModel
fields = '__all__'
class ChoiceFieldExclusionForm(ModelForm):
multi_choice = CharField(max_length=50)
class Meta:
exclude = ['multi_choice']
model = ChoiceFieldModel
class EmptyCharLabelChoiceForm(ModelForm):
class Meta:
model = ChoiceModel
fields = ['name', 'choice']
class EmptyIntegerLabelChoiceForm(ModelForm):
class Meta:
model = ChoiceModel
fields = ['name', 'choice_integer']
class EmptyCharLabelNoneChoiceForm(ModelForm):
class Meta:
model = ChoiceModel
fields = ['name', 'choice_string_w_none']
class FileForm(Form):
file1 = FileField()
class TestTicket14567(TestCase):
"""
The return values of ModelMultipleChoiceFields are QuerySets
"""
def test_empty_queryset_return(self):
"If a model's ManyToManyField has blank=True and is saved with no data, a queryset is returned."
option = ChoiceOptionModel.objects.create(name='default')
form = OptionalMultiChoiceModelForm({'multi_choice_optional': '', 'multi_choice': [option.pk]})
self.assertTrue(form.is_valid())
# The empty value is a QuerySet
self.assertIsInstance(form.cleaned_data['multi_choice_optional'], models.query.QuerySet)
# While we're at it, test whether a QuerySet is returned if there *is* a value.
self.assertIsInstance(form.cleaned_data['multi_choice'], models.query.QuerySet)
class ModelFormCallableModelDefault(TestCase):
def test_no_empty_option(self):
"If a model's ForeignKey has blank=False and a default, no empty option is created (Refs #10792)."
option = ChoiceOptionModel.objects.create(name='default')
choices = list(ChoiceFieldForm().fields['choice'].choices)
self.assertEqual(len(choices), 1)
self.assertEqual(choices[0], (option.pk, str(option)))
def test_callable_initial_value(self):
"The initial value for a callable default returning a queryset is the pk (refs #13769)"
ChoiceOptionModel.objects.create(id=1, name='default')
ChoiceOptionModel.objects.create(id=2, name='option 2')
ChoiceOptionModel.objects.create(id=3, name='option 3')
self.assertHTMLEqual(
ChoiceFieldForm().as_p(),
"""<p><label for="id_choice">Choice:</label> <select name="choice" id="id_choice">
<option value="1" selected>ChoiceOption 1</option>
<option value="2">ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select><input type="hidden" name="initial-choice" value="1" id="initial-id_choice"></p>
<p><label for="id_choice_int">Choice int:</label> <select name="choice_int" id="id_choice_int">
<option value="1" selected>ChoiceOption 1</option>
<option value="2">ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select><input type="hidden" name="initial-choice_int" value="1" id="initial-id_choice_int"></p>
<p><label for="id_multi_choice">Multi choice:</label>
<select multiple name="multi_choice" id="id_multi_choice" required>
<option value="1" selected>ChoiceOption 1</option>
<option value="2">ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select><input type="hidden" name="initial-multi_choice" value="1" id="initial-id_multi_choice_0"></p>
<p><label for="id_multi_choice_int">Multi choice int:</label>
<select multiple name="multi_choice_int" id="id_multi_choice_int" required>
<option value="1" selected>ChoiceOption 1</option>
<option value="2">ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select><input type="hidden" name="initial-multi_choice_int" value="1" id="initial-id_multi_choice_int_0"></p>"""
)
def test_initial_instance_value(self):
"Initial instances for model fields may also be instances (refs #7287)"
ChoiceOptionModel.objects.create(id=1, name='default')
obj2 = ChoiceOptionModel.objects.create(id=2, name='option 2')
obj3 = ChoiceOptionModel.objects.create(id=3, name='option 3')
self.assertHTMLEqual(
ChoiceFieldForm(initial={
'choice': obj2,
'choice_int': obj2,
'multi_choice': [obj2, obj3],
'multi_choice_int': ChoiceOptionModel.objects.exclude(name="default"),
}).as_p(),
"""<p><label for="id_choice">Choice:</label> <select name="choice" id="id_choice">
<option value="1">ChoiceOption 1</option>
<option value="2" selected>ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select><input type="hidden" name="initial-choice" value="2" id="initial-id_choice"></p>
<p><label for="id_choice_int">Choice int:</label> <select name="choice_int" id="id_choice_int">
<option value="1">ChoiceOption 1</option>
<option value="2" selected>ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select><input type="hidden" name="initial-choice_int" value="2" id="initial-id_choice_int"></p>
<p><label for="id_multi_choice">Multi choice:</label>
<select multiple name="multi_choice" id="id_multi_choice" required>
<option value="1">ChoiceOption 1</option>
<option value="2" selected>ChoiceOption 2</option>
<option value="3" selected>ChoiceOption 3</option>
</select><input type="hidden" name="initial-multi_choice" value="2" id="initial-id_multi_choice_0">
<input type="hidden" name="initial-multi_choice" value="3" id="initial-id_multi_choice_1"></p>
<p><label for="id_multi_choice_int">Multi choice int:</label>
<select multiple name="multi_choice_int" id="id_multi_choice_int" required>
<option value="1">ChoiceOption 1</option>
<option value="2" selected>ChoiceOption 2</option>
<option value="3" selected>ChoiceOption 3</option>
</select><input type="hidden" name="initial-multi_choice_int" value="2" id="initial-id_multi_choice_int_0">
<input type="hidden" name="initial-multi_choice_int" value="3" id="initial-id_multi_choice_int_1"></p>"""
)
class FormsModelTestCase(TestCase):
def test_unicode_filename(self):
# FileModel with unicode filename and data #########################
file1 = SimpleUploadedFile('我隻氣墊船裝滿晒鱔.txt', 'मेरी मँडराने वाली नाव सर्पमीनों से भरी ह'.encode())
f = FileForm(data={}, files={'file1': file1}, auto_id=False)
self.assertTrue(f.is_valid())
self.assertIn('file1', f.cleaned_data)
m = FileModel.objects.create(file=f.cleaned_data['file1'])
self.assertEqual(m.file.name, 'tests/\u6211\u96bb\u6c23\u588a\u8239\u88dd\u6eff\u6652\u9c54.txt')
m.delete()
def test_boundary_conditions(self):
# Boundary conditions on a PositiveIntegerField #########################
class BoundaryForm(ModelForm):
class Meta:
model = BoundaryModel
fields = '__all__'
f = BoundaryForm({'positive_integer': 100})
self.assertTrue(f.is_valid())
f = BoundaryForm({'positive_integer': 0})
self.assertTrue(f.is_valid())
f = BoundaryForm({'positive_integer': -100})
self.assertFalse(f.is_valid())
def test_formfield_initial(self):
# Formfield initial values ########
# If the model has default values for some fields, they are used as the formfield
# initial values.
class DefaultsForm(ModelForm):
class Meta:
model = Defaults
fields = '__all__'
self.assertEqual(DefaultsForm().fields['name'].initial, 'class default value')
self.assertEqual(DefaultsForm().fields['def_date'].initial, datetime.date(1980, 1, 1))
self.assertEqual(DefaultsForm().fields['value'].initial, 42)
r1 = DefaultsForm()['callable_default'].as_widget()
r2 = DefaultsForm()['callable_default'].as_widget()
self.assertNotEqual(r1, r2)
# In a ModelForm that is passed an instance, the initial values come from the
# instance's values, not the model's defaults.
foo_instance = Defaults(name='instance value', def_date=datetime.date(1969, 4, 4), value=12)
instance_form = DefaultsForm(instance=foo_instance)
self.assertEqual(instance_form.initial['name'], 'instance value')
self.assertEqual(instance_form.initial['def_date'], datetime.date(1969, 4, 4))
self.assertEqual(instance_form.initial['value'], 12)
from django.forms import CharField
class ExcludingForm(ModelForm):
name = CharField(max_length=255)
class Meta:
model = Defaults
exclude = ['name', 'callable_default']
f = ExcludingForm({'name': 'Hello', 'value': 99, 'def_date': datetime.date(1999, 3, 2)})
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data['name'], 'Hello')
obj = f.save()
self.assertEqual(obj.name, 'class default value')
self.assertEqual(obj.value, 99)
self.assertEqual(obj.def_date, datetime.date(1999, 3, 2))
class RelatedModelFormTests(SimpleTestCase):
def test_invalid_loading_order(self):
"""
Test for issue 10405
"""
class A(models.Model):
ref = models.ForeignKey("B", models.CASCADE)
class Meta:
model = A
fields = '__all__'
msg = (
"Cannot create form field for 'ref' yet, because "
"its related model 'B' has not been loaded yet"
)
with self.assertRaisesMessage(ValueError, msg):
ModelFormMetaclass('Form', (ModelForm,), {'Meta': Meta})
class B(models.Model):
pass
def test_valid_loading_order(self):
"""
Test for issue 10405
"""
class C(models.Model):
ref = models.ForeignKey("D", models.CASCADE)
class D(models.Model):
pass
class Meta:
model = C
fields = '__all__'
self.assertTrue(issubclass(ModelFormMetaclass('Form', (ModelForm,), {'Meta': Meta}), ModelForm))
class ManyToManyExclusionTestCase(TestCase):
def test_m2m_field_exclusion(self):
# Issue 12337. save_instance should honor the passed-in exclude keyword.
opt1 = ChoiceOptionModel.objects.create(id=1, name='default')
opt2 = ChoiceOptionModel.objects.create(id=2, name='option 2')
opt3 = ChoiceOptionModel.objects.create(id=3, name='option 3')
initial = {
'choice': opt1,
'choice_int': opt1,
}
data = {
'choice': opt2.pk,
'choice_int': opt2.pk,
'multi_choice': 'string data!',
'multi_choice_int': [opt1.pk],
}
instance = ChoiceFieldModel.objects.create(**initial)
instance.multi_choice.set([opt2, opt3])
instance.multi_choice_int.set([opt2, opt3])
form = ChoiceFieldExclusionForm(data=data, instance=instance)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['multi_choice'], data['multi_choice'])
form.save()
self.assertEqual(form.instance.choice.pk, data['choice'])
self.assertEqual(form.instance.choice_int.pk, data['choice_int'])
self.assertEqual(list(form.instance.multi_choice.all()), [opt2, opt3])
self.assertEqual([obj.pk for obj in form.instance.multi_choice_int.all()], data['multi_choice_int'])
class EmptyLabelTestCase(TestCase):
def test_empty_field_char(self):
f = EmptyCharLabelChoiceForm()
self.assertHTMLEqual(
f.as_p(),
"""<p><label for="id_name">Name:</label> <input id="id_name" maxlength="10" name="name" type="text" required></p>
<p><label for="id_choice">Choice:</label> <select id="id_choice" name="choice">
<option value="" selected>No Preference</option>
<option value="f">Foo</option>
<option value="b">Bar</option>
</select></p>"""
)
def test_empty_field_char_none(self):
f = EmptyCharLabelNoneChoiceForm()
self.assertHTMLEqual(
f.as_p(),
"""<p><label for="id_name">Name:</label> <input id="id_name" maxlength="10" name="name" type="text" required></p>
<p><label for="id_choice_string_w_none">Choice string w none:</label>
<select id="id_choice_string_w_none" name="choice_string_w_none">
<option value="" selected>No Preference</option>
<option value="f">Foo</option>
<option value="b">Bar</option>
</select></p>"""
)
def test_save_empty_label_forms(self):
# Saving a form with a blank choice results in the expected
# value being stored in the database.
tests = [
(EmptyCharLabelNoneChoiceForm, 'choice_string_w_none', None),
(EmptyIntegerLabelChoiceForm, 'choice_integer', None),
(EmptyCharLabelChoiceForm, 'choice', ''),
]
for form, key, expected in tests:
with self.subTest(form=form):
f = form({'name': 'some-key', key: ''})
self.assertTrue(f.is_valid())
m = f.save()
self.assertEqual(expected, getattr(m, key))
self.assertEqual('No Preference', getattr(m, 'get_{}_display'.format(key))())
def test_empty_field_integer(self):
f = EmptyIntegerLabelChoiceForm()
self.assertHTMLEqual(
f.as_p(),
"""<p><label for="id_name">Name:</label> <input id="id_name" maxlength="10" name="name" type="text" required></p>
<p><label for="id_choice_integer">Choice integer:</label>
<select id="id_choice_integer" name="choice_integer">
<option value="" selected>No Preference</option>
<option value="1">Foo</option>
<option value="2">Bar</option>
</select></p>"""
)
def test_get_display_value_on_none(self):
m = ChoiceModel.objects.create(name='test', choice='', choice_integer=None)
self.assertIsNone(m.choice_integer)
self.assertEqual('No Preference', m.get_choice_integer_display())
def test_html_rendering_of_prepopulated_models(self):
none_model = ChoiceModel(name='none-test', choice_integer=None)
f = EmptyIntegerLabelChoiceForm(instance=none_model)
self.assertHTMLEqual(
f.as_p(),
"""<p><label for="id_name">Name:</label>
<input id="id_name" maxlength="10" name="name" type="text" value="none-test" required></p>
<p><label for="id_choice_integer">Choice integer:</label>
<select id="id_choice_integer" name="choice_integer">
<option value="" selected>No Preference</option>
<option value="1">Foo</option>
<option value="2">Bar</option>
</select></p>"""
)
foo_model = ChoiceModel(name='foo-test', choice_integer=1)
f = EmptyIntegerLabelChoiceForm(instance=foo_model)
self.assertHTMLEqual(
f.as_p(),
"""<p><label for="id_name">Name:</label>
<input id="id_name" maxlength="10" name="name" type="text" value="foo-test" required></p>
<p><label for="id_choice_integer">Choice integer:</label>
<select id="id_choice_integer" name="choice_integer">
<option value="">No Preference</option>
<option value="1" selected>Foo</option>
<option value="2">Bar</option>
</select></p>"""
)
|
bsd-3-clause
|
farr/emcee
|
emcee/moves/mh.py
|
1
|
2442
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function
import numpy as np
from .move import Move
from ..state import State
__all__ = ["MHMove"]
class MHMove(Move):
r"""A general Metropolis-Hastings proposal
Concrete implementations can be made by providing a ``proposal_function``
argument that implements the proposal as described below.
For standard Gaussian Metropolis moves, :class:`moves.GaussianMove` can be
used.
Args:
        proposal_function: The proposal function. It should take 2 arguments:
            a ``(K, ndim)`` list of coordinate vectors and a numpy-compatible
            random number generator, in the order used by ``propose`` below.
            This function should return the proposed
position and the log-ratio of the proposal probabilities
(:math:`\ln q(x;\,x^\prime) - \ln q(x^\prime;\,x)` where
:math:`x^\prime` is the proposed coordinate).
ndim (Optional[int]): If this proposal is only valid for a specific
dimension of parameter space, set that here.
"""
def __init__(self, proposal_function, ndim=None):
self.ndim = ndim
self.get_proposal = proposal_function
def propose(self, model, state):
"""Use the move to generate a proposal and compute the acceptance
Args:
            model: The sampler model exposing the log-probability function and
                a numpy-compatible random number state.
            state: The initial :class:`State` of the walkers.
"""
# Check to make sure that the dimensions match.
nwalkers, ndim = state.coords.shape
if self.ndim is not None and self.ndim != ndim:
raise ValueError("Dimension mismatch in proposal")
# Get the move-specific proposal.
q, factors = self.get_proposal(state.coords, model.random)
# Compute the lnprobs of the proposed position.
new_log_probs, new_blobs = model.compute_log_prob_fn(q)
# Loop over the walkers and update them accordingly.
lnpdiff = new_log_probs - state.log_prob + factors
accepted = np.log(model.random.rand(nwalkers)) < lnpdiff
# Update the parameters
new_state = State(q, log_prob=new_log_probs, blobs=new_blobs)
state = self.update(state, new_state, accepted)
return state, accepted
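# Hedged usage sketch (illustrative, not part of the original module): a
# symmetric Gaussian random-walk proposal. The argument order (coordinates,
# then the random state) matches the call in propose() above; for a symmetric
# proposal the log-ratio of proposal densities is zero for every walker.
def gaussian_proposal(coords, random, sigma=0.1):
    new_coords = coords + sigma * random.randn(*coords.shape)
    factors = np.zeros(len(coords))  # ln q(x; x') - ln q(x'; x) == 0
    return new_coords, factors
# move = MHMove(gaussian_proposal)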
|
mit
|
ruslanloman/nova
|
nova/tests/unit/scheduler/test_filters.py
|
31
|
8517
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Scheduler Host Filters.
"""
import inspect
import sys
from six.moves import range
from nova import filters
from nova import loadables
from nova import test
class Filter1(filters.BaseFilter):
"""Test Filter class #1."""
pass
class Filter2(filters.BaseFilter):
"""Test Filter class #2."""
pass
class FiltersTestCase(test.NoDBTestCase):
def test_filter_all(self):
filter_obj_list = ['obj1', 'obj2', 'obj3']
filter_properties = 'fake_filter_properties'
base_filter = filters.BaseFilter()
self.mox.StubOutWithMock(base_filter, '_filter_one')
base_filter._filter_one('obj1', filter_properties).AndReturn(True)
base_filter._filter_one('obj2', filter_properties).AndReturn(False)
base_filter._filter_one('obj3', filter_properties).AndReturn(True)
self.mox.ReplayAll()
result = base_filter.filter_all(filter_obj_list, filter_properties)
self.assertTrue(inspect.isgenerator(result))
self.assertEqual(['obj1', 'obj3'], list(result))
def test_filter_all_recursive_yields(self):
# Test filter_all() allows generators from previous filter_all()s.
# filter_all() yields results. We want to make sure that we can
# call filter_all() with generators returned from previous calls
# to filter_all().
filter_obj_list = ['obj1', 'obj2', 'obj3']
filter_properties = 'fake_filter_properties'
base_filter = filters.BaseFilter()
self.mox.StubOutWithMock(base_filter, '_filter_one')
total_iterations = 200
# The order that _filter_one is going to get called gets
        # confusing because we will be recursively yielding things.
# We are going to simulate the first call to filter_all()
# returning False for 'obj2'. So, 'obj1' will get yielded
# 'total_iterations' number of times before the first filter_all()
# call gets to processing 'obj2'. We then return 'False' for it.
# After that, 'obj3' gets yielded 'total_iterations' number of
# times.
for x in range(total_iterations):
base_filter._filter_one('obj1', filter_properties).AndReturn(True)
base_filter._filter_one('obj2', filter_properties).AndReturn(False)
for x in range(total_iterations):
base_filter._filter_one('obj3', filter_properties).AndReturn(True)
self.mox.ReplayAll()
objs = iter(filter_obj_list)
for x in range(total_iterations):
# Pass in generators returned from previous calls.
objs = base_filter.filter_all(objs, filter_properties)
self.assertTrue(inspect.isgenerator(objs))
self.assertEqual(['obj1', 'obj3'], list(objs))
def test_get_filtered_objects(self):
filter_objs_initial = ['initial', 'filter1', 'objects1']
filter_objs_second = ['second', 'filter2', 'objects2']
filter_objs_last = ['last', 'filter3', 'objects3']
filter_properties = 'fake_filter_properties'
def _fake_base_loader_init(*args, **kwargs):
pass
self.stubs.Set(loadables.BaseLoader, '__init__',
_fake_base_loader_init)
filt1_mock = self.mox.CreateMock(Filter1)
filt2_mock = self.mox.CreateMock(Filter2)
self.mox.StubOutWithMock(sys.modules[__name__], 'Filter1',
use_mock_anything=True)
self.mox.StubOutWithMock(filt1_mock, 'run_filter_for_index')
self.mox.StubOutWithMock(filt1_mock, 'filter_all')
self.mox.StubOutWithMock(sys.modules[__name__], 'Filter2',
use_mock_anything=True)
self.mox.StubOutWithMock(filt2_mock, 'run_filter_for_index')
self.mox.StubOutWithMock(filt2_mock, 'filter_all')
filt1_mock.run_filter_for_index(0).AndReturn(True)
filt1_mock.filter_all(filter_objs_initial,
filter_properties).AndReturn(filter_objs_second)
filt2_mock.run_filter_for_index(0).AndReturn(True)
filt2_mock.filter_all(filter_objs_second,
filter_properties).AndReturn(filter_objs_last)
self.mox.ReplayAll()
filter_handler = filters.BaseFilterHandler(filters.BaseFilter)
filter_mocks = [filt1_mock, filt2_mock]
result = filter_handler.get_filtered_objects(filter_mocks,
filter_objs_initial,
filter_properties)
self.assertEqual(filter_objs_last, result)
def test_get_filtered_objects_for_index(self):
"""Test that we don't call a filter when its
run_filter_for_index() method returns false
"""
filter_objs_initial = ['initial', 'filter1', 'objects1']
filter_objs_second = ['second', 'filter2', 'objects2']
filter_properties = 'fake_filter_properties'
def _fake_base_loader_init(*args, **kwargs):
pass
self.stubs.Set(loadables.BaseLoader, '__init__',
_fake_base_loader_init)
filt1_mock = self.mox.CreateMock(Filter1)
filt2_mock = self.mox.CreateMock(Filter2)
self.mox.StubOutWithMock(sys.modules[__name__], 'Filter1',
use_mock_anything=True)
self.mox.StubOutWithMock(filt1_mock, 'run_filter_for_index')
self.mox.StubOutWithMock(filt1_mock, 'filter_all')
self.mox.StubOutWithMock(sys.modules[__name__], 'Filter2',
use_mock_anything=True)
self.mox.StubOutWithMock(filt2_mock, 'run_filter_for_index')
self.mox.StubOutWithMock(filt2_mock, 'filter_all')
filt1_mock.run_filter_for_index(0).AndReturn(True)
filt1_mock.filter_all(filter_objs_initial,
filter_properties).AndReturn(filter_objs_second)
# return false so filter_all will not be called
filt2_mock.run_filter_for_index(0).AndReturn(False)
self.mox.ReplayAll()
filter_handler = filters.BaseFilterHandler(filters.BaseFilter)
filter_mocks = [filt1_mock, filt2_mock]
filter_handler.get_filtered_objects(filter_mocks,
filter_objs_initial,
filter_properties)
def test_get_filtered_objects_none_response(self):
filter_objs_initial = ['initial', 'filter1', 'objects1']
filter_properties = 'fake_filter_properties'
def _fake_base_loader_init(*args, **kwargs):
pass
self.stubs.Set(loadables.BaseLoader, '__init__',
_fake_base_loader_init)
filt1_mock = self.mox.CreateMock(Filter1)
filt2_mock = self.mox.CreateMock(Filter2)
self.mox.StubOutWithMock(sys.modules[__name__], 'Filter1',
use_mock_anything=True)
self.mox.StubOutWithMock(filt1_mock, 'run_filter_for_index')
self.mox.StubOutWithMock(filt1_mock, 'filter_all')
# Shouldn't be called.
self.mox.StubOutWithMock(sys.modules[__name__], 'Filter2',
use_mock_anything=True)
self.mox.StubOutWithMock(filt2_mock, 'filter_all')
filt1_mock.run_filter_for_index(0).AndReturn(True)
filt1_mock.filter_all(filter_objs_initial,
filter_properties).AndReturn(None)
self.mox.ReplayAll()
filter_handler = filters.BaseFilterHandler(filters.BaseFilter)
filter_mocks = [filt1_mock, filt2_mock]
result = filter_handler.get_filtered_objects(filter_mocks,
filter_objs_initial,
filter_properties)
self.assertIsNone(result)
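# --- Illustrative sketch (not part of the test suite above) ---
# The recursive-yield test relies on filter_all() returning a generator that
# can itself be fed back into filter_all(). The hypothetical SimpleFilter
# below shows the chaining: each call wraps the previous generator, so
# objects are pulled through the whole chain lazily, and 'obj2' is dropped
# at the innermost level.
def _sketch_filter_chaining():
    class SimpleFilter(object):
        def _filter_one(self, obj, filter_properties):
            return obj != 'obj2'

        def filter_all(self, filter_obj_list, filter_properties):
            for obj in filter_obj_list:
                if self._filter_one(obj, filter_properties):
                    yield obj

    filt = SimpleFilter()
    objs = iter(['obj1', 'obj2', 'obj3'])
    for _ in range(200):
        # Each call returns a new generator wrapping the previous one.
        objs = filt.filter_all(objs, 'fake_filter_properties')
    assert list(objs) == ['obj1', 'obj3']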
|
apache-2.0
|
JosmanPS/django-wiki
|
wiki/plugins/notifications/views.py
|
13
|
2193
|
from __future__ import unicode_literals
from __future__ import absolute_import
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.shortcuts import redirect
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext as _
from django.views.generic.edit import FormView
from . import forms
from . import models
class NotificationSettings(FormView):
template_name = 'wiki/plugins/notifications/settings.html'
form_class = forms.SettingsFormSet
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(
NotificationSettings,
self).dispatch(
request,
*args,
**kwargs)
def form_valid(self, formset):
for form in formset:
settings = form.save()
messages.info(
self.request,
_("You will receive notifications %(interval)s for "
"%(articles)d articles") % {
'interval': settings.get_interval_display(),
'articles': self.get_article_subscriptions(form.instance).count(),
}
)
return redirect('wiki:notification_settings')
def get_article_subscriptions(self, nyt_settings):
return models.ArticleSubscription.objects.filter(
subscription__settings=nyt_settings,
article__current_revision__deleted=False,
).select_related(
'article',
'article__current_revision'
).distinct()
def get_form_kwargs(self):
kwargs = FormView.get_form_kwargs(self)
kwargs['user'] = self.request.user
return kwargs
def get_context_data(self, **kwargs):
context = FormView.get_context_data(self, **kwargs)
context['formset'] = kwargs['form']
for form in context['formset']:
if form.instance:
setattr(
form.instance,
'articlesubscriptions',
self.get_article_subscriptions(form.instance)
)
return context
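# --- Illustrative wiring sketch (assumed; not part of this module) ---
# form_valid() above redirects to 'wiki:notification_settings', so the view
# is expected to be routed under that name, roughly like the hypothetical
# URLconf entry below:
#
# from django.conf.urls import url
# from wiki.plugins.notifications import views
#
# urlpatterns = [
#     url(r'^_plugin/notifications/settings/$',
#         views.NotificationSettings.as_view(),
#         name='notification_settings'),
# ]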
|
gpl-3.0
|
mgedmin/ansible
|
lib/ansible/modules/source_control/hg.py
|
25
|
10473
|
#!/usr/bin/python
#-*- coding: utf-8 -*-
# (c) 2013, Yeukhon Wong <[email protected]>
# (c) 2014, Nate Coraor <[email protected]>
#
# This module was originally inspired by Brad Olson's ansible-module-mercurial
# <https://github.com/bradobro/ansible-module-mercurial>. This module tends
# to follow the git module implementation.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: hg
short_description: Manages Mercurial (hg) repositories.
description:
- Manages Mercurial (hg) repositories. Supports SSH, HTTP/S and local address.
version_added: "1.0"
author: "Yeukhon Wong (@yeukhon)"
options:
repo:
description:
- The repository address.
required: true
default: null
aliases: [ name ]
dest:
description:
- Absolute path of where the repository should be cloned to.
This parameter is required, unless clone and update are set to no
required: true
default: null
revision:
description:
      - Equivalent to the C(-r) option of the hg command; may be a changeset,
        revision number, branch name or even a tag.
required: false
default: null
aliases: [ version ]
force:
description:
- Discards uncommitted changes. Runs C(hg update -C). Prior to
1.9, the default was `yes`.
required: false
default: "no"
choices: [ "yes", "no" ]
purge:
description:
- Deletes untracked files. Runs C(hg purge).
required: false
default: "no"
choices: [ "yes", "no" ]
update:
required: false
default: "yes"
choices: [ "yes", "no" ]
version_added: "2.0"
description:
- If C(no), do not retrieve new revisions from the origin repository
clone:
required: false
default: "yes"
choices: [ "yes", "no" ]
version_added: "2.3"
description:
- If C(no), do not clone the repository if it does not exist locally.
executable:
required: false
default: null
version_added: "1.4"
description:
- Path to hg executable to use. If not supplied,
the normal mechanism for resolving binary paths will be used.
notes:
- "If the task seems to be hanging, first verify remote host is in C(known_hosts).
SSH will prompt user to authorize the first contact with a remote host. To avoid this prompt,
one solution is to add the remote host public key in C(/etc/ssh/ssh_known_hosts) before calling
the hg module, with the following command: ssh-keyscan remote_host.com >> /etc/ssh/ssh_known_hosts."
requirements: [ ]
'''
EXAMPLES = '''
# Ensure the current working copy is on the stable branch and delete untracked files, if any.
- hg:
repo: https://bitbucket.org/user/repo1
dest: /home/user/repo1
revision: stable
purge: yes
# Example: just get information about the repository, whether or not it
# has already been cloned locally.
- hg:
repo: git://bitbucket.org/user/repo
dest: /srv/checkout
clone: no
update: no
'''
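# A brief usage note (an assumption, not taken from the docs above): besides
# playbook tasks, the module can also be exercised ad hoc, e.g.:
#   ansible localhost -m hg -a "repo=https://bitbucket.org/user/repo1 dest=/tmp/repo1"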
import os
# import module snippets
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
class Hg(object):
def __init__(self, module, dest, repo, revision, hg_path):
self.module = module
self.dest = dest
self.repo = repo
self.revision = revision
self.hg_path = hg_path
def _command(self, args_list):
(rc, out, err) = self.module.run_command([self.hg_path] + args_list)
return (rc, out, err)
def _list_untracked(self):
args = ['purge', '--config', 'extensions.purge=', '-R', self.dest, '--print']
return self._command(args)
def get_revision(self):
"""
hg id -b -i -t returns a string in the format:
"<changeset>[+] <branch_name> <tag>"
This format lists the state of the current working copy,
and indicates whether there are uncommitted changes by the
plus sign. Otherwise, the sign is omitted.
Read the full description via hg id --help
"""
(rc, out, err) = self._command(['id', '-b', '-i', '-t', '-R', self.dest])
if rc != 0:
self.module.fail_json(msg=err)
else:
return to_native(out).strip('\n')
def get_remote_revision(self):
(rc, out, err) = self._command(['id', self.repo])
if rc != 0:
            self.module.fail_json(msg=err)
else:
return to_native(out).strip('\n')
def has_local_mods(self):
now = self.get_revision()
if '+' in now:
return True
else:
return False
def discard(self):
before = self.has_local_mods()
if not before:
return False
args = ['update', '-C', '-R', self.dest, '-r', '.']
(rc, out, err) = self._command(args)
if rc != 0:
self.module.fail_json(msg=err)
after = self.has_local_mods()
if before != after and not after: # no more local modification
return True
def purge(self):
# before purge, find out if there are any untracked files
(rc1, out1, err1) = self._list_untracked()
if rc1 != 0:
self.module.fail_json(msg=err1)
        # there are some untracked files
if out1 != '':
args = ['purge', '--config', 'extensions.purge=', '-R', self.dest]
(rc2, out2, err2) = self._command(args)
if rc2 != 0:
self.module.fail_json(msg=err2)
return True
else:
return False
def cleanup(self, force, purge):
discarded = False
purged = False
if force:
discarded = self.discard()
if purge:
purged = self.purge()
if discarded or purged:
return True
else:
return False
def pull(self):
return self._command(
['pull', '-R', self.dest, self.repo])
def update(self):
if self.revision is not None:
return self._command(['update', '-r', self.revision, '-R', self.dest])
return self._command(['update', '-R', self.dest])
def clone(self):
if self.revision is not None:
return self._command(['clone', self.repo, self.dest, '-r', self.revision])
return self._command(['clone', self.repo, self.dest])
@property
def at_revision(self):
"""
There is no point in pulling from a potentially down/slow remote site
if the desired changeset is already the current changeset.
"""
if self.revision is None or len(self.revision) < 7:
# Assume it's a rev number, tag, or branch
return False
(rc, out, err) = self._command(['--debug', 'id', '-i', '-R', self.dest])
if rc != 0:
self.module.fail_json(msg=err)
if out.startswith(self.revision):
return True
return False
# ===========================================
def main():
module = AnsibleModule(
argument_spec = dict(
repo = dict(required=True, aliases=['name']),
dest = dict(type='path'),
revision = dict(default=None, aliases=['version']),
force = dict(default='no', type='bool'),
purge = dict(default='no', type='bool'),
update = dict(default='yes', type='bool'),
clone = dict(default='yes', type='bool'),
executable = dict(default=None),
),
)
repo = module.params['repo']
dest = module.params['dest']
revision = module.params['revision']
force = module.params['force']
purge = module.params['purge']
update = module.params['update']
clone = module.params['clone']
hg_path = module.params['executable'] or module.get_bin_path('hg', True)
if dest is not None:
hgrc = os.path.join(dest, '.hg/hgrc')
# initial states
before = ''
changed = False
cleaned = False
if not dest and (clone or update):
module.fail_json(msg="the destination directory must be specified unless clone=no and update=no")
hg = Hg(module, dest, repo, revision, hg_path)
# If there is no hgrc file, then assume repo is absent
# and perform clone. Otherwise, perform pull and update.
if not clone and not update:
out = hg.get_remote_revision()
module.exit_json(after=out, changed=False)
if not os.path.exists(hgrc):
if clone:
(rc, out, err) = hg.clone()
if rc != 0:
module.fail_json(msg=err)
else:
module.exit_json(changed=False)
elif not update:
# Just return having found a repo already in the dest path
before = hg.get_revision()
elif hg.at_revision:
# no update needed, don't pull
before = hg.get_revision()
# but force and purge if desired
cleaned = hg.cleanup(force, purge)
else:
# get the current state before doing pulling
before = hg.get_revision()
# can perform force and purge
cleaned = hg.cleanup(force, purge)
(rc, out, err) = hg.pull()
if rc != 0:
module.fail_json(msg=err)
(rc, out, err) = hg.update()
if rc != 0:
module.fail_json(msg=err)
after = hg.get_revision()
if before != after or cleaned:
changed = True
module.exit_json(before=before, after=after, changed=changed, cleaned=cleaned)
if __name__ == '__main__':
main()
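# --- Illustrative sketch (separate from the module above) ---
# at_revision relies on `hg --debug id -i` printing the full 40-character
# changeset hash, so a user-supplied hash of 7 or more characters can be
# prefix-matched, while shorter values (likely a branch, tag, or rev number)
# always force a pull. Values below are made up.
def _sketch_at_revision(full_node, requested):
    if requested is None or len(requested) < 7:
        return False  # could be a branch, tag, or rev number; must pull
    return full_node.startswith(requested)

# _sketch_at_revision('a21d1a2' + '0' * 33, 'a21d1a2')  -> True
# _sketch_at_revision('a21d1a2' + '0' * 33, 'stable')   -> False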
|
gpl-3.0
|
povils/git-wipe
|
git_wipe/cli.py
|
1
|
2773
|
import click
import crayons
import blindspin
import sys
import os
from git_wipe.__version__ import __version__
from git_wipe.env import GIT_WIPE_TOKEN
from git_wipe.github_client import GithubClient
from github.GithubException import BadCredentialsException
@click.group(invoke_without_command=True)
@click.option('--help', is_flag=True, default=None, help='Show this message then exit.')
@click.version_option(prog_name=crayons.yellow('git-wipe'), version=__version__)
@click.pass_context
def cli(ctx, help):
    """CLI tool for deleting Github branches"""
    if ctx.invoked_subcommand is None:
        click.echo(ctx.get_help())
@click.command(help='Cleanup merged branches as Pull Requests')
@click.option('--timeout', default=30, help='Set max timeout in seconds')
@click.option('--token', default=GIT_WIPE_TOKEN, help='Github Access Token')
@click.option('--skip-repository', multiple=True, help='Skip certain repositories')
@click.option('--skip-branch', multiple=True, help='Skip certain branches')
@click.option('--preview', is_flag=True, default=False, help='Preview found branches')
@click.option('--no-interaction', is_flag=True, default=False, help='Do not ask any interactive question')
def cleanup(token, timeout, skip_repository, skip_branch, preview, no_interaction):
if token is None:
token = click.prompt(crayons.green(
'Please enter your Github access token'))
github_client = GithubClient(token, timeout)
try:
click.echo(crayons.green(
'Searching for branches. This may take a while...'), err=True)
with blindspin.spinner():
repo_branches = github_client.get_merged_fork_branches(
skip_repository, skip_branch)
except BadCredentialsException:
click.echo(crayons.red(
'Bad credentials. Please provide valid access token'), err=True)
sys.exit(1)
if not repo_branches:
click.echo(crayons.green(
'Congratulations! No remote branches are available for cleaning up'))
sys.exit(0)
    click.echo(crayons.yellow('Found branches:\n'))
    list_branches(repo_branches)
    if not preview:
        if not no_interaction:
            click.confirm(crayons.green('\nDelete these branches?'), abort=True)
click.echo(crayons.green('Deleting branches...'))
with blindspin.spinner():
github_client.delete_branches(repo_branches)
click.echo(crayons.green('Done'))
def list_branches(repo_branches):
for repo, branch in repo_branches:
click.echo(crayons.yellow(repo.full_name + ':' + branch.name))
click.echo(crayons.yellow('Total: ' + str(len(repo_branches))))
# Add commands
cli.add_command(cleanup)
if __name__ == '__main__':
cli()
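# --- Illustrative test sketch (assumes click's bundled test utilities) ---
# The CLI can be exercised in-process without a real terminal; note that a
# valid token is still needed for the Github calls themselves:
#
# from click.testing import CliRunner
#
# runner = CliRunner()
# result = runner.invoke(cli, ['cleanup', '--preview', '--token', 'dummy-token'])
# print(result.output)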
|
mit
|
tensor-tang/Paddle
|
python/paddle/dataset/tests/cifar_test.py
|
6
|
1898
|
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import paddle.dataset.cifar
import unittest
class TestCIFAR(unittest.TestCase):
def check_reader(self, reader):
sum = 0
label = 0
for l in reader():
self.assertEqual(l[0].size, 3072)
if l[1] > label:
label = l[1]
sum += 1
return sum, label
def test_test10(self):
instances, max_label_value = self.check_reader(
paddle.dataset.cifar.test10())
self.assertEqual(instances, 10000)
self.assertEqual(max_label_value, 9)
def test_train10(self):
instances, max_label_value = self.check_reader(
paddle.dataset.cifar.train10())
self.assertEqual(instances, 50000)
self.assertEqual(max_label_value, 9)
def test_test100(self):
instances, max_label_value = self.check_reader(
paddle.dataset.cifar.test100())
self.assertEqual(instances, 10000)
self.assertEqual(max_label_value, 99)
def test_train100(self):
instances, max_label_value = self.check_reader(
paddle.dataset.cifar.train100())
self.assertEqual(instances, 50000)
self.assertEqual(max_label_value, 99)
if __name__ == '__main__':
unittest.main()
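# --- Illustrative sketch of the reader protocol check_reader() expects ---
# A "reader creator" is a zero-argument callable returning an iterator of
# (image, label) pairs, where image is a flattened array of 3 * 32 * 32 =
# 3072 values. The stand-in below is hypothetical and uses no real data:
#
# import numpy as np
#
# def fake_cifar10():
#     def reader():
#         for label in range(10):
#             yield np.zeros(3072, dtype='float32'), label
#     return reader
#
# instances, max_label = TestCIFAR('test_test10').check_reader(fake_cifar10())
# assert (instances, max_label) == (10, 9)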
|
apache-2.0
|
kchodorow/tensorflow
|
tensorflow/python/training/server_lib_test.py
|
7
|
16137
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.GrpcServer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import tensorflow_server_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import server_lib
class GrpcServerTest(test.TestCase):
def __init__(self, methodName="runTest"): # pylint: disable=invalid-name
super(GrpcServerTest, self).__init__(methodName)
self._cached_server = server_lib.Server.create_local_server()
def testRunStep(self):
server = self._cached_server
with session.Session(server.target) as sess:
c = constant_op.constant([[2, 1]])
d = constant_op.constant([[1], [2]])
e = math_ops.matmul(c, d)
self.assertAllEqual([[4]], sess.run(e))
# TODO(mrry): Add `server.stop()` and `server.join()` when these work.
def testMultipleSessions(self):
server = self._cached_server
c = constant_op.constant([[2, 1]])
d = constant_op.constant([[1], [2]])
e = math_ops.matmul(c, d)
sess_1 = session.Session(server.target)
sess_2 = session.Session(server.target)
self.assertAllEqual([[4]], sess_1.run(e))
self.assertAllEqual([[4]], sess_2.run(e))
sess_1.close()
sess_2.close()
# TODO(mrry): Add `server.stop()` and `server.join()` when these work.
# Verifies various reset failures.
def testResetFails(self):
# Creates variable with container name.
with ops.container("test0"):
v0 = variables.Variable(1.0, name="v0")
# Creates variable with default container.
v1 = variables.Variable(2.0, name="v1")
# Verifies resetting the non-existent target returns error.
with self.assertRaises(errors_impl.NotFoundError):
session.Session.reset("nonexistent", ["test0"])
# Verifies resetting with config.
# Verifies that resetting target with no server times out.
with self.assertRaises(errors_impl.DeadlineExceededError):
session.Session.reset(
"grpc://localhost:0", ["test0"],
config=config_pb2.ConfigProto(operation_timeout_in_ms=5))
# Verifies no containers are reset with non-existent container.
server = self._cached_server
sess = session.Session(server.target)
sess.run(variables.global_variables_initializer())
self.assertAllEqual(1.0, sess.run(v0))
self.assertAllEqual(2.0, sess.run(v1))
# No container is reset, but the server is reset.
session.Session.reset(server.target, ["test1"])
# Verifies that both variables are still valid.
sess = session.Session(server.target)
self.assertAllEqual(1.0, sess.run(v0))
self.assertAllEqual(2.0, sess.run(v1))
def _useRPCConfig(self):
"""Return a `tf.ConfigProto` that ensures we use the RPC stack for tests.
This configuration ensures that we continue to exercise the gRPC
stack when testing, rather than using the in-process optimization,
which avoids using gRPC as the transport between a client and
master in the same process.
Returns:
A `tf.ConfigProto`.
"""
return config_pb2.ConfigProto(rpc_options=config_pb2.RPCOptions(
use_rpc_for_inprocess_master=True))
def testLargeConstant(self):
server = self._cached_server
with session.Session(server.target, config=self._useRPCConfig()) as sess:
const_val = np.empty([10000, 3000], dtype=np.float32)
const_val.fill(0.5)
c = constant_op.constant(const_val)
shape_t = array_ops.shape(c)
self.assertAllEqual([10000, 3000], sess.run(shape_t))
def testLargeFetch(self):
server = self._cached_server
with session.Session(server.target, config=self._useRPCConfig()) as sess:
c = array_ops.fill([10000, 3000], 0.5)
expected_val = np.empty([10000, 3000], dtype=np.float32)
expected_val.fill(0.5)
self.assertAllEqual(expected_val, sess.run(c))
def testLargeFeed(self):
server = self._cached_server
with session.Session(server.target, config=self._useRPCConfig()) as sess:
feed_val = np.empty([10000, 3000], dtype=np.float32)
feed_val.fill(0.5)
p = array_ops.placeholder(dtypes.float32, shape=[10000, 3000])
min_t = math_ops.reduce_min(p)
max_t = math_ops.reduce_max(p)
min_val, max_val = sess.run([min_t, max_t], feed_dict={p: feed_val})
self.assertEqual(0.5, min_val)
self.assertEqual(0.5, max_val)
def testCloseCancelsBlockingOperation(self):
server = self._cached_server
sess = session.Session(server.target, config=self._useRPCConfig())
q = data_flow_ops.FIFOQueue(10, [dtypes.float32])
enqueue_op = q.enqueue(37.0)
dequeue_t = q.dequeue()
sess.run(enqueue_op)
sess.run(dequeue_t)
def blocking_dequeue():
with self.assertRaises(errors_impl.CancelledError):
sess.run(dequeue_t)
blocking_thread = self.checkedThread(blocking_dequeue)
blocking_thread.start()
time.sleep(0.5)
sess.close()
blocking_thread.join()
def testInteractiveSession(self):
server = self._cached_server
# Session creation will warn (in C++) that the place_pruned_graph option
# is not supported, but it should successfully ignore it.
sess = session.InteractiveSession(server.target)
c = constant_op.constant(42.0)
self.assertEqual(42.0, c.eval())
sess.close()
def testSetConfiguration(self):
config = config_pb2.ConfigProto(
gpu_options=config_pb2.GPUOptions(per_process_gpu_memory_fraction=0.1))
# Configure a server using the default local server options.
server = server_lib.Server.create_local_server(config=config, start=False)
self.assertEqual(0.1, server.server_def.default_session_config.gpu_options.
per_process_gpu_memory_fraction)
    # Configure a server using an explicit ServerDef with an
    # overridden config.
cluster_def = server_lib.ClusterSpec({
"localhost": ["localhost:0"]
}).as_cluster_def()
server_def = tensorflow_server_pb2.ServerDef(
cluster=cluster_def,
job_name="localhost",
task_index=0,
protocol="grpc")
server = server_lib.Server(server_def, config=config, start=False)
self.assertEqual(0.1, server.server_def.default_session_config.gpu_options.
per_process_gpu_memory_fraction)
def testInvalidHostname(self):
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError, "port"):
_ = server_lib.Server(
{
"local": ["localhost"]
}, job_name="local", task_index=0)
def testTimeoutRaisesException(self):
server = self._cached_server
q = data_flow_ops.FIFOQueue(1, [dtypes.float32])
blocking_t = q.dequeue()
with session.Session(server.target) as sess:
with self.assertRaises(errors_impl.DeadlineExceededError):
sess.run(blocking_t, options=config_pb2.RunOptions(timeout_in_ms=1000))
with session.Session(server.target, config=self._useRPCConfig()) as sess:
with self.assertRaises(errors_impl.DeadlineExceededError):
sess.run(blocking_t, options=config_pb2.RunOptions(timeout_in_ms=1000))
def testTwoServersSamePort(self):
# Starting a server with the same target as the cached server should fail.
server = self._cached_server
with self.assertRaises(errors_impl.UnknownError):
_ = server_lib.Server(
{"local_2": [server.target[len("grpc://"):]]})
class ServerDefTest(test.TestCase):
def testLocalServer(self):
cluster_def = server_lib.ClusterSpec({
"local": ["localhost:2222"]
}).as_cluster_def()
server_def = tensorflow_server_pb2.ServerDef(
cluster=cluster_def, job_name="local", task_index=0, protocol="grpc")
self.assertProtoEquals("""
cluster {
job { name: 'local' tasks { key: 0 value: 'localhost:2222' } }
}
job_name: 'local' task_index: 0 protocol: 'grpc'
""", server_def)
# Verifies round trip from Proto->Spec->Proto is correct.
cluster_spec = server_lib.ClusterSpec(cluster_def)
self.assertProtoEquals(cluster_def, cluster_spec.as_cluster_def())
def testTwoProcesses(self):
cluster_def = server_lib.ClusterSpec({
"local": ["localhost:2222", "localhost:2223"]
}).as_cluster_def()
server_def = tensorflow_server_pb2.ServerDef(
cluster=cluster_def, job_name="local", task_index=1, protocol="grpc")
self.assertProtoEquals("""
cluster {
job { name: 'local' tasks { key: 0 value: 'localhost:2222' }
tasks { key: 1 value: 'localhost:2223' } }
}
job_name: 'local' task_index: 1 protocol: 'grpc'
""", server_def)
# Verifies round trip from Proto->Spec->Proto is correct.
cluster_spec = server_lib.ClusterSpec(cluster_def)
self.assertProtoEquals(cluster_def, cluster_spec.as_cluster_def())
def testTwoJobs(self):
cluster_def = server_lib.ClusterSpec({
"ps": ["ps0:2222", "ps1:2222"],
"worker": ["worker0:2222", "worker1:2222", "worker2:2222"]
}).as_cluster_def()
server_def = tensorflow_server_pb2.ServerDef(
cluster=cluster_def, job_name="worker", task_index=2, protocol="grpc")
self.assertProtoEquals("""
cluster {
job { name: 'ps' tasks { key: 0 value: 'ps0:2222' }
tasks { key: 1 value: 'ps1:2222' } }
job { name: 'worker' tasks { key: 0 value: 'worker0:2222' }
tasks { key: 1 value: 'worker1:2222' }
tasks { key: 2 value: 'worker2:2222' } }
}
job_name: 'worker' task_index: 2 protocol: 'grpc'
""", server_def)
# Verifies round trip from Proto->Spec->Proto is correct.
cluster_spec = server_lib.ClusterSpec(cluster_def)
self.assertProtoEquals(cluster_def, cluster_spec.as_cluster_def())
def testDenseAndSparseJobs(self):
cluster_def = server_lib.ClusterSpec({
"ps": ["ps0:2222", "ps1:2222"],
"worker": {
0: "worker0:2222",
2: "worker2:2222"
}
}).as_cluster_def()
server_def = tensorflow_server_pb2.ServerDef(
cluster=cluster_def, job_name="worker", task_index=2, protocol="grpc")
self.assertProtoEquals("""
cluster {
job { name: 'ps' tasks { key: 0 value: 'ps0:2222' }
tasks { key: 1 value: 'ps1:2222' } }
job { name: 'worker' tasks { key: 0 value: 'worker0:2222' }
tasks { key: 2 value: 'worker2:2222' } }
}
job_name: 'worker' task_index: 2 protocol: 'grpc'
""", server_def)
# Verifies round trip from Proto->Spec->Proto is correct.
cluster_spec = server_lib.ClusterSpec(cluster_def)
self.assertProtoEquals(cluster_def, cluster_spec.as_cluster_def())
class ClusterSpecTest(test.TestCase):
def testProtoDictDefEquivalences(self):
cluster_spec = server_lib.ClusterSpec({
"ps": ["ps0:2222", "ps1:2222"],
"worker": ["worker0:2222", "worker1:2222", "worker2:2222"]
})
expected_proto = """
job { name: 'ps' tasks { key: 0 value: 'ps0:2222' }
tasks { key: 1 value: 'ps1:2222' } }
job { name: 'worker' tasks { key: 0 value: 'worker0:2222' }
tasks { key: 1 value: 'worker1:2222' }
tasks { key: 2 value: 'worker2:2222' } }
"""
self.assertProtoEquals(expected_proto, cluster_spec.as_cluster_def())
self.assertProtoEquals(
expected_proto, server_lib.ClusterSpec(cluster_spec).as_cluster_def())
self.assertProtoEquals(
expected_proto,
server_lib.ClusterSpec(cluster_spec.as_cluster_def()).as_cluster_def())
self.assertProtoEquals(
expected_proto,
server_lib.ClusterSpec(cluster_spec.as_dict()).as_cluster_def())
def testClusterSpecAccessors(self):
original_dict = {
"ps": ["ps0:2222", "ps1:2222"],
"worker": ["worker0:2222", "worker1:2222", "worker2:2222"],
"sparse": {
0: "sparse0:2222",
3: "sparse3:2222"
}
}
cluster_spec = server_lib.ClusterSpec(original_dict)
self.assertEqual(original_dict, cluster_spec.as_dict())
self.assertEqual(2, cluster_spec.num_tasks("ps"))
self.assertEqual(3, cluster_spec.num_tasks("worker"))
self.assertEqual(2, cluster_spec.num_tasks("sparse"))
with self.assertRaises(ValueError):
cluster_spec.num_tasks("unknown")
self.assertEqual("ps0:2222", cluster_spec.task_address("ps", 0))
self.assertEqual("sparse0:2222", cluster_spec.task_address("sparse", 0))
with self.assertRaises(ValueError):
cluster_spec.task_address("unknown", 0)
with self.assertRaises(ValueError):
cluster_spec.task_address("sparse", 2)
self.assertEqual([0, 1], cluster_spec.task_indices("ps"))
self.assertEqual([0, 1, 2], cluster_spec.task_indices("worker"))
self.assertEqual([0, 3], cluster_spec.task_indices("sparse"))
with self.assertRaises(ValueError):
cluster_spec.task_indices("unknown")
# NOTE(mrry): `ClusterSpec.job_tasks()` is not recommended for use
# with sparse jobs.
self.assertEqual(["ps0:2222", "ps1:2222"], cluster_spec.job_tasks("ps"))
self.assertEqual(["worker0:2222", "worker1:2222", "worker2:2222"],
cluster_spec.job_tasks("worker"))
self.assertEqual(["sparse0:2222", None, None, "sparse3:2222"],
cluster_spec.job_tasks("sparse"))
with self.assertRaises(ValueError):
cluster_spec.job_tasks("unknown")
def testEmptyClusterSpecIsFalse(self):
self.assertFalse(server_lib.ClusterSpec({}))
def testNonEmptyClusterSpecIsTrue(self):
self.assertTrue(server_lib.ClusterSpec({"job": ["host:port"]}))
def testEq(self):
self.assertEquals(server_lib.ClusterSpec({}), server_lib.ClusterSpec({}))
self.assertEquals(
server_lib.ClusterSpec({
"job": ["host:2222"]
}),
server_lib.ClusterSpec({
"job": ["host:2222"]
}),)
self.assertEquals(
server_lib.ClusterSpec({
"job": {
0: "host:2222"
}
}), server_lib.ClusterSpec({
"job": ["host:2222"]
}))
def testNe(self):
self.assertNotEquals(
server_lib.ClusterSpec({}),
server_lib.ClusterSpec({
"job": ["host:2223"]
}),)
self.assertNotEquals(
server_lib.ClusterSpec({
"job1": ["host:2222"]
}),
server_lib.ClusterSpec({
"job2": ["host:2222"]
}),)
self.assertNotEquals(
server_lib.ClusterSpec({
"job": ["host:2222"]
}),
server_lib.ClusterSpec({
"job": ["host:2223"]
}),)
self.assertNotEquals(
server_lib.ClusterSpec({
"job": ["host:2222", "host:2223"]
}),
server_lib.ClusterSpec({
"job": ["host:2223", "host:2222"]
}),)
if __name__ == "__main__":
test.main()
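# --- Illustrative sketch of the pattern exercised above ---
# Create an in-process gRPC server and attach a client session to its
# target (the server picks a free local port):
#
# server = server_lib.Server.create_local_server()
# with session.Session(server.target) as sess:
#     print(sess.run(constant_op.constant(3.0) * 2.0))  # 6.0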
|
apache-2.0
|
moniqx4/bite-project
|
deps/gdata-python-client/tests/run_service_tests.py
|
38
|
4259
|
#!/usr/bin/python
import sys
import unittest
import module_test_runner
import getopt
import getpass
# Modules whose tests we will run.
import atom_tests.service_test
import gdata_tests.service_test
import gdata_tests.apps.service_test
import gdata_tests.books.service_test
import gdata_tests.calendar.service_test
import gdata_tests.docs.service_test
import gdata_tests.health.service_test
import gdata_tests.spreadsheet.service_test
import gdata_tests.spreadsheet.text_db_test
import gdata_tests.photos.service_test
import gdata_tests.contacts.service_test
import gdata_tests.blogger.service_test
import gdata_tests.youtube.service_test
import gdata_tests.base.service_test
import gdata_tests.contacts.profiles.service_test
def RunAllTests(username, password, spreadsheet_key, worksheet_key,
apps_username, apps_password, apps_domain):
test_runner = module_test_runner.ModuleTestRunner()
test_runner.modules = [atom_tests.service_test,
gdata_tests.service_test,
gdata_tests.apps.service_test,
gdata_tests.base.service_test,
gdata_tests.books.service_test,
gdata_tests.calendar.service_test,
gdata_tests.docs.service_test,
gdata_tests.health.service_test,
gdata_tests.spreadsheet.service_test,
gdata_tests.spreadsheet.text_db_test,
gdata_tests.contacts.service_test,
gdata_tests.youtube.service_test,
gdata_tests.photos.service_test,
gdata_tests.contacts.profiles.service_test,]
test_runner.settings = {'username':username, 'password':password,
'test_image_location':'testimage.jpg',
'ss_key':spreadsheet_key,
'ws_key':worksheet_key,
'apps_username':apps_username,
'apps_password':apps_password,
'apps_domain':apps_domain}
test_runner.RunAllTests()
def GetValuesForTestSettingsAndRunAllTests():
username = ''
password = ''
spreadsheet_key = ''
worksheet_key = ''
apps_domain = ''
apps_username = ''
apps_password = ''
print ('NOTE: Please run these tests only with a test account. '
'The tests may delete or update your data.')
try:
opts, args = getopt.getopt(sys.argv[1:], '', ['username=', 'password=',
'ss_key=', 'ws_key=',
'apps_username=',
'apps_password=',
'apps_domain='])
for o, a in opts:
if o == '--username':
username = a
elif o == '--password':
password = a
elif o == '--ss_key':
spreadsheet_key = a
elif o == '--ws_key':
worksheet_key = a
elif o == '--apps_username':
apps_username = a
elif o == '--apps_password':
apps_password = a
elif o == '--apps_domain':
apps_domain = a
except getopt.GetoptError:
pass
if username == '' and password == '':
    print ('Missing --username and --password command line arguments, '
           'prompting for credentials.')
if username == '':
username = raw_input('Please enter your username: ')
if password == '':
password = getpass.getpass()
if spreadsheet_key == '':
spreadsheet_key = raw_input(
'Please enter the key for the test spreadsheet: ')
if worksheet_key == '':
worksheet_key = raw_input(
'Please enter the id for the worksheet to be edited: ')
if apps_username == '':
apps_username = raw_input('Please enter your Google Apps admin username: ')
if apps_password == '':
apps_password = getpass.getpass()
if apps_domain == '':
apps_domain = raw_input('Please enter your Google Apps domain: ')
RunAllTests(username, password, spreadsheet_key, worksheet_key,
apps_username, apps_password, apps_domain)
if __name__ == '__main__':
GetValuesForTestSettingsAndRunAllTests()
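# --- Illustrative sketch of the getopt pattern used above ---
# getopt.getopt() returns (option, value) pairs for the registered long
# options plus any leftover positional arguments (argv below is made up):
#
# import getopt
# opts, args = getopt.getopt(
#     ['--username', 'alice', '--ss_key', 'abc123', 'extra'],
#     '', ['username=', 'password=', 'ss_key=', 'ws_key='])
# assert opts == [('--username', 'alice'), ('--ss_key', 'abc123')]
# assert args == ['extra']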
|
apache-2.0
|
shuggiefisher/potato
|
django/contrib/gis/geos/point.py
|
403
|
4253
|
from ctypes import c_uint
from django.contrib.gis.geos.error import GEOSException
from django.contrib.gis.geos.geometry import GEOSGeometry
from django.contrib.gis.geos import prototypes as capi
class Point(GEOSGeometry):
_minlength = 2
_maxlength = 3
def __init__(self, x, y=None, z=None, srid=None):
"""
The Point object may be initialized with either a tuple, or individual
parameters.
For Example:
>>> p = Point((5, 23)) # 2D point, passed in as a tuple
>>> p = Point(5, 23, 8) # 3D point, passed in with individual parameters
"""
if isinstance(x, (tuple, list)):
# Here a tuple or list was passed in under the `x` parameter.
ndim = len(x)
coords = x
elif isinstance(x, (int, float, long)) and isinstance(y, (int, float, long)):
# Here X, Y, and (optionally) Z were passed in individually, as parameters.
if isinstance(z, (int, float, long)):
ndim = 3
coords = [x, y, z]
else:
ndim = 2
coords = [x, y]
else:
raise TypeError('Invalid parameters given for Point initialization.')
point = self._create_point(ndim, coords)
# Initializing using the address returned from the GEOS
# createPoint factory.
super(Point, self).__init__(point, srid=srid)
def _create_point(self, ndim, coords):
"""
Create a coordinate sequence, set X, Y, [Z], and create point
"""
if ndim < 2 or ndim > 3:
raise TypeError('Invalid point dimension: %s' % str(ndim))
cs = capi.create_cs(c_uint(1), c_uint(ndim))
i = iter(coords)
capi.cs_setx(cs, 0, i.next())
capi.cs_sety(cs, 0, i.next())
if ndim == 3: capi.cs_setz(cs, 0, i.next())
return capi.create_point(cs)
def _set_list(self, length, items):
ptr = self._create_point(length, items)
if ptr:
capi.destroy_geom(self.ptr)
self._ptr = ptr
self._set_cs()
else:
            # Should not normally happen, since _create_point validates ndim,
            # but guard against a NULL pointer from GEOS anyway.
raise GEOSException('Geometry resulting from slice deletion was invalid.')
def _set_single(self, index, value):
self._cs.setOrdinate(index, 0, value)
def __iter__(self):
"Allows iteration over coordinates of this Point."
for i in xrange(len(self)):
yield self[i]
def __len__(self):
"Returns the number of dimensions for this Point (either 0, 2 or 3)."
if self.empty: return 0
if self.hasz: return 3
else: return 2
def _get_single_external(self, index):
if index == 0:
return self.x
elif index == 1:
return self.y
elif index == 2:
return self.z
_get_single_internal = _get_single_external
def get_x(self):
"Returns the X component of the Point."
return self._cs.getOrdinate(0, 0)
def set_x(self, value):
"Sets the X component of the Point."
self._cs.setOrdinate(0, 0, value)
def get_y(self):
"Returns the Y component of the Point."
return self._cs.getOrdinate(1, 0)
def set_y(self, value):
"Sets the Y component of the Point."
self._cs.setOrdinate(1, 0, value)
def get_z(self):
"Returns the Z component of the Point."
if self.hasz:
return self._cs.getOrdinate(2, 0)
else:
return None
def set_z(self, value):
"Sets the Z component of the Point."
if self.hasz:
self._cs.setOrdinate(2, 0, value)
else:
raise GEOSException('Cannot set Z on 2D Point.')
# X, Y, Z properties
x = property(get_x, set_x)
y = property(get_y, set_y)
z = property(get_z, set_z)
### Tuple setting and retrieval routines. ###
def get_coords(self):
"Returns a tuple of the point."
return self._cs.tuple
def set_coords(self, tup):
"Sets the coordinates of the point with the given tuple."
self._cs[0] = tup
# The tuple and coords properties
tuple = property(get_coords, set_coords)
coords = tuple
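# --- Brief usage sketch (requires a working GEOS install; doctest-style) ---
#
# >>> p = Point(5, 23)
# >>> p.x, p.y, len(p)
# (5.0, 23.0, 2)
# >>> p.coords
# (5.0, 23.0)
# >>> p3 = Point(5, 23, 8)
# >>> p3.z, p3.hasz
# (8.0, True)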
|
bsd-3-clause
|
qrkourier/ansible
|
lib/ansible/modules/cloud/amazon/ec2_vpc_nat_gateway.py
|
12
|
32654
|
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ec2_vpc_nat_gateway
short_description: Manage AWS VPC NAT Gateways.
description:
- Ensure the state of AWS VPC NAT Gateways based on their id, allocation and subnet ids.
version_added: "2.2"
requirements: [boto3, botocore]
options:
state:
description:
- Ensure NAT Gateway is present or absent.
required: false
default: "present"
choices: ["present", "absent"]
nat_gateway_id:
description:
      - The id AWS dynamically allocates to the NAT Gateway on creation.
        This is required when state is absent.
required: false
default: None
subnet_id:
description:
      - The id of the subnet to create the NAT Gateway in. This is required
        when state is present.
required: false
default: None
allocation_id:
description:
      - The id of the elastic IP allocation. If this is not passed and the
        eip_address is not passed, an EIP is generated for this NAT Gateway.
required: false
default: None
eip_address:
description:
- The elastic IP address of the EIP you want attached to this NAT Gateway.
If this is not passed and the allocation_id is not passed,
an EIP is generated for this NAT Gateway.
required: false
if_exist_do_not_create:
description:
      - If a NAT Gateway already exists in the subnet_id, do not create a new one.
required: false
default: false
release_eip:
description:
- Deallocate the EIP from the VPC.
- Option is only valid with the absent state.
      - You should use this with the wait option, since you cannot release
        an address while a delete operation is in progress.
required: false
default: true
wait:
description:
- Wait for operation to complete before returning.
required: false
default: false
wait_timeout:
description:
- How many seconds to wait for an operation to complete before timing out.
required: false
default: 300
client_token:
description:
- Optional unique token to be used during create to ensure idempotency.
        When specifying this option, ensure you specify the eip_address parameter
        as well, otherwise any subsequent runs will fail.
required: false
author:
- "Allen Sanabria (@linuxdynasty)"
- "Jon Hadfield (@jonhadfield)"
- "Karen Cheng(@Etherdaemon)"
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: Create new nat gateway with client token.
ec2_vpc_nat_gateway:
state: present
subnet_id: subnet-12345678
eip_address: 52.1.1.1
region: ap-southeast-2
client_token: abcd-12345678
register: new_nat_gateway
- name: Create new nat gateway using an allocation-id.
ec2_vpc_nat_gateway:
state: present
subnet_id: subnet-12345678
allocation_id: eipalloc-12345678
region: ap-southeast-2
register: new_nat_gateway
- name: Create new nat gateway, using an EIP address and wait for available status.
ec2_vpc_nat_gateway:
state: present
subnet_id: subnet-12345678
eip_address: 52.1.1.1
wait: yes
region: ap-southeast-2
register: new_nat_gateway
- name: Create new nat gateway and allocate new EIP.
ec2_vpc_nat_gateway:
state: present
subnet_id: subnet-12345678
wait: yes
region: ap-southeast-2
register: new_nat_gateway
- name: Create new nat gateway and allocate new EIP if a nat gateway does not yet exist in the subnet.
ec2_vpc_nat_gateway:
state: present
subnet_id: subnet-12345678
wait: yes
region: ap-southeast-2
if_exist_do_not_create: true
register: new_nat_gateway
- name: Delete nat gateway using discovered nat gateways from facts module.
ec2_vpc_nat_gateway:
state: absent
region: ap-southeast-2
wait: yes
nat_gateway_id: "{{ item.NatGatewayId }}"
release_eip: yes
register: delete_nat_gateway_result
with_items: "{{ gateways_to_remove.result }}"
- name: Delete nat gateway and wait for deleted status.
ec2_vpc_nat_gateway:
state: absent
nat_gateway_id: nat-12345678
wait: yes
wait_timeout: 500
region: ap-southeast-2
- name: Delete nat gateway and release EIP.
ec2_vpc_nat_gateway:
state: absent
nat_gateway_id: nat-12345678
release_eip: yes
wait: yes
wait_timeout: 300
region: ap-southeast-2
'''
RETURN = '''
create_time:
    description: The ISO 8601 date time format in UTC.
returned: In all cases.
type: string
sample: "2016-03-05T05:19:20.282000+00:00'"
nat_gateway_id:
description: id of the VPC NAT Gateway
returned: In all cases.
type: string
sample: "nat-0d1e3a878585988f8"
subnet_id:
description: id of the Subnet
returned: In all cases.
type: string
sample: "subnet-12345"
state:
description: The current state of the NAT Gateway.
returned: In all cases.
type: string
sample: "available"
vpc_id:
description: id of the VPC.
returned: In all cases.
type: string
sample: "vpc-12345"
nat_gateway_addresses:
    description: List of dictionaries containing the public_ip, network_interface_id, private_ip, and allocation_id.
returned: In all cases.
type: string
sample: [
{
'public_ip': '52.52.52.52',
'network_interface_id': 'eni-12345',
'private_ip': '10.0.0.100',
'allocation_id': 'eipalloc-12345'
}
]
'''
import datetime
import random
import time
try:
import botocore
except ImportError:
pass # caught by imported HAS_BOTO3
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (ec2_argument_spec, get_aws_connection_info, boto3_conn,
camel_dict_to_snake_dict, HAS_BOTO3)
DRY_RUN_GATEWAYS = [
{
"nat_gateway_id": "nat-123456789",
"subnet_id": "subnet-123456789",
"nat_gateway_addresses": [
{
"public_ip": "55.55.55.55",
"network_interface_id": "eni-1234567",
"private_ip": "10.0.0.102",
"allocation_id": "eipalloc-1234567"
}
],
"state": "available",
"create_time": "2016-03-05T05:19:20.282000+00:00",
"vpc_id": "vpc-12345678"
}
]
DRY_RUN_ALLOCATION_UNCONVERTED = {
'Addresses': [
{
'PublicIp': '55.55.55.55',
'Domain': 'vpc',
'AllocationId': 'eipalloc-1234567'
}
]
}
DRY_RUN_MSGS = 'DryRun Mode:'
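# --- Illustrative note on the dry-run fixtures above ---
# camel_dict_to_snake_dict (imported above) recursively rewrites CamelCase
# keys to snake_case, which is how raw boto3 responses end up matching the
# DRY_RUN_* shapes; the expected mapping below is an assumption:
#
# camel_dict_to_snake_dict(DRY_RUN_ALLOCATION_UNCONVERTED)
# # -> {'addresses': [{'public_ip': '55.55.55.55',
# #                    'domain': 'vpc',
# #                    'allocation_id': 'eipalloc-1234567'}]}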
def get_nat_gateways(client, subnet_id=None, nat_gateway_id=None,
states=None, check_mode=False):
"""Retrieve a list of NAT Gateways
Args:
client (botocore.client.EC2): Boto3 client
Kwargs:
subnet_id (str): The subnet_id the nat resides in.
nat_gateway_id (str): The Amazon nat id.
states (list): States available (pending, failed, available, deleting, and deleted)
default=None
Basic Usage:
>>> client = boto3.client('ec2')
>>> subnet_id = 'subnet-12345678'
>>> get_nat_gateways(client, subnet_id)
[
true,
"",
{
"nat_gateway_id": "nat-123456789",
"subnet_id": "subnet-123456789",
"nat_gateway_addresses": [
{
"public_ip": "55.55.55.55",
"network_interface_id": "eni-1234567",
"private_ip": "10.0.0.102",
"allocation_id": "eipalloc-1234567"
}
],
"state": "deleted",
"create_time": "2016-03-05T00:33:21.209000+00:00",
"delete_time": "2016-03-05T00:36:37.329000+00:00",
"vpc_id": "vpc-12345678"
            }
        ]
Returns:
Tuple (bool, str, list)
"""
params = dict()
err_msg = ""
gateways_retrieved = False
existing_gateways = list()
if not states:
states = ['available', 'pending']
if nat_gateway_id:
params['NatGatewayIds'] = [nat_gateway_id]
else:
params['Filter'] = [
{
'Name': 'subnet-id',
'Values': [subnet_id]
},
{
'Name': 'state',
'Values': states
}
]
try:
if not check_mode:
gateways = client.describe_nat_gateways(**params)['NatGateways']
if gateways:
for gw in gateways:
existing_gateways.append(camel_dict_to_snake_dict(gw))
gateways_retrieved = True
else:
gateways_retrieved = True
if nat_gateway_id:
if DRY_RUN_GATEWAYS[0]['nat_gateway_id'] == nat_gateway_id:
existing_gateways = DRY_RUN_GATEWAYS
elif subnet_id:
if DRY_RUN_GATEWAYS[0]['subnet_id'] == subnet_id:
existing_gateways = DRY_RUN_GATEWAYS
err_msg = '{0} Retrieving gateways'.format(DRY_RUN_MSGS)
except botocore.exceptions.ClientError as e:
err_msg = str(e)
return gateways_retrieved, err_msg, existing_gateways
def wait_for_status(client, wait_timeout, nat_gateway_id, status,
check_mode=False):
"""Wait for the NAT Gateway to reach a status
Args:
client (botocore.client.EC2): Boto3 client
wait_timeout (int): Number of seconds to wait, until this timeout is reached.
nat_gateway_id (str): The Amazon nat id.
status (str): The status to wait for.
examples. status=available, status=deleted
Basic Usage:
>>> client = boto3.client('ec2')
        >>> nat_gateway_id = 'nat-123456789'
        >>> wait_for_status(client, 500, nat_gateway_id, 'available')
[
true,
"",
{
"nat_gateway_id": "nat-123456789",
"subnet_id": "subnet-1234567",
"nat_gateway_addresses": [
{
"public_ip": "55.55.55.55",
"network_interface_id": "eni-1234567",
"private_ip": "10.0.0.102",
"allocation_id": "eipalloc-12345678"
}
],
"state": "deleted",
"create_time": "2016-03-05T00:33:21.209000+00:00",
"delete_time": "2016-03-05T00:36:37.329000+00:00",
"vpc_id": "vpc-12345677"
}
]
Returns:
Tuple (bool, str, dict)
"""
polling_increment_secs = 5
wait_timeout = time.time() + wait_timeout
status_achieved = False
nat_gateway = dict()
states = ['pending', 'failed', 'available', 'deleting', 'deleted']
err_msg = ""
while wait_timeout > time.time():
try:
gws_retrieved, err_msg, nat_gateways = (
get_nat_gateways(
client, nat_gateway_id=nat_gateway_id,
states=states, check_mode=check_mode
)
)
if gws_retrieved and nat_gateways:
nat_gateway = nat_gateways[0]
if check_mode:
nat_gateway['state'] = status
if nat_gateway.get('state') == status:
status_achieved = True
break
elif nat_gateway.get('state') == 'failed':
err_msg = nat_gateway.get('failure_message')
break
elif nat_gateway.get('state') == 'pending':
if 'failure_message' in nat_gateway:
err_msg = nat_gateway.get('failure_message')
status_achieved = False
break
else:
time.sleep(polling_increment_secs)
except botocore.exceptions.ClientError as e:
err_msg = str(e)
if not status_achieved:
err_msg = "Wait time out reached, while waiting for results"
return status_achieved, err_msg, nat_gateway
def gateway_in_subnet_exists(client, subnet_id, allocation_id=None,
check_mode=False):
"""Retrieve all NAT Gateways for a subnet.
Args:
subnet_id (str): The subnet_id the nat resides in.
Kwargs:
allocation_id (str): The EIP Amazon identifier.
default = None
Basic Usage:
>>> client = boto3.client('ec2')
>>> subnet_id = 'subnet-1234567'
>>> allocation_id = 'eipalloc-1234567'
>>> gateway_in_subnet_exists(client, subnet_id, allocation_id)
(
[
{
"nat_gateway_id": "nat-123456789",
"subnet_id": "subnet-123456789",
"nat_gateway_addresses": [
{
"public_ip": "55.55.55.55",
"network_interface_id": "eni-1234567",
"private_ip": "10.0.0.102",
"allocation_id": "eipalloc-1234567"
}
],
"state": "deleted",
"create_time": "2016-03-05T00:33:21.209000+00:00",
"delete_time": "2016-03-05T00:36:37.329000+00:00",
"vpc_id": "vpc-1234567"
}
],
False
)
Returns:
Tuple (list, bool)
"""
allocation_id_exists = False
gateways = []
states = ['available', 'pending']
gws_retrieved, _, gws = (
get_nat_gateways(
client, subnet_id, states=states, check_mode=check_mode
)
)
if not gws_retrieved:
return gateways, allocation_id_exists
for gw in gws:
for address in gw['nat_gateway_addresses']:
if allocation_id:
if address.get('allocation_id') == allocation_id:
allocation_id_exists = True
gateways.append(gw)
else:
gateways.append(gw)
return gateways, allocation_id_exists
def get_eip_allocation_id_by_address(client, eip_address, check_mode=False):
"""Release an EIP from your EIP Pool
Args:
client (botocore.client.EC2): Boto3 client
eip_address (str): The Elastic IP Address of the EIP.
Kwargs:
check_mode (bool): if set to true, do not run anything and
falsify the results.
Basic Usage:
>>> client = boto3.client('ec2')
>>> eip_address = '52.87.29.36'
>>> get_eip_allocation_id_by_address(client, eip_address)
'eipalloc-36014da3'
Returns:
Tuple (str, str)
"""
params = {
'PublicIps': [eip_address],
}
allocation_id = None
err_msg = ""
try:
if not check_mode:
allocations = client.describe_addresses(**params)['Addresses']
if len(allocations) == 1:
allocation = allocations[0]
else:
allocation = None
else:
dry_run_eip = (
DRY_RUN_ALLOCATION_UNCONVERTED['Addresses'][0]['PublicIp']
)
if dry_run_eip == eip_address:
allocation = DRY_RUN_ALLOCATION_UNCONVERTED['Addresses'][0]
else:
allocation = None
if allocation:
if allocation.get('Domain') != 'vpc':
err_msg = (
"EIP {0} is a non-VPC EIP, please allocate a VPC scoped EIP"
.format(eip_address)
)
else:
allocation_id = allocation.get('AllocationId')
else:
err_msg = (
"EIP {0} does not exist".format(eip_address)
)
except botocore.exceptions.ClientError as e:
err_msg = str(e)
return allocation_id, err_msg
def allocate_eip_address(client, check_mode=False):
"""Release an EIP from your EIP Pool
Args:
client (botocore.client.EC2): Boto3 client
Kwargs:
check_mode (bool): if set to true, do not run anything and
falsify the results.
Basic Usage:
>>> client = boto3.client('ec2')
        >>> allocate_eip_address(client)
        (True, 'eipalloc id eipalloc-1234567 created', 'eipalloc-1234567')
    Returns:
        Tuple (bool, str, str)
"""
ip_allocated = False
new_eip = None
err_msg = ''
params = {
'Domain': 'vpc',
}
try:
if check_mode:
ip_allocated = True
random_numbers = (
''.join(str(x) for x in random.sample(range(0, 9), 7))
)
new_eip = 'eipalloc-{0}'.format(random_numbers)
else:
new_eip = client.allocate_address(**params)['AllocationId']
ip_allocated = True
err_msg = 'eipalloc id {0} created'.format(new_eip)
except botocore.exceptions.ClientError as e:
err_msg = str(e)
return ip_allocated, err_msg, new_eip
def release_address(client, allocation_id, check_mode=False):
"""Release an EIP from your EIP Pool
Args:
client (botocore.client.EC2): Boto3 client
allocation_id (str): The eip Amazon identifier.
Kwargs:
check_mode (bool): if set to true, do not run anything and
falsify the results.
Basic Usage:
>>> client = boto3.client('ec2')
>>> allocation_id = "eipalloc-123456"
>>> release_address(client, allocation_id)
True
Returns:
        Tuple (bool, str)
"""
err_msg = ''
if check_mode:
return True, ''
ip_released = False
try:
client.describe_addresses(AllocationIds=[allocation_id])
except botocore.exceptions.ClientError as e:
# IP address likely already released
# Happens with gateway in 'deleted' state that
# still lists associations
return True, str(e)
try:
client.release_address(AllocationId=allocation_id)
ip_released = True
except botocore.exceptions.ClientError as e:
err_msg = str(e)
return ip_released, err_msg
def create(client, subnet_id, allocation_id, client_token=None,
wait=False, wait_timeout=0, if_exist_do_not_create=False,
check_mode=False):
"""Create an Amazon NAT Gateway.
Args:
client (botocore.client.EC2): Boto3 client
subnet_id (str): The subnet_id the nat resides in.
allocation_id (str): The eip Amazon identifier.
Kwargs:
if_exist_do_not_create (bool): if a nat gateway already exists in this
            subnet, then do not create another one.
default = False
        wait (bool): Wait for the nat gateway to be in the available state before returning.
default = False
wait_timeout (int): Number of seconds to wait, until this timeout is reached.
default = 0
client_token (str):
default = None
Basic Usage:
>>> client = boto3.client('ec2')
>>> subnet_id = 'subnet-1234567'
>>> allocation_id = 'eipalloc-1234567'
>>> create(client, subnet_id, allocation_id, if_exist_do_not_create=True, wait=True, wait_timeout=500)
[
true,
"",
{
"nat_gateway_id": "nat-123456789",
"subnet_id": "subnet-1234567",
"nat_gateway_addresses": [
{
"public_ip": "55.55.55.55",
"network_interface_id": "eni-1234567",
"private_ip": "10.0.0.102",
"allocation_id": "eipalloc-1234567"
}
],
"state": "deleted",
"create_time": "2016-03-05T00:33:21.209000+00:00",
"delete_time": "2016-03-05T00:36:37.329000+00:00",
"vpc_id": "vpc-1234567"
}
]
Returns:
Tuple (bool, str, list)
"""
params = {
'SubnetId': subnet_id,
'AllocationId': allocation_id
}
request_time = datetime.datetime.utcnow()
changed = False
success = False
token_provided = False
err_msg = ""
if client_token:
token_provided = True
params['ClientToken'] = client_token
try:
if not check_mode:
result = camel_dict_to_snake_dict(client.create_nat_gateway(**params)["NatGateway"])
else:
result = DRY_RUN_GATEWAYS[0]
result['create_time'] = datetime.datetime.utcnow()
            result['nat_gateway_addresses'][0]['allocation_id'] = allocation_id
result['subnet_id'] = subnet_id
success = True
changed = True
create_time = result['create_time'].replace(tzinfo=None)
if token_provided and (request_time > create_time):
changed = False
elif wait:
success, err_msg, result = (
wait_for_status(
client, wait_timeout, result['nat_gateway_id'], 'available',
check_mode=check_mode
)
)
if success:
err_msg = (
'NAT gateway {0} created'.format(result['nat_gateway_id'])
)
except botocore.exceptions.ClientError as e:
if "IdempotentParameterMismatch" in e.message:
err_msg = (
'NAT Gateway does not support update and token has already been provided'
)
else:
err_msg = str(e)
success = False
changed = False
result = None
return success, changed, err_msg, result
def pre_create(client, subnet_id, allocation_id=None, eip_address=None,
if_exist_do_not_create=False, wait=False, wait_timeout=0,
client_token=None, check_mode=False):
"""Create an Amazon NAT Gateway.
Args:
client (botocore.client.EC2): Boto3 client
subnet_id (str): The subnet_id the nat resides in.
Kwargs:
allocation_id (str): The EIP Amazon identifier.
default = None
eip_address (str): The Elastic IP Address of the EIP.
default = None
if_exist_do_not_create (bool): if a nat gateway already exists in this
            subnet, then do not create another one.
default = False
        wait (bool): Wait for the nat gateway to be in the available state before returning.
default = False
wait_timeout (int): Number of seconds to wait, until this timeout is reached.
default = 0
client_token (str):
default = None
Basic Usage:
>>> client = boto3.client('ec2')
>>> subnet_id = 'subnet-w4t12897'
>>> allocation_id = 'eipalloc-36014da3'
>>> pre_create(client, subnet_id, allocation_id, if_exist_do_not_create=True, wait=True, wait_timeout=500)
[
true,
"",
{
"nat_gateway_id": "nat-03835afb6e31df79b",
"subnet_id": "subnet-w4t12897",
"nat_gateway_addresses": [
{
"public_ip": "52.87.29.36",
"network_interface_id": "eni-5579742d",
"private_ip": "10.0.0.102",
"allocation_id": "eipalloc-36014da3"
}
],
"state": "deleted",
"create_time": "2016-03-05T00:33:21.209000+00:00",
"delete_time": "2016-03-05T00:36:37.329000+00:00",
"vpc_id": "vpc-w68571b5"
}
]
Returns:
Tuple (bool, bool, str, list)
"""
success = False
changed = False
err_msg = ""
results = list()
if not allocation_id and not eip_address:
existing_gateways, allocation_id_exists = (
gateway_in_subnet_exists(client, subnet_id, check_mode=check_mode)
)
if len(existing_gateways) > 0 and if_exist_do_not_create:
success = True
changed = False
results = existing_gateways[0]
err_msg = (
'NAT Gateway {0} already exists in subnet_id {1}'
.format(
existing_gateways[0]['nat_gateway_id'], subnet_id
)
)
return success, changed, err_msg, results
else:
success, err_msg, allocation_id = (
allocate_eip_address(client, check_mode=check_mode)
)
if not success:
                return success, False, err_msg, dict()
elif eip_address or allocation_id:
if eip_address and not allocation_id:
allocation_id, err_msg = (
get_eip_allocation_id_by_address(
client, eip_address, check_mode=check_mode
)
)
if not allocation_id:
success = False
changed = False
return success, changed, err_msg, dict()
existing_gateways, allocation_id_exists = (
gateway_in_subnet_exists(
client, subnet_id, allocation_id, check_mode=check_mode
)
)
if len(existing_gateways) > 0 and (allocation_id_exists or if_exist_do_not_create):
success = True
changed = False
results = existing_gateways[0]
err_msg = (
'NAT Gateway {0} already exists in subnet_id {1}'
.format(
existing_gateways[0]['nat_gateway_id'], subnet_id
)
)
return success, changed, err_msg, results
success, changed, err_msg, results = create(
client, subnet_id, allocation_id, client_token,
wait, wait_timeout, if_exist_do_not_create, check_mode=check_mode
)
return success, changed, err_msg, results
def remove(client, nat_gateway_id, wait=False, wait_timeout=0,
release_eip=False, check_mode=False):
"""Delete an Amazon NAT Gateway.
Args:
client (botocore.client.EC2): Boto3 client
nat_gateway_id (str): The Amazon nat id.
Kwargs:
wait (bool): Wait for the nat to be in the deleted state before returning.
        wait_timeout (int): Maximum number of seconds to wait before giving up.
release_eip (bool): Once the nat has been deleted, you can deallocate the eip from the vpc.
Basic Usage:
>>> client = boto3.client('ec2')
>>> nat_gw_id = 'nat-03835afb6e31df79b'
>>> remove(client, nat_gw_id, wait=True, wait_timeout=500, release_eip=True)
[
true,
"",
{
"nat_gateway_id": "nat-03835afb6e31df79b",
"subnet_id": "subnet-w4t12897",
"nat_gateway_addresses": [
{
"public_ip": "52.87.29.36",
"network_interface_id": "eni-5579742d",
"private_ip": "10.0.0.102",
"allocation_id": "eipalloc-36014da3"
}
],
"state": "deleted",
"create_time": "2016-03-05T00:33:21.209000+00:00",
"delete_time": "2016-03-05T00:36:37.329000+00:00",
"vpc_id": "vpc-w68571b5"
}
]
Returns:
        Tuple (bool, bool, str, dict)
"""
params = {
'NatGatewayId': nat_gateway_id
}
success = False
changed = False
err_msg = ""
    results = list()
    allocation_id = None
states = ['pending', 'available']
try:
exist, _, gw = (
get_nat_gateways(
client, nat_gateway_id=nat_gateway_id,
states=states, check_mode=check_mode
)
)
if exist and len(gw) == 1:
results = gw[0]
if not check_mode:
client.delete_nat_gateway(**params)
allocation_id = (
results['nat_gateway_addresses'][0]['allocation_id']
)
changed = True
success = True
err_msg = (
'NAT gateway {0} is in a deleting state. Delete was successful'
.format(nat_gateway_id)
)
if wait:
status_achieved, err_msg, results = (
wait_for_status(
client, wait_timeout, nat_gateway_id, 'deleted',
check_mode=check_mode
)
)
if status_achieved:
err_msg = (
'NAT gateway {0} was deleted successfully'
.format(nat_gateway_id)
)
except botocore.exceptions.ClientError as e:
err_msg = str(e)
    if release_eip and allocation_id:
eip_released, eip_err = (
release_address(client, allocation_id, check_mode)
)
if not eip_released:
err_msg = (
"{0}: Failed to release EIP {1}: {2}"
.format(err_msg, allocation_id, eip_err)
)
success = False
return success, changed, err_msg, results
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
subnet_id=dict(type='str'),
eip_address=dict(type='str'),
allocation_id=dict(type='str'),
if_exist_do_not_create=dict(type='bool', default=False),
state=dict(default='present', choices=['present', 'absent']),
wait=dict(type='bool', default=False),
wait_timeout=dict(type='int', default=320, required=False),
release_eip=dict(type='bool', default=False),
nat_gateway_id=dict(type='str'),
client_token=dict(type='str'),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=[
['allocation_id', 'eip_address']
],
required_if=[['state', 'absent', ['nat_gateway_id']],
['state', 'present', ['subnet_id']]]
)
# Validate Requirements
if not HAS_BOTO3:
module.fail_json(msg='botocore/boto3 is required.')
state = module.params.get('state').lower()
check_mode = module.check_mode
subnet_id = module.params.get('subnet_id')
allocation_id = module.params.get('allocation_id')
eip_address = module.params.get('eip_address')
nat_gateway_id = module.params.get('nat_gateway_id')
wait = module.params.get('wait')
wait_timeout = module.params.get('wait_timeout')
release_eip = module.params.get('release_eip')
client_token = module.params.get('client_token')
if_exist_do_not_create = module.params.get('if_exist_do_not_create')
try:
region, ec2_url, aws_connect_kwargs = (
get_aws_connection_info(module, boto3=True)
)
client = (
boto3_conn(
module, conn_type='client', resource='ec2',
region=region, endpoint=ec2_url, **aws_connect_kwargs
)
)
except botocore.exceptions.ClientError as e:
        module.fail_json(msg="Boto3 Client Error - " + str(e))
changed = False
err_msg = ''
if state == 'present':
success, changed, err_msg, results = (
pre_create(
client, subnet_id, allocation_id, eip_address,
if_exist_do_not_create, wait, wait_timeout,
client_token, check_mode=check_mode
)
)
else:
success, changed, err_msg, results = (
remove(
client, nat_gateway_id, wait, wait_timeout, release_eip,
check_mode=check_mode
)
)
if not success:
module.fail_json(
msg=err_msg, success=success, changed=changed
)
else:
module.exit_json(
msg=err_msg, success=success, changed=changed, **results
)
if __name__ == '__main__':
main()
|
gpl-3.0
|
aabbox/kbengine
|
kbe/src/lib/python/Lib/turtledemo/tree.py
|
85
|
1425
|
#!/usr/bin/env python3
""" turtle-example-suite:
tdemo_tree.py
Displays a 'breadth-first-tree' - in contrast
to the classical Logo tree drawing programs,
which use a depth-first-algorithm.
Uses:
(1) a tree-generator, where the drawing is
quasi the side-effect, whereas the generator
always yields None.
(2) Turtle-cloning: At each branching point
the current pen is cloned. So in the end
there are 1024 turtles.
"""
from turtle import Turtle, mainloop
from time import perf_counter as clock  # time.clock was removed in Python 3.8
def tree(plist, l, a, f):
""" plist is list of pens
l is length of branch
a is half of the angle between 2 branches
f is factor by which branch is shortened
from level to level."""
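    # Breadth-first: every pen in plist draws and is cloned at the current
    # level before the next, shorter level is generated recursively.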
if l > 3:
lst = []
for p in plist:
p.forward(l)
q = p.clone()
p.left(a)
q.right(a)
lst.append(p)
lst.append(q)
for x in tree(lst, l*f, a, f):
yield None
def maketree():
p = Turtle()
p.setundobuffer(None)
p.hideturtle()
p.speed(0)
p.getscreen().tracer(30,0)
p.left(90)
p.penup()
p.forward(-210)
p.pendown()
t = tree([p], 200, 65, 0.6375)
for x in t:
pass
print(len(p.getscreen().turtles()))
def main():
a=clock()
maketree()
b=clock()
return "done: %.2f sec." % (b-a)
if __name__ == "__main__":
msg = main()
print(msg)
mainloop()
|
lgpl-3.0
|
vveerava/Openstack
|
neutron/agent/linux/external_process.py
|
5
|
10207
|
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import eventlet
from oslo.config import cfg
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.i18n import _LE
from neutron.openstack.common import lockutils
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
OPTS = [
cfg.StrOpt('external_pids',
default='$state_path/external/pids',
help=_('Location to store child pid files')),
cfg.StrOpt('check_child_processes_action', default='respawn',
choices=['respawn', 'exit'],
help=_('Action to be executed when a child process dies')),
cfg.IntOpt('check_child_processes_interval', default=0,
help=_('Interval between checks of child process liveness '
'(seconds), use 0 to disable')),
]
cfg.CONF.register_opts(OPTS)
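# A sample agent configuration exercising these options (hypothetical
# values, for illustration only):
#
#   [DEFAULT]
#   external_pids = $state_path/external/pids
#   check_child_processes_action = respawn
#   check_child_processes_interval = 60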
class ProcessManager(object):
"""An external process manager for Neutron spawned processes.
Note: The manager expects uuid to be in cmdline.
"""
def __init__(self, conf, uuid, root_helper='sudo',
namespace=None, service=None, pids_path=None,
default_cmd_callback=None,
cmd_addl_env=None):
self.conf = conf
self.uuid = uuid
self.root_helper = root_helper
self.namespace = namespace
self.default_cmd_callback = default_cmd_callback
self.cmd_addl_env = cmd_addl_env
self.pids_path = pids_path or self.conf.external_pids
if service:
self.service_pid_fname = 'pid.' + service
self.service = service
else:
self.service_pid_fname = 'pid'
self.service = 'default-service'
def enable(self, cmd_callback=None, reload_cfg=False):
if not self.active:
if not cmd_callback:
cmd_callback = self.default_cmd_callback
cmd = cmd_callback(self.get_pid_file_name(ensure_pids_dir=True))
ip_wrapper = ip_lib.IPWrapper(self.root_helper, self.namespace)
ip_wrapper.netns.execute(cmd, addl_env=self.cmd_addl_env)
elif reload_cfg:
self.reload_cfg()
def reload_cfg(self):
self.disable('HUP')
def disable(self, sig='9'):
pid = self.pid
if self.active:
cmd = ['kill', '-%s' % (sig), pid]
utils.execute(cmd, self.root_helper)
# In the case of shutting down, remove the pid file
if sig == '9':
utils.remove_conf_file(self.pids_path,
self.uuid,
self.service_pid_fname)
elif pid:
LOG.debug('Process for %(uuid)s pid %(pid)d is stale, ignoring '
'signal %(signal)s', {'uuid': self.uuid, 'pid': pid,
'signal': sig})
else:
LOG.debug('No process started for %s', self.uuid)
def get_pid_file_name(self, ensure_pids_dir=False):
"""Returns the file name for a given kind of config file."""
return utils.get_conf_file_name(self.pids_path,
self.uuid,
self.service_pid_fname,
ensure_pids_dir)
@property
def pid(self):
"""Last known pid for this external process spawned for this uuid."""
return utils.get_value_from_conf_file(self.pids_path,
self.uuid,
self.service_pid_fname,
int)
@property
def active(self):
pid = self.pid
if pid is None:
return False
cmdline = '/proc/%s/cmdline' % pid
try:
with open(cmdline, "r") as f:
return self.uuid in f.readline()
except IOError:
return False
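# A minimal usage sketch (hypothetical uuid and command, not part of this
# module): the callback receives the pid file path and returns the command
# to spawn; ProcessManager then tracks liveness via /proc/<pid>/cmdline.
#
#   def callback(pid_file):
#       return ['dnsmasq', '--conf-file=/etc/dnsmasq/some-uuid.conf',
#               '--pid-file=%s' % pid_file]
#
#   pm = ProcessManager(cfg.CONF, uuid='some-uuid',
#                       default_cmd_callback=callback)
#   pm.enable()    # spawns the command, optionally inside a namespace
#   pm.active      # True while the pid's cmdline still contains the uuid
#   pm.disable()   # kill -9 and remove the pid file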
ServiceId = collections.namedtuple('ServiceId', ['uuid', 'service'])
class ProcessMonitor(object):
def __init__(self, config, root_helper, resource_type, exit_handler):
"""Handle multiple process managers and watch over all of them.
:param config: oslo config object with the agent configuration.
:type config: oslo.config.ConfigOpts
:param root_helper: root helper to be used with new ProcessManagers
:type root_helper: str
:param resource_type: can be dhcp, router, load_balancer, etc.
:type resource_type: str
:param exit_handler: function to execute when agent exit has to
be executed, it should take care of actual
exit
        :type exit_handler: function
"""
self._config = config
self._root_helper = root_helper
self._resource_type = resource_type
self._exit_handler = exit_handler
self._process_managers = {}
if self._config.check_child_processes_interval:
self._spawn_checking_thread()
def enable(self, uuid, cmd_callback, namespace=None, service=None,
reload_cfg=False, cmd_addl_env=None):
"""Creates a process and ensures that it is monitored.
It will create a new ProcessManager and tie it to the uuid/service.
"""
process_manager = ProcessManager(conf=self._config,
uuid=uuid,
root_helper=self._root_helper,
namespace=namespace,
service=service,
default_cmd_callback=cmd_callback,
cmd_addl_env=cmd_addl_env)
process_manager.enable(reload_cfg=reload_cfg)
service_id = ServiceId(uuid, service)
self._process_managers[service_id] = process_manager
def disable(self, uuid, namespace=None, service=None):
"""Disables the process and stops monitoring it."""
service_id = ServiceId(uuid, service)
process_manager = self._process_managers.pop(service_id, None)
        # We could be trying to disable a process_manager which was
        # started on a separate run of this agent, or during netns-cleanup;
        # in that case we won't know about such a uuid, so we build a
        # fresh ProcessManager just to kill the process.
if not process_manager:
process_manager = ProcessManager(conf=self._config,
uuid=uuid,
root_helper=self._root_helper,
namespace=namespace,
service=service)
process_manager.disable()
def disable_all(self):
for service_id in self._process_managers.keys():
self.disable(uuid=service_id.uuid, service=service_id.service)
def get_process_manager(self, uuid, service=None):
"""Returns a process manager for manipulation"""
service_id = ServiceId(uuid, service)
return self._process_managers.get(service_id)
def _get_process_manager_attribute(self, attribute, uuid, service=None):
process_manager = self.get_process_manager(uuid, service)
if process_manager:
return getattr(process_manager, attribute)
else:
return False
def is_active(self, uuid, service=None):
return self._get_process_manager_attribute('active', uuid, service)
def get_pid(self, uuid, service=None):
return self._get_process_manager_attribute('pid', uuid, service)
def _spawn_checking_thread(self):
eventlet.spawn(self._periodic_checking_thread)
@lockutils.synchronized("_check_child_processes")
def _check_child_processes(self):
# we build the list of keys before iterating in the loop to cover
# the case where other threads add or remove items from the
# dictionary which otherwise will cause a RuntimeError
for service_id in list(self._process_managers):
pm = self._process_managers.get(service_id)
if pm and not pm.active:
LOG.error(_LE("%(service)s for %(resource_type)s "
"with uuid %(uuid)s not found. "
"The process should not have died"),
{'service': pm.service,
'resource_type': self._resource_type,
'uuid': service_id.uuid})
self._execute_action(service_id)
eventlet.sleep(0)
def _periodic_checking_thread(self):
while True:
eventlet.sleep(self._config.check_child_processes_interval)
eventlet.spawn(self._check_child_processes)
def _execute_action(self, service_id):
action_function = getattr(
self, "_%s_action" % self._config.check_child_processes_action)
action_function(service_id)
def _respawn_action(self, service_id):
LOG.error(_LE("respawning %(service)s for uuid %(uuid)s"),
{'service': service_id.service,
'uuid': service_id.uuid})
self._process_managers[service_id].enable()
def _exit_action(self, service_id):
LOG.error(_LE("Exiting agent as programmed in check_child_processes_"
"actions"))
self._exit_handler(service_id.uuid, service_id.service)
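# A minimal wiring sketch (hypothetical callback and exit handler, not part
# of this module): with check_child_processes_interval > 0 the monitor polls
# every process it enabled and either respawns it or calls exit_handler,
# depending on the check_child_processes_action option.
#
#   monitor = ProcessMonitor(cfg.CONF, root_helper='sudo',
#                            resource_type='dhcp',
#                            exit_handler=my_agent_exit)
#   monitor.enable(uuid='some-network-id', cmd_callback=callback)
#   monitor.disable(uuid='some-network-id')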
|
apache-2.0
|
sapfo/medeas
|
processing/visualstack.py
|
1
|
1770
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 21 18:35:25 2017
@author: ivan
"""
import numpy as np
import sys
import matplotlib.pyplot as plt
with open('haplotype_labels.txt') as f:
labels0 = f.readlines()
labels = np.array([l.split()[1] for l in labels0])
groups = np.array([l.split()[0] for l in labels0])
labels = labels[np.where(groups == 'ABO')]
chunk_size = 1000
samples = int(sys.argv[1]), int(sys.argv[2])
chromosomes = range(1, 23)
sample_labels = labels[np.array(samples)]
aspect = 200
f, axarr = plt.subplots(2*len(chromosomes), sharex=True)
for order, num in enumerate(chromosomes):
ancestry = np.genfromtxt(f'eur.chi.pap.wcd.abo.chr{num}.g10.txt.0.Viterbi.txt',
dtype='int8')
for subord, index in enumerate(samples):
print(f'Processed chromosome #{num}, outlier #{index}')
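        # Smooth by summing per-site ancestry calls over non-overlapping
        # windows of chunk_size sites; each window becomes one colour cell
        # in this sample's row.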
ancestry_smooth = np.zeros((ancestry.T[index].shape[0]//chunk_size,))
for i in range(ancestry_smooth.shape[0]):
ancestry_smooth[i:i+1] = np.sum(ancestry.T[index][chunk_size*i:
chunk_size*i+chunk_size],
axis=0)
i = 2*order + subord
axarr[i].get_yaxis().set_ticks([])
axarr[i].set_aspect(aspect)
axarr[i].set_ylim([0, 1])
axarr[i].pcolormesh(ancestry_smooth.reshape(1, ancestry_smooth.shape[0]),
vmin=chunk_size, vmax=4*chunk_size)
axarr[2*len(chromosomes)-1].set_xlabel(f'Site/{chunk_size}')
axarr[len(chromosomes)].set_ylabel('Chromosomes')
axarr[0].set_title(f'Haplogroups {sample_labels}')
plt.savefig(f'Ancestry.outliers.samples.{samples[0]}.{samples[1]}.pdf')
print('Figure saved')
|
gpl-3.0
|
pquentin/libcloud
|
libcloud/test/compute/test_indosat.py
|
12
|
1311
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from libcloud.compute.drivers.indosat import IndosatNodeDriver
from libcloud.test.compute.test_dimensiondata_v2_3 import DimensionDataMockHttp, DimensionData_v2_3_Tests
class IndosatNodeDriverTests(DimensionData_v2_3_Tests, unittest.TestCase):
def setUp(self):
IndosatNodeDriver.connectionCls.conn_class = DimensionDataMockHttp
IndosatNodeDriver.connectionCls.active_api_version = '2.3'
DimensionDataMockHttp.type = None
self.driver = IndosatNodeDriver('user', 'password')
|
apache-2.0
|
Jobava/bedrock
|
bedrock/mozorg/tests/test_views.py
|
11
|
46992
|
# -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from datetime import date
import json
import basket
from django.conf import settings
from django.core import mail
from django.core.cache import cache
from django.db.utils import DatabaseError
from django.http.response import Http404
from django.test.client import RequestFactory
from django.test.utils import override_settings
from captcha.fields import ReCaptchaField
from bedrock.base.urlresolvers import reverse
from jinja2.exceptions import TemplateNotFound
from requests.exceptions import Timeout
from mock import ANY, Mock, patch
from nose.tools import assert_false, eq_, ok_
from bedrock.mozorg.tests import TestCase
from bedrock.mozorg import views
from lib import l10n_utils
from scripts import update_tableau_data
_ALL = settings.STUB_INSTALLER_ALL
class TestContributeTasks(TestCase):
def setUp(self):
self.rf = RequestFactory()
self.view = views.ContributeTasks()
def test_query_param(self):
self.view.request = self.rf.get('/', {'variation': '3'})
cxt = self.view.get_context_data()
self.assertEqual(cxt['variation'], '3')
def test_no_query_param(self):
self.view.request = self.rf.get('/')
cxt = self.view.get_context_data()
self.assertEqual(cxt['variation'], '4')
def test_invalid_query_param(self):
self.view.request = self.rf.get('/', {'variation': 'dude'})
cxt = self.view.get_context_data()
self.assertNotIn('variation', cxt)
class TestContributeTasksSurvey(TestCase):
def setUp(self):
self.rf = RequestFactory()
self.view = views.ContributeTasksSurvey()
def test_query_param(self):
self.view.request = self.rf.get('/', {'task': '3'})
cxt = self.view.get_context_data()
self.assertEqual(cxt['task'], '3')
def test_no_query_param(self):
self.view.request = self.rf.get('/')
cxt = self.view.get_context_data()
self.assertNotIn('task', cxt)
def test_invalid_query_param(self):
self.view.request = self.rf.get('/', {'task': 'dude'})
cxt = self.view.get_context_data()
self.assertNotIn('task', cxt)
@patch('bedrock.mozorg.views.basket.request')
def test_basket_data(self, basket_mock):
req = self.rf.post('/', {
'email': '[email protected]',
'name': 'The Dude',
'privacy': 'yes',
'country': 'us',
})
resp = views.ContributeTasksSurvey.as_view()(req)
self.assertEqual(resp.status_code, 302)
basket_mock.assert_called_with('post', 'get-involved', {
'email': '[email protected]',
'name': 'The Dude',
'country': 'us',
'interest_id': 'dontknow',
'lang': 'en-US',
'source_url': 'http://testserver/',
})
@patch('bedrock.mozorg.views.basket.request')
def test_privacy_required(self, basket_mock):
req = self.rf.post('/', {
'email': '[email protected]',
'name': 'The Dude',
'country': 'us',
})
resp = views.ContributeTasksSurvey.as_view()(req)
self.assertEqual(resp.status_code, 200)
self.assertFalse(basket_mock.called)
@patch('bedrock.mozorg.views.basket.request')
def test_basket_error(self, basket_mock):
basket_mock.side_effect = basket.BasketException
req = self.rf.post('/', {
'email': '[email protected]',
'name': 'The Dude',
'privacy': 'yes',
'country': 'us',
})
with self.activate('en-US'):
resp = views.ContributeTasksSurvey.as_view()(req)
self.assertEqual(resp.status_code, 200)
self.assertIn('We apologize, but an error occurred in our system.', resp.content)
self.assertTrue(basket_mock.called)
@patch('bedrock.mozorg.views.basket.request')
def test_basket_email_error(self, basket_mock):
basket_mock.side_effect = basket.BasketException(code=basket.errors.BASKET_INVALID_EMAIL)
req = self.rf.post('/', {
'email': '[email protected]',
'name': 'The Dude',
'privacy': 'yes',
'country': 'us',
})
with self.activate('en-US'):
resp = views.ContributeTasksSurvey.as_view()(req)
self.assertEqual(resp.status_code, 200)
self.assertIn('Whoops! Be sure to enter a valid email address.', resp.content)
self.assertTrue(basket_mock.called)
class TestContributeSignup(TestCase):
def setUp(self):
self.rf = RequestFactory()
@patch('lib.l10n_utils.render')
def test_thankyou_get_proper_context(self, render_mock):
"""category added to context from querystring on thankyou."""
view = views.ContributeSignupThankyou.as_view()
req = self.rf.get('/thankyou/?c=thedude')
view(req)
render_mock.assert_called_with(req, [views.ContributeSignupThankyou.template_name],
{'category': 'thedude', 'view': ANY})
# too long
req = self.rf.get('/thankyou/?c=thisismuchtoolongtogetintocontext')
view(req)
render_mock.assert_called_with(req, [views.ContributeSignupThankyou.template_name],
{'view': ANY})
# bad characters
req = self.rf.get('/thankyou/?c=this-is-bad')
view(req)
render_mock.assert_called_with(req, [views.ContributeSignupThankyou.template_name],
{'view': ANY})
@patch.object(views, 'basket')
def test_send_to_basket(self, basket_mock):
req = self.rf.post('/', {
'name': 'The Dude',
'email': '[email protected]',
'privacy': 'Yes',
'category': 'dontknow',
'country': 'us',
'format': 'T',
})
req.locale = 'en-US'
resp = views.ContributeSignup.as_view()(req)
self.assertEqual(resp.status_code, 302)
self.assertTrue(resp['location'].endswith('?c=dontknow'))
basket_mock.request.assert_called_with('post', 'get-involved', {
'name': 'The Dude',
'email': '[email protected]',
'interest_id': 'dontknow',
'lang': 'en-US',
'country': 'us',
'message': '',
'source_url': 'http://testserver/',
'format': 'T',
})
@patch.object(views, 'basket')
def test_invalid_form_no_basket(self, basket_mock):
# 'coding' requires area_coding field.
req = self.rf.post('/', {
'name': 'The Dude',
'email': '[email protected]',
'privacy': 'Yes',
'category': 'coding',
'country': 'us',
})
resp = views.ContributeSignup.as_view()(req)
self.assertEqual(resp.status_code, 200)
self.assertFalse(basket_mock.called)
# 'privacy' required
req = self.rf.post('/', {
'name': 'The Dude',
'email': '[email protected]',
'category': 'dontknow',
'country': 'us',
})
resp = views.ContributeSignup.as_view()(req)
self.assertEqual(resp.status_code, 200)
self.assertFalse(basket_mock.called)
# 'email' required
req = self.rf.post('/', {
'name': 'The Dude',
'privacy': 'Yes',
'category': 'dontknow',
'country': 'us',
})
resp = views.ContributeSignup.as_view()(req)
self.assertEqual(resp.status_code, 200)
self.assertFalse(basket_mock.called)
class TestContributeSignupSwitcher(TestCase):
def setUp(self):
self.rf = RequestFactory()
patcher = patch('bedrock.mozorg.views.lang_file_has_tag')
self.lang_file_has_tag = patcher.start()
self.addCleanup(patcher.stop)
patcher = patch('bedrock.mozorg.views.ContributeSignup')
self.ContributeSignup = patcher.start()
self.addCleanup(patcher.stop)
patcher = patch('bedrock.mozorg.views.ContributeSignupOldForm')
self.ContributeSignupOldForm = patcher.start()
self.addCleanup(patcher.stop)
def test_uses_new_view(self):
"""Uses new view when lang file has the tag."""
req = self.rf.get('/')
self.lang_file_has_tag.return_value = True
views.contribute_signup(req)
self.ContributeSignup.as_view.return_value.assert_called_with(req)
self.assertFalse(self.ContributeSignupOldForm.as_view.return_value.called)
def test_uses_old_view(self):
"""Uses old view when lang file does not have the tag."""
req = self.rf.get('/')
self.lang_file_has_tag.return_value = False
views.contribute_signup(req)
self.ContributeSignupOldForm.as_view.return_value.assert_called_with(req)
self.assertFalse(self.ContributeSignup.as_view.return_value.called)
@patch('bedrock.mozorg.views.l10n_utils.render')
class TestHome(TestCase):
def setUp(self):
self.rf = RequestFactory()
@override_settings(MOBILIZER_LOCALE_LINK={'en-US': 'His Dudeness', 'de': 'Herr Dude'})
def test_gets_right_mobilizer_url(self, resp_mock):
"""Home page should get correct mobilizer link for locale."""
req = self.rf.get('/')
req.locale = 'de'
views.home(req)
ctx = resp_mock.call_args[0][2]
self.assertEqual(ctx['mobilizer_link'], 'Herr Dude')
@override_settings(MOBILIZER_LOCALE_LINK={'en-US': 'His Dudeness', 'de': 'Herr Dude'})
def test_gets_default_mobilizer_url(self, resp_mock):
"""Home page should get default mobilizer link for other locale."""
req = self.rf.get('/')
req.locale = 'xx' # does not exist
views.home(req)
ctx = resp_mock.call_args[0][2]
self.assertEqual(ctx['mobilizer_link'], 'His Dudeness')
class TestViews(TestCase):
def test_hacks_newsletter_frames_allow(self):
"""
Bedrock pages get the 'x-frame-options: DENY' header by default.
The hacks newsletter page is framed, so needs to ALLOW.
"""
with self.activate('en-US'):
resp = self.client.get(reverse('mozorg.hacks_newsletter'))
ok_('x-frame-options' not in resp)
@override_settings(STUB_INSTALLER_LOCALES={'win': _ALL})
def test_download_button_funnelcake(self):
"""The download button should have the funnelcake ID."""
with self.activate('en-US'):
resp = self.client.get(reverse('mozorg.home'), {'f': '5'})
ok_('product=firefox-stub-f5&' in resp.content)
@override_settings(STUB_INSTALLER_LOCALES={'win': _ALL})
def test_download_button_bad_funnelcake(self):
"""The download button should not have a bad funnelcake ID."""
with self.activate('en-US'):
resp = self.client.get(reverse('mozorg.home'), {'f': '5dude'})
ok_('product=firefox-stub&' in resp.content)
ok_('product=firefox-stub-f5dude&' not in resp.content)
resp = self.client.get(reverse('mozorg.home'), {'f': '999999999'})
ok_('product=firefox-stub&' in resp.content)
ok_('product=firefox-stub-f999999999&' not in resp.content)
class TestStudentAmbassadorsJoin(TestCase):
@patch.object(ReCaptchaField, 'clean', Mock())
@patch('bedrock.mozorg.forms.request')
@patch('bedrock.mozorg.forms.basket.subscribe')
def test_subscribe(self, mock_subscribe, mock_request):
mock_subscribe.return_value = {'token': 'token-example',
'status': 'ok',
'created': 'True'}
data = {'email': u'[email protected]',
'country': 'gr',
'fmt': 'H',
'first_name': 'foo',
'last_name': 'bar',
'status': 'teacher',
'school': 'TuC',
'city': 'Chania',
'age_confirmation': 'on',
'grad_year': '',
'nl_about_mozilla': 'on',
'major': '',
'major_free_text': '',
'privacy': 'True'}
request_data = {'FIRST_NAME': data['first_name'],
'LAST_NAME': data['last_name'],
'STUDENTS_CURRENT_STATUS': data['status'],
'STUDENTS_SCHOOL': data['school'],
'STUDENTS_GRAD_YEAR': data['grad_year'],
'STUDENTS_MAJOR': data['major'],
'COUNTRY_': data['country'],
'STUDENTS_CITY': data['city'],
'STUDENTS_ALLOW_SHARE': 'N'}
with self.activate('en-US'):
self.client.post(reverse('mozorg.contribute.studentambassadors.join'), data)
mock_subscribe.assert_called_with(
data['email'], ['ambassadors', 'about-mozilla'], format=u'H',
country=u'gr', source_url=u'', sync='Y',
welcome_message='Student_Ambassadors_Welcome')
mock_request.assert_called_with('post',
'custom_update_student_ambassadors',
token='token-example',
data=request_data)
class TestContributeStudentAmbassadorsLanding(TestCase):
def setUp(self):
self.rf = RequestFactory()
self.get_req = self.rf.get('/')
self.no_exist = views.TwitterCache.DoesNotExist()
cache.clear()
@patch.object(views.l10n_utils, 'render')
@patch.object(views.TwitterCache.objects, 'get')
def test_db_exception_works(self, mock_manager, mock_render):
"""View should function properly without the DB."""
mock_manager.side_effect = DatabaseError
views.contribute_studentambassadors_landing(self.get_req)
mock_render.assert_called_with(ANY, ANY, {'tweets': []})
@patch.object(views.l10n_utils, 'render')
@patch.object(views.TwitterCache.objects, 'get')
def test_no_db_row_works(self, mock_manager, mock_render):
"""View should function properly without data in the DB."""
mock_manager.side_effect = views.TwitterCache.DoesNotExist
views.contribute_studentambassadors_landing(self.get_req)
mock_render.assert_called_with(ANY, ANY, {'tweets': []})
@patch.object(views.l10n_utils, 'render')
@patch.object(views.TwitterCache.objects, 'get')
def test_db_cache_works(self, mock_manager, mock_render):
"""View should use info returned by DB."""
good_val = 'The Dude tweets, man.'
mock_manager.return_value.tweets = good_val
views.contribute_studentambassadors_landing(self.get_req)
mock_render.assert_called_with(ANY, ANY, {'tweets': good_val})
@patch.object(views, 'lang_file_is_active', lambda *x: False)
class TestContributeOldPage(TestCase):
def setUp(self):
with self.activate('en-US'):
self.url_en = reverse('mozorg.contribute')
with self.activate('pt-BR'):
self.url_pt_br = reverse('mozorg.contribute')
self.contact = '[email protected]'
self.data = {
'contribute-form': 'Y',
'email': self.contact,
'interest': 'coding',
'privacy': True,
'comments': 'Wesh!',
}
def tearDown(self):
mail.outbox = []
@patch.object(ReCaptchaField, 'clean', Mock())
def test_with_autoresponse(self):
"""Test contacts for functional area with autoresponses"""
self.data.update(interest='support')
self.client.post(self.url_en, self.data)
eq_(len(mail.outbox), 2)
cc = ['[email protected]']
m = mail.outbox[0]
eq_(m.from_email, '[email protected]')
eq_(m.to, ['[email protected]'])
eq_(m.cc, cc)
eq_(m.extra_headers['Reply-To'], self.contact)
m = mail.outbox[1]
eq_(m.from_email, '[email protected]')
eq_(m.to, [self.contact])
eq_(m.cc, [])
eq_(m.extra_headers['Reply-To'], ','.join(['[email protected]'] +
cc))
@patch.object(ReCaptchaField, 'clean', Mock())
@patch('bedrock.mozorg.email_contribute.basket.subscribe')
@patch('bedrock.mozorg.email_contribute.requests.post')
def test_webmaker_mentor_signup(self, mock_post, mock_subscribe):
"""Test Webmaker Mentor signup form for education functional area"""
self.data.update(interest='education', newsletter=True)
self.client.post(self.url_en, self.data)
assert_false(mock_subscribe.called)
payload = {'email': self.contact, 'custom-1788': '1'}
mock_post.assert_called_with('https://sendto.mozilla.org/page/s/mentor-signup',
data=payload, timeout=2)
@patch.object(ReCaptchaField, 'clean', Mock())
@patch('bedrock.mozorg.email_contribute.basket.subscribe')
@patch('bedrock.mozorg.email_contribute.requests.post')
def test_webmaker_mentor_signup_newsletter_fail(self, mock_post, mock_subscribe):
"""Test Webmaker Mentor signup form when newsletter is not selected"""
self.data.update(interest='education', newsletter=False)
self.client.post(self.url_en, self.data)
assert_false(mock_subscribe.called)
assert_false(mock_post.called)
@patch.object(ReCaptchaField, 'clean', Mock())
@patch('bedrock.mozorg.email_contribute.basket.subscribe')
@patch('bedrock.mozorg.email_contribute.requests.post')
def test_webmaker_mentor_signup_functional_area_fail(self, mock_post, mock_subscribe):
"""Test Webmaker Mentor signup form when functional area is not education"""
self.data.update(interest='coding', newsletter=True)
self.client.post(self.url_en, self.data)
mock_subscribe.assert_called_with(self.contact, 'about-mozilla', source_url=ANY)
assert_false(mock_post.called)
@patch.object(ReCaptchaField, 'clean', Mock())
@patch('bedrock.mozorg.email_contribute.basket.subscribe')
@patch('bedrock.mozorg.email_contribute.requests.post')
def test_webmaker_mentor_signup_timeout_fail(self, mock_post, mock_subscribe):
"""Test Webmaker Mentor signup form when request times out"""
mock_post.side_effect = Timeout('Timeout')
self.data.update(interest='education', newsletter=True)
res = self.client.post(self.url_en, self.data)
assert_false(mock_subscribe.called)
eq_(res.status_code, 200)
@patch.object(ReCaptchaField, 'clean', Mock())
@patch('bedrock.mozorg.email_contribute.jingo.render_to_string')
def test_no_autoresponse_locale(self, render_mock):
"""
L10N version to test contacts for functional area without autoresponses
"""
# first value is for send() and 2nd is for autorespond()
render_mock.side_effect = ['The Dude minds, man!',
TemplateNotFound('coding.txt')]
self.data.update(interest='coding')
self.client.post(self.url_pt_br, self.data)
eq_(len(mail.outbox), 1)
m = mail.outbox[0]
eq_(m.from_email, '[email protected]')
eq_(m.to, ['[email protected]'])
eq_(m.cc, ['[email protected]'])
eq_(m.extra_headers['Reply-To'], self.contact)
@patch.object(ReCaptchaField, 'clean', Mock())
@patch('bedrock.mozorg.email_contribute.jingo.render_to_string')
def test_with_autoresponse_locale(self, render_mock):
"""
L10N version to test contacts for functional area with autoresponses
"""
render_mock.side_effect = 'The Dude abides.'
self.data.update(interest='support')
self.client.post(self.url_pt_br, self.data)
eq_(len(mail.outbox), 2)
cc = ['[email protected]']
m = mail.outbox[0]
eq_(m.from_email, '[email protected]')
eq_(m.to, ['[email protected]'])
eq_(m.cc, cc)
eq_(m.extra_headers['Reply-To'], self.contact)
m = mail.outbox[1]
eq_(m.from_email, '[email protected]')
eq_(m.to, [self.contact])
eq_(m.cc, [])
eq_(m.extra_headers['Reply-To'], ','.join(['[email protected]'] +
cc))
@patch.object(ReCaptchaField, 'clean', Mock())
def test_emails_not_escaped(self):
"""
Strings in the contribute form should not be HTML escaped
when inserted into the email, which is just text.
E.g. if they entered
J'adore le ''Renard de feu''
the email should not contain
            J&#39;adore le &#39;&#39;Renard de feu&#39;&#39;
Tags are still stripped, though.
"""
STRING = u"<strong>J'adore Citröns</strong> & <Piñatas> so there"
EXPECTED = u"J'adore Citröns & so there"
self.data.update(comments=STRING)
self.client.post(self.url_en, self.data)
eq_(len(mail.outbox), 2)
m = mail.outbox[0]
self.assertIn(EXPECTED, m.body)
@patch.object(l10n_utils.dotlang, 'lang_file_is_active', lambda *x: True)
@patch.object(views, 'lang_file_has_tag', lambda *x: False)
class TestContribute(TestCase):
def setUp(self):
with self.activate('en-US'):
self.url_en = reverse('mozorg.contribute.signup')
with self.activate('pt-BR'):
self.url_pt_br = reverse('mozorg.contribute.signup')
self.contact = '[email protected]'
self.data = {
'contribute-form': 'Y',
'email': self.contact,
'interest': 'coding',
'privacy': True,
'comments': 'Wesh!',
}
def tearDown(self):
mail.outbox = []
@patch.object(ReCaptchaField, 'clean', Mock())
def test_with_autoresponse(self):
"""Test contacts for functional area with autoresponses"""
self.data.update(interest='support')
self.client.post(self.url_en, self.data)
eq_(len(mail.outbox), 2)
cc = ['[email protected]']
m = mail.outbox[0]
eq_(m.from_email, '[email protected]')
eq_(m.to, ['[email protected]'])
eq_(m.cc, cc)
eq_(m.extra_headers['Reply-To'], self.contact)
m = mail.outbox[1]
eq_(m.from_email, '[email protected]')
eq_(m.to, [self.contact])
eq_(m.cc, [])
eq_(m.extra_headers['Reply-To'], ','.join(['[email protected]'] +
cc))
@patch.object(ReCaptchaField, 'clean', Mock())
@patch('bedrock.mozorg.email_contribute.basket.subscribe')
@patch('bedrock.mozorg.email_contribute.requests.post')
def test_webmaker_mentor_signup(self, mock_post, mock_subscribe):
"""Test Webmaker Mentor signup form for education functional area"""
self.data.update(interest='education', newsletter=True)
self.client.post(self.url_en, self.data)
assert_false(mock_subscribe.called)
payload = {'email': self.contact, 'custom-1788': '1'}
mock_post.assert_called_with('https://sendto.mozilla.org/page/s/mentor-signup',
data=payload, timeout=2)
@patch.object(ReCaptchaField, 'clean', Mock())
@patch('bedrock.mozorg.email_contribute.basket.subscribe')
@patch('bedrock.mozorg.email_contribute.requests.post')
def test_webmaker_mentor_signup_newsletter_fail(self, mock_post, mock_subscribe):
"""Test Webmaker Mentor signup form when newsletter is not selected"""
self.data.update(interest='education', newsletter=False)
self.client.post(self.url_en, self.data)
assert_false(mock_subscribe.called)
assert_false(mock_post.called)
@patch.object(ReCaptchaField, 'clean', Mock())
@patch('bedrock.mozorg.email_contribute.basket.subscribe')
@patch('bedrock.mozorg.email_contribute.requests.post')
def test_webmaker_mentor_signup_functional_area_fail(self, mock_post, mock_subscribe):
"""Test Webmaker Mentor signup form when functional area is not education"""
self.data.update(interest='coding', newsletter=True)
self.client.post(self.url_en, self.data)
mock_subscribe.assert_called_with(self.contact, 'about-mozilla', source_url=ANY)
assert_false(mock_post.called)
@patch.object(ReCaptchaField, 'clean', Mock())
@patch('bedrock.mozorg.email_contribute.basket.subscribe')
@patch('bedrock.mozorg.email_contribute.requests.post')
def test_webmaker_mentor_signup_timeout_fail(self, mock_post, mock_subscribe):
"""Test Webmaker Mentor signup form when request times out"""
mock_post.side_effect = Timeout('Timeout')
self.data.update(interest='education', newsletter=True)
res = self.client.post(self.url_en, self.data)
assert_false(mock_subscribe.called)
eq_(res.status_code, 302) # redirect to thankyou page
@patch.object(ReCaptchaField, 'clean', Mock())
@patch('bedrock.mozorg.email_contribute.jingo.render_to_string')
def test_no_autoresponse_locale(self, render_mock):
"""
L10N version to test contacts for functional area without autoresponses
"""
# first value is for send() and 2nd is for autorespond()
render_mock.side_effect = ['The Dude minds, man!',
TemplateNotFound('coding.txt')]
self.data.update(interest='coding')
self.client.post(self.url_pt_br, self.data)
eq_(len(mail.outbox), 1)
m = mail.outbox[0]
eq_(m.from_email, '[email protected]')
eq_(m.to, ['[email protected]'])
eq_(m.cc, ['[email protected]'])
eq_(m.extra_headers['Reply-To'], self.contact)
@patch.object(ReCaptchaField, 'clean', Mock())
@patch('bedrock.mozorg.email_contribute.jingo.render_to_string')
def test_with_autoresponse_locale(self, render_mock):
"""
L10N version to test contacts for functional area with autoresponses
"""
render_mock.side_effect = 'The Dude abides.'
self.data.update(interest='support')
self.client.post(self.url_pt_br, self.data)
eq_(len(mail.outbox), 2)
cc = ['[email protected]']
m = mail.outbox[0]
eq_(m.from_email, '[email protected]')
eq_(m.to, ['[email protected]'])
eq_(m.cc, cc)
eq_(m.extra_headers['Reply-To'], self.contact)
m = mail.outbox[1]
eq_(m.from_email, '[email protected]')
eq_(m.to, [self.contact])
eq_(m.cc, [])
eq_(m.extra_headers['Reply-To'], ','.join(['[email protected]'] +
cc))
@patch.object(ReCaptchaField, 'clean', Mock())
def test_honeypot(self):
"""
If honeypot is triggered no email should go out but page should proceed normally.
"""
self.data.update(interest='support')
self.data.update(office_fax='Yes')
resp = self.client.post(self.url_en, self.data)
eq_(len(mail.outbox), 0)
eq_(resp.status_code, 302)
ok_(resp['location'].endswith('/thankyou/'))
@patch.object(ReCaptchaField, 'clean', Mock())
def test_emails_not_escaped(self):
"""
Strings in the contribute form should not be HTML escaped
when inserted into the email, which is just text.
E.g. if they entered
J'adore le ''Renard de feu''
the email should not contain
            J&#39;adore le &#39;&#39;Renard de feu&#39;&#39;
Tags are still stripped, though.
"""
STRING = u"<em>J'adore Citröns</em> & <Piñatas> so there"
EXPECTED = u"J'adore Citröns & so there"
self.data.update(comments=STRING)
self.client.post(self.url_en, self.data)
eq_(len(mail.outbox), 2)
m = mail.outbox[0]
self.assertIn(EXPECTED, m.body)
class TestRobots(TestCase):
@override_settings(SITE_URL='https://www.mozilla.org')
def test_production_disallow_all_is_false(self):
self.assertFalse(views.Robots().get_context_data()['disallow_all'])
@override_settings(SITE_URL='http://mozilla.local')
def test_non_production_disallow_all_is_true(self):
self.assertTrue(views.Robots().get_context_data()['disallow_all'])
@override_settings(SITE_URL='https://www.mozilla.org')
def test_robots_no_redirect(self):
response = self.client.get('/robots.txt')
self.assertEqual(response.status_code, 200)
self.assertFalse(response.context_data['disallow_all'])
self.assertEqual(response.get('Content-Type'), 'text/plain')
class TestProcessPartnershipForm(TestCase):
def setUp(self):
patcher = patch('bedrock.mozorg.views.requests.post')
self.addCleanup(patcher.stop)
self.requests_mock = patcher.start()
self.requests_mock.return_value.status_code = 200
self.factory = RequestFactory()
self.template = 'mozorg/partnerships.html'
self.view = 'mozorg.partnerships'
self.post_data = {
'first_name': 'The',
'last_name': 'Dude',
'title': 'Abider of things',
'company': 'Urban Achievers',
'email': '[email protected]',
}
self.invalid_post_data = {
'first_name': 'The',
'last_name': 'Dude',
'title': 'Abider of things',
'company': 'Urban Achievers',
'email': 'thedude',
}
with self.activate('en-US'):
self.url = reverse(self.view)
def test_get(self):
"""
A GET request should simply return a 200.
"""
request = self.factory.get(self.url)
request.locale = 'en-US'
response = views.process_partnership_form(request, self.template,
self.view)
self.assertEqual(response.status_code, 200)
def test_post(self):
"""
POSTing without AJAX should redirect to self.url on success and
render self.template on error.
"""
with self.activate('en-US'):
# test non-AJAX POST with valid form data
request = self.factory.post(self.url, self.post_data)
response = views.process_partnership_form(request, self.template,
self.view)
# should redirect to success URL
self.assertEqual(response.status_code, 302)
self.assertIn(self.url, response._headers['location'][1])
self.assertIn('text/html', response._headers['content-type'][1])
# test non-AJAX POST with invalid form data
request = self.factory.post(self.url, self.invalid_post_data)
# locale is not getting set via self.activate above...?
request.locale = 'en-US'
response = views.process_partnership_form(request, self.template,
self.view)
self.assertEqual(response.status_code, 200)
self.assertIn('text/html', response._headers['content-type'][1])
def test_post_ajax(self):
"""
POSTing with AJAX should return success/error JSON.
"""
with self.activate('en-US'):
# test AJAX POST with valid form data
request = self.factory.post(self.url, self.post_data,
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
response = views.process_partnership_form(request, self.template,
self.view)
# decode JSON response
resp_data = json.loads(response.content)
self.assertEqual(resp_data['msg'], 'ok')
self.assertEqual(response.status_code, 200)
self.assertEqual(response._headers['content-type'][1],
'application/json')
ok_(self.requests_mock.called)
# test AJAX POST with invalid form data
request = self.factory.post(self.url, self.invalid_post_data,
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
response = views.process_partnership_form(request, self.template,
self.view)
# decode JSON response
resp_data = json.loads(response.content)
self.assertEqual(resp_data['msg'], 'Form invalid')
self.assertEqual(response.status_code, 400)
self.assertTrue('email' in resp_data['errors'])
self.assertEqual(response._headers['content-type'][1],
'application/json')
def test_post_ajax_honeypot(self):
"""
POSTing with AJAX and honeypot should return success JSON.
"""
with self.activate('en-US'):
self.post_data['office_fax'] = 'what is this?'
request = self.factory.post(self.url, self.post_data,
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
response = views.process_partnership_form(request, self.template,
self.view)
# decode JSON response
resp_data = json.loads(response.content)
self.assertEqual(resp_data['msg'], 'ok')
self.assertEqual(response.status_code, 200)
self.assertEqual(response._headers['content-type'][1],
'application/json')
ok_(not self.requests_mock.called)
def test_post_ajax_error_xss(self):
"""
POSTing with AJAX should return sanitized error messages.
Bug 945845.
"""
with self.activate('en-US'):
# test AJAX POST with valid form data
post_data = self.post_data.copy()
post_data['interest'] = '"><img src=x onerror=alert(1);>'
            escaped_data = '&quot;&gt;&lt;img src=x onerror=alert(1);&gt;'
request = self.factory.post(self.url, post_data,
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
response = views.process_partnership_form(request, self.template,
self.view)
# decode JSON response
resp_data = json.loads(response.content)
self.assertEqual(resp_data['msg'], 'Form invalid')
self.assertEqual(response.status_code, 400)
self.assertTrue(post_data['interest'] not in resp_data['errors']['interest'][0])
self.assertTrue(escaped_data in resp_data['errors']['interest'][0])
self.assertEqual(response._headers['content-type'][1],
'application/json')
def test_lead_source(self):
"""
A POST request should include the 'lead_source' field in that call. The
value will be defaulted to 'www.mozilla.org/about/partnerships/' if it's
not specified.
"""
def _req(form_kwargs):
request = self.factory.post(self.url, self.post_data)
views.process_partnership_form(request, self.template,
self.view, {}, form_kwargs)
return self.requests_mock.call_args[0][1]['lead_source']
eq_(_req(None), 'www.mozilla.org/about/partnerships/')
eq_(_req({'lead_source': 'www.mozilla.org/firefox/partners/'}),
'www.mozilla.org/firefox/partners/')
class TestProcessContentServicesForm(TestCase):
def setUp(self):
patcher = patch('bedrock.mozorg.views.requests.post')
self.addCleanup(patcher.stop)
self.requests_mock = patcher.start()
self.requests_mock.return_value.status_code = 200
self.factory = RequestFactory()
self.template = 'mozorg/contentservices/start.html'
self.view = 'mozorg.contentservices.start'
self.post_data = {
'first_name': 'The',
'last_name': 'Dude',
'title': 'Abider of things',
'company': 'Urban Achievers',
'email': '[email protected]',
'country': 'us',
'state': 'ca',
'phone': '5558675309',
}
self.invalid_post_data = {
'first_name': 'The',
'last_name': 'Dude',
'title': 'Abider of things',
'company': 'Urban Achievers',
'email': 'thedude',
}
with self.activate('en-US'):
self.url = reverse(self.view)
def test_get(self):
"""
A GET request should simply return a 200.
"""
request = self.factory.get(self.url)
request.locale = 'en-US'
response = views.process_content_services_form(request, self.template,
self.view)
self.assertEqual(response.status_code, 200)
def test_post(self):
"""
POSTing without AJAX should redirect to self.url on success and
render self.template on error.
"""
with self.activate('en-US'):
# test non-AJAX POST with valid form data
request = self.factory.post(self.url, self.post_data)
response = views.process_content_services_form(request, self.template,
self.view)
# should redirect to success URL
self.assertEqual(response.status_code, 302)
self.assertIn(self.url, response._headers['location'][1])
self.assertIn('text/html', response._headers['content-type'][1])
# test non-AJAX POST with invalid form data
request = self.factory.post(self.url, self.invalid_post_data)
# locale is not getting set via self.activate above...?
request.locale = 'en-US'
response = views.process_content_services_form(request, self.template,
self.view)
self.assertEqual(response.status_code, 200)
self.assertIn('text/html', response._headers['content-type'][1])
def test_post_ajax(self):
"""
POSTing with AJAX should return success/error JSON.
"""
with self.activate('en-US'):
# test AJAX POST with valid form data
request = self.factory.post(self.url, self.post_data,
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
response = views.process_content_services_form(request, self.template,
self.view)
# decode JSON response
resp_data = json.loads(response.content)
self.assertEqual(resp_data['msg'], 'ok')
self.assertEqual(response.status_code, 200)
self.assertEqual(response._headers['content-type'][1],
'application/json')
ok_(self.requests_mock.called)
# test AJAX POST with invalid form data
request = self.factory.post(self.url, self.invalid_post_data,
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
response = views.process_content_services_form(request, self.template,
self.view)
# decode JSON response
resp_data = json.loads(response.content)
self.assertEqual(resp_data['msg'], 'Form invalid')
self.assertEqual(response.status_code, 400)
self.assertTrue('email' in resp_data['errors'])
self.assertEqual(response._headers['content-type'][1],
'application/json')
def test_post_ajax_no_state(self):
"""
State field required if country is 'us'.
"""
with self.activate('en-US'):
# test AJAX POST with non-us country
self.post_data['state'] = ''
self.post_data['country'] = 'de'
request = self.factory.post(self.url, self.post_data,
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
response = views.process_content_services_form(request, self.template,
self.view)
# decode JSON response
resp_data = json.loads(response.content)
self.assertEqual(resp_data['msg'], 'ok')
self.assertEqual(response.status_code, 200)
self.assertEqual(response._headers['content-type'][1],
'application/json')
ok_(self.requests_mock.called)
self.requests_mock.reset_mock()
# test AJAX POST with us country and no state
self.post_data['state'] = ''
self.post_data['country'] = 'us'
request = self.factory.post(self.url, self.post_data,
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
response = views.process_content_services_form(request, self.template,
self.view)
# decode JSON response
resp_data = json.loads(response.content)
self.assertEqual(resp_data['msg'], 'Form invalid')
self.assertEqual(response.status_code, 400)
self.assertTrue('state' in resp_data['errors']['__all__'][0])
self.assertEqual(response._headers['content-type'][1],
'application/json')
ok_(not self.requests_mock.called)
def test_post_ajax_honeypot(self):
"""
POSTing with AJAX and honeypot should return success JSON.
"""
with self.activate('en-US'):
self.post_data['office_fax'] = 'what is this?'
request = self.factory.post(self.url, self.post_data,
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
response = views.process_content_services_form(request, self.template,
self.view)
# decode JSON response
resp_data = json.loads(response.content)
self.assertEqual(resp_data['msg'], 'ok')
self.assertEqual(response.status_code, 200)
self.assertEqual(response._headers['content-type'][1],
'application/json')
ok_(not self.requests_mock.called)
def test_post_ajax_error_xss(self):
"""
POSTing with AJAX should return sanitized error messages.
Bug 945845.
"""
with self.activate('en-US'):
# test AJAX POST with valid form data
post_data = self.post_data.copy()
post_data['industry'] = '"><img src=x onerror=alert(1);>'
            escaped_data = '&quot;&gt;&lt;img src=x onerror=alert(1);&gt;'
request = self.factory.post(self.url, post_data,
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
response = views.process_content_services_form(request, self.template,
self.view)
# decode JSON response
resp_data = json.loads(response.content)
self.assertEqual(resp_data['msg'], 'Form invalid')
self.assertEqual(response.status_code, 400)
self.assertTrue(post_data['industry'] not in resp_data['errors']['industry'][0])
self.assertTrue(escaped_data in resp_data['errors']['industry'][0])
self.assertEqual(response._headers['content-type'][1],
'application/json')
ok_(not self.requests_mock.called)
def test_lead_source(self):
"""
A POST request should include the 'lead_source' field in that call. The
value will be defaulted to 'www.mozilla.org/about/partnerships/contentservices/'
if it's not specified.
"""
def _req(form_kwargs):
request = self.factory.post(self.url, self.post_data)
views.process_content_services_form(request, self.template,
self.view, {}, form_kwargs)
return self.requests_mock.call_args[0][1]['lead_source']
eq_(_req(None), 'www.mozilla.org/about/partnerships/contentservices/')
eq_(_req({'lead_source': 'www.mozilla.org/firefox/partners/'}),
'www.mozilla.org/firefox/partners/')
class TestMozIDDataView(TestCase):
def setUp(self):
with patch.object(update_tableau_data, 'get_external_data') as ged:
ged.return_value = (
(date(2015, 2, 2), 'Firefox', 'bugzilla', 100, 10),
(date(2015, 2, 2), 'Firefox OS', 'bugzilla', 100, 10),
(date(2015, 2, 9), 'Sumo', 'sumo', 100, 10),
(date(2015, 2, 9), 'Firefox OS', 'sumo', 100, 10),
(date(2015, 2, 9), 'QA', 'reps', 100, 10),
)
update_tableau_data.run()
def _get_json(self, source):
cache.clear()
req = RequestFactory().get('/')
resp = views.mozid_data_view(req, source)
eq_(resp['content-type'], 'application/json')
eq_(resp['access-control-allow-origin'], '*')
return json.loads(resp.content)
def test_all(self):
eq_(self._get_json('all'), [
{'wkcommencing': '2015-02-09', 'totalactive': 300, 'new': 30},
{'wkcommencing': '2015-02-02', 'totalactive': 200, 'new': 20},
])
def test_team(self):
"""When acting on a team, should just return sums for that team."""
eq_(self._get_json('firefoxos'), [
{'wkcommencing': '2015-02-09', 'totalactive': 100, 'new': 10},
{'wkcommencing': '2015-02-02', 'totalactive': 100, 'new': 10},
])
def test_source(self):
"""When acting on a source, should just return sums for that source."""
eq_(self._get_json('sumo'), [
{'wkcommencing': '2015-02-09', 'totalactive': 100, 'new': 10},
])
@patch('bedrock.mozorg.models.CONTRIBUTOR_SOURCE_NAMES', {})
def test_unknown(self):
"""An unknown source should raise a 404."""
with self.assertRaises(Http404):
self._get_json('does-not-exist')
|
mpl-2.0
|
40223235/2015cd_midterm2
|
static/Brython3.1.1-20150328-091302/Lib/getopt.py
|
845
|
7488
|
"""Parser for command line options.
This module helps scripts to parse the command line arguments in
sys.argv. It supports the same conventions as the Unix getopt()
function (including the special meanings of arguments of the form `-'
and `--'). Long options similar to those supported by GNU software
may be used as well via an optional third argument. This module
provides two functions and an exception:
getopt() -- Parse command line options
gnu_getopt() -- Like getopt(), but allow option and non-option arguments
to be intermixed.
GetoptError -- exception (class) raised with 'opt' attribute, which is the
option involved with the exception.
"""
# Long option support added by Lars Wirzenius <[email protected]>.
#
# Gerrit Holl <[email protected]> moved the string-based exceptions
# to class-based exceptions.
#
# Peter Åstrand <[email protected]> added gnu_getopt().
#
# TODO for gnu_getopt():
#
# - GNU getopt_long_only mechanism
# - allow the caller to specify ordering
# - RETURN_IN_ORDER option
# - GNU extension with '-' as first character of option string
# - optional arguments, specified by double colons
# - an option string with a W followed by semicolon should
# treat "-W foo" as "--foo"
__all__ = ["GetoptError","error","getopt","gnu_getopt"]
import os
try:
from gettext import gettext as _
except ImportError:
# Bootstrapping Python: gettext's dependencies not built yet
def _(s): return s
class GetoptError(Exception):
opt = ''
msg = ''
def __init__(self, msg, opt=''):
self.msg = msg
self.opt = opt
Exception.__init__(self, msg, opt)
def __str__(self):
return self.msg
error = GetoptError # backward compatibility
def getopt(args, shortopts, longopts = []):
"""getopt(args, options[, long_options]) -> opts, args
Parses command line options and parameter list. args is the
argument list to be parsed, without the leading reference to the
running program. Typically, this means "sys.argv[1:]". shortopts
is the string of option letters that the script wants to
recognize, with options that require an argument followed by a
colon (i.e., the same format that Unix getopt() uses). If
specified, longopts is a list of strings with the names of the
long options which should be supported. The leading '--'
characters should not be included in the option name. Options
which require an argument should be followed by an equal sign
('=').
The return value consists of two elements: the first is a list of
(option, value) pairs; the second is the list of program arguments
left after the option list was stripped (this is a trailing slice
of the first argument). Each option-and-value pair returned has
the option as its first element, prefixed with a hyphen (e.g.,
'-x'), and the option argument as its second element, or an empty
string if the option has no argument. The options occur in the
list in the same order in which they were found, thus allowing
multiple occurrences. Long and short options may be mixed.
"""
opts = []
if type(longopts) == type(""):
longopts = [longopts]
else:
longopts = list(longopts)
while args and args[0].startswith('-') and args[0] != '-':
if args[0] == '--':
args = args[1:]
break
if args[0].startswith('--'):
opts, args = do_longs(opts, args[0][2:], longopts, args[1:])
else:
opts, args = do_shorts(opts, args[0][1:], shortopts, args[1:])
return opts, args
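# A minimal usage sketch (illustrative values): with shortopts 'ab:',
# -a is a flag and -b takes an argument.
#   opts, args = getopt(['-a', '-b', '1', 'spam'], 'ab:')
#   # opts == [('-a', ''), ('-b', '1')], args == ['spam']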
def gnu_getopt(args, shortopts, longopts = []):
"""getopt(args, options[, long_options]) -> opts, args
This function works like getopt(), except that GNU style scanning
mode is used by default. This means that option and non-option
arguments may be intermixed. The getopt() function stops
processing options as soon as a non-option argument is
encountered.
If the first character of the option string is `+', or if the
environment variable POSIXLY_CORRECT is set, then option
processing stops as soon as a non-option argument is encountered.
"""
opts = []
prog_args = []
if isinstance(longopts, str):
longopts = [longopts]
else:
longopts = list(longopts)
# Allow options after non-option arguments?
if shortopts.startswith('+'):
shortopts = shortopts[1:]
all_options_first = True
elif os.environ.get("POSIXLY_CORRECT"):
all_options_first = True
else:
all_options_first = False
while args:
if args[0] == '--':
prog_args += args[1:]
break
if args[0][:2] == '--':
opts, args = do_longs(opts, args[0][2:], longopts, args[1:])
elif args[0][:1] == '-' and args[0] != '-':
opts, args = do_shorts(opts, args[0][1:], shortopts, args[1:])
else:
if all_options_first:
prog_args += args
break
else:
prog_args.append(args[0])
args = args[1:]
return opts, prog_args
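# A sketch of the GNU-style behavior (illustrative values): options are
# still collected after the non-option argument 'spam'.
#   opts, args = gnu_getopt(['-a', 'spam', '-b', '1'], 'ab:')
#   # opts == [('-a', ''), ('-b', '1')], args == ['spam']
#   # plain getopt() would stop at 'spam' and leave '-b 1' unparsed.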
def do_longs(opts, opt, longopts, args):
try:
i = opt.index('=')
except ValueError:
optarg = None
else:
opt, optarg = opt[:i], opt[i+1:]
has_arg, opt = long_has_args(opt, longopts)
if has_arg:
if optarg is None:
if not args:
raise GetoptError(_('option --%s requires argument') % opt, opt)
optarg, args = args[0], args[1:]
elif optarg is not None:
raise GetoptError(_('option --%s must not have an argument') % opt, opt)
opts.append(('--' + opt, optarg or ''))
return opts, args
# Return:
# has_arg?
# full option name
def long_has_args(opt, longopts):
possibilities = [o for o in longopts if o.startswith(opt)]
if not possibilities:
raise GetoptError(_('option --%s not recognized') % opt, opt)
# Is there an exact match?
if opt in possibilities:
return False, opt
elif opt + '=' in possibilities:
return True, opt
# No exact match, so better be unique.
if len(possibilities) > 1:
# XXX since possibilities contains all valid continuations, might be
# nice to work them into the error msg
raise GetoptError(_('option --%s not a unique prefix') % opt, opt)
assert len(possibilities) == 1
unique_match = possibilities[0]
has_arg = unique_match.endswith('=')
if has_arg:
unique_match = unique_match[:-1]
return has_arg, unique_match
def do_shorts(opts, optstring, shortopts, args):
while optstring != '':
opt, optstring = optstring[0], optstring[1:]
if short_has_arg(opt, shortopts):
if optstring == '':
if not args:
raise GetoptError(_('option -%s requires argument') % opt,
opt)
optstring, args = args[0], args[1:]
optarg, optstring = optstring, ''
else:
optarg = ''
opts.append(('-' + opt, optarg))
return opts, args
def short_has_arg(opt, shortopts):
for i in range(len(shortopts)):
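        # chained comparison: True when this character equals opt and is not ':'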
if opt == shortopts[i] != ':':
return shortopts.startswith(':', i+1)
raise GetoptError(_('option -%s not recognized') % opt, opt)
if __name__ == '__main__':
import sys
print(getopt(sys.argv[1:], "a:b", ["alpha=", "beta"]))
|
gpl-3.0
|
shsingh/ansible
|
lib/ansible/modules/notification/jabber.py
|
37
|
4452
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, Brian Coca <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
version_added: "1.2"
module: jabber
short_description: Send a message to a jabber user or chat room
description:
- Send a message to jabber
options:
user:
description:
      - The user account to connect as
required: true
password:
description:
      - Password for the connecting user
required: true
to:
description:
      - User ID or name of the room; when sending to a room, append a slash and your nick.
required: true
msg:
description:
- The message body.
required: true
host:
description:
      - Host to connect to; overrides the host implied by the user JID
port:
description:
      - Port to connect to; overrides the default
default: 5222
encoding:
description:
- message encoding
# informational: requirements for nodes
requirements:
- python xmpp (xmpppy)
author: "Brian Coca (@bcoca)"
'''
EXAMPLES = '''
# send a message to a user
- jabber:
user: [email protected]
password: secret
to: [email protected]
msg: Ansible task finished
# send a message to a room
- jabber:
user: [email protected]
password: secret
to: [email protected]/ansiblebot
msg: Ansible task finished
# send a message, specifying the host and port
- jabber:
user: [email protected]
host: talk.example.net
port: 5223
password: secret
to: [email protected]
msg: Ansible task finished
'''
import time
import traceback
HAS_XMPP = True
XMPP_IMP_ERR = None
try:
import xmpp
except ImportError:
XMPP_IMP_ERR = traceback.format_exc()
HAS_XMPP = False
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native
def main():
module = AnsibleModule(
argument_spec=dict(
user=dict(required=True),
password=dict(required=True, no_log=True),
to=dict(required=True),
msg=dict(required=True),
host=dict(required=False),
port=dict(required=False, default=5222, type='int'),
encoding=dict(required=False),
),
supports_check_mode=True
)
if not HAS_XMPP:
module.fail_json(msg=missing_required_lib('xmpppy'), exception=XMPP_IMP_ERR)
jid = xmpp.JID(module.params['user'])
user = jid.getNode()
server = jid.getDomain()
port = module.params['port']
password = module.params['password']
try:
to, nick = module.params['to'].split('/', 1)
except ValueError:
to, nick = module.params['to'], None
if module.params['host']:
host = module.params['host']
else:
host = server
if module.params['encoding']:
xmpp.simplexml.ENCODING = module.params['encoding']
msg = xmpp.protocol.Message(body=module.params['msg'])
try:
conn = xmpp.Client(server, debug=[])
if not conn.connect(server=(host, port)):
module.fail_json(rc=1, msg='Failed to connect to server: %s' % (server))
if not conn.auth(user, password, 'Ansible'):
module.fail_json(rc=1, msg='Failed to authorize %s on: %s' % (user, server))
        # Some old servers require this initial presence, as well as the sleep following send
conn.sendInitPresence(requestRoster=0)
if nick: # sending to room instead of user, need to join
msg.setType('groupchat')
msg.setTag('x', namespace='http://jabber.org/protocol/muc#user')
join = xmpp.Presence(to=module.params['to'])
join.setTag('x', namespace='http://jabber.org/protocol/muc')
conn.send(join)
time.sleep(1)
else:
msg.setType('chat')
msg.setTo(to)
if not module.check_mode:
conn.send(msg)
time.sleep(1)
conn.disconnect()
except Exception as e:
module.fail_json(msg="unable to send msg: %s" % to_native(e), exception=traceback.format_exc())
module.exit_json(changed=False, to=to, user=user, msg=msg.getBody())
if __name__ == '__main__':
main()
|
gpl-3.0
|
drybjed/debops
|
ansible/roles/btrfs/library/btrfs_subvolume.py
|
2
|
6480
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2014, Ilya Barsukov <[email protected]>, Selectel LLC
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# import module snippets
from ansible.module_utils.basic import *
import os
DOCUMENTATION = """
---
module: btrfs_subvolume
short_description: Provides `create` and `delete` methods for Btrfs subvolumes.
description:
    - The M(btrfs_subvolume) module creates or deletes the Btrfs subvolume
      at the given path, optionally recursively and with qgroup assignment.
version_added: 0.1
author: Ilya Barsukov
options:
state:
description:
- creates or deletes subvolume
required: true
choices: ["present", "absent"]
path:
description:
- subvolume absolute path
required: true
default: null
qgroups:
required: false
description:
- list of qgroup ids, adds the newly created subvolume to a qgroup
default: []
commit:
required: false
description:
      - wait for a transaction commit at the end of the whole operation
        ('after') or after each subvolume command ('each')
choices: ["each", "after", "no"]
default: "no"
recursive:
required: false
choices: [true, false]
description:
- create or delete subvolumes recursively
default: false
"""
EXAMPLES = """
# Example for Ansible Playbooks.
- name: Recursive create given subvolume path
btrfs_subvolume:
state: 'present'
path: '/storage/test/test1/test2'
qgroups:
- '1/100'
- '1/101'
recursive: True
- name: Delete given Btrfs subvolume
btrfs_subvolume:
state: 'absent'
path: '/storage/test/test1/test2'
commit: 'each'
"""
def get_subvolumes(path, subs=None):
if subs is None:
subs = []
subvolumes = os.listdir(path)
for sub in subvolumes:
sub = os.path.sep.join([path, sub])
if not os.path.isdir(sub):
continue
subs.append(sub)
subs = get_subvolumes(sub, subs=subs)
return subs
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(required=True, choices=['present', 'absent'],
type='str'),
            path=dict(required=True, type='str'),
qgroups=dict(default=[], type='list'),
commit=dict(default='no', choices=['after', 'each', 'no'],
type='str'),
            recursive=dict(default=False, type='bool')
),
supports_check_mode=True
)
result = {
'changed': False,
'commands': [],
'check': module.check_mode
}
param_path = module.params['path'].rstrip(os.path.sep)
if module.params['state'] == 'present':
# Creating subvolume
if not os.path.exists(param_path) and not module.params['recursive']:
cmd = 'btrfs subvolume create {qgroups} {subvolume}'.format(
qgroups=' -i '.join(['']+module.params['qgroups']),
subvolume=param_path,
)
result['commands'].append(cmd)
if not module.check_mode:
module.run_command(cmd, check_rc=True)
result['changed'] = True
elif module.params['recursive']:
        # Check parent subvolumes and create them if they don't exist
parents = param_path.split(os.path.sep)
for idx, subvolume in enumerate(parents):
if len(subvolume) == 0:
continue
subvolume = os.path.sep.join(parents[:idx+1])
if not os.path.exists(subvolume):
cmd = ('btrfs subvolume create '
'{qgroups} {subvolume}').format(
qgroups=' -i '.join(['']+module.params['qgroups']),
subvolume=subvolume,
)
result['commands'].append(cmd)
if not module.check_mode:
module.run_command(cmd, check_rc=True)
result['changed'] = True
elif module.params['state'] == 'absent':
# Delete subvolume
commit = ''
if module.params['commit'] != 'no':
commit = '--commit-{}'.format(module.params['commit'])
if os.path.exists(param_path):
if not module.params['recursive']:
cmd = 'btrfs subvolume delete {commit} {subvolume}'.format(
commit=commit, subvolume=param_path
)
result['commands'].append(cmd)
if not module.check_mode:
module.run_command(cmd, check_rc=True)
result['changed'] = True
elif module.params['recursive']:
            # delete subvolumes deepest-first, so children go before parents
subvolumes = get_subvolumes(param_path)
subvolumes.insert(0, param_path)
for sub in reversed(subvolumes):
if os.path.exists(sub):
cmd = ('btrfs subvolume delete '
'{commit} {subvolume}').format(
commit=commit, subvolume=sub
)
result['commands'].append(cmd)
if not module.check_mode:
module.run_command(cmd, check_rc=True)
result['changed'] = True
if module.check_mode and result['commands']:
result['changed'] = True
if not module.check_mode:
del result['check']
if not result['commands']:
del result['commands']
module.exit_json(**result)
if __name__ == '__main__':
main()
|
gpl-3.0
|
asm0dey/Flexget
|
flexget/plugins/filter/movie_queue.py
|
1
|
12329
|
from __future__ import unicode_literals, division, absolute_import
import logging
from sqlalchemy import Column, Integer, String, ForeignKey, or_, and_, select, update
from sqlalchemy.orm.exc import NoResultFound
from flexget import db_schema, plugin
from flexget.entry import Entry
from flexget.event import event
from flexget.manager import Session
from flexget.utils import qualities
from flexget.utils.imdb import extract_id
from flexget.utils.log import log_once
from flexget.utils.database import quality_requirement_property, with_session
from flexget.utils.sqlalchemy_utils import table_exists, table_schema
try:
from flexget.plugins.filter import queue_base
except ImportError:
raise plugin.DependencyError(issued_by='movie_queue', missing='queue_base',
message='movie_queue requires the queue_base plugin')
log = logging.getLogger('movie_queue')
Base = db_schema.versioned_base('movie_queue', 2)
@event('manager.lock-acquired')
def migrate_imdb_queue(manager):
"""If imdb_queue table is found, migrate the data to movie_queue"""
session = Session()
try:
if table_exists('imdb_queue', session):
log.info('Migrating imdb_queue items to movie_queue')
old_table = table_schema('imdb_queue', session)
for row in session.execute(old_table.select()):
try:
queue_add(imdb_id=row['imdb_id'], quality=row['quality'], session=session)
except QueueError as e:
log.error('Unable to migrate %s from imdb_queue to movie_queue' % row['title'])
old_table.drop()
session.commit()
finally:
session.close()
@db_schema.upgrade('movie_queue')
def upgrade(ver, session):
if ver == 0:
# Translate old qualities into new quality requirements
movie_table = table_schema('movie_queue', session)
for row in session.execute(select([movie_table.c.id, movie_table.c.quality])):
# Webdl quality no longer has dash
new_qual = row['quality'].replace('web-dl', 'webdl')
if new_qual.lower() != 'any':
# Old behavior was to get specified quality or greater, approximate that with new system
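            # e.g. a stored quality of '720p hdtv' becomes '720p+ hdtv+'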
new_qual = ' '.join(qual + '+' for qual in new_qual.split(' '))
session.execute(update(movie_table, movie_table.c.id == row['id'],
{'quality': new_qual}))
ver = 1
if ver == 1:
# Bad upgrade left some qualities as 'ANY+'
movie_table = table_schema('movie_queue', session)
for row in session.execute(select([movie_table.c.id, movie_table.c.quality])):
if row['quality'].lower() == 'any+':
session.execute(update(movie_table, movie_table.c.id == row['id'],
{'quality': 'ANY'}))
ver = 2
return ver
class QueuedMovie(queue_base.QueuedItem, Base):
__tablename__ = 'movie_queue'
__mapper_args__ = {'polymorphic_identity': 'movie'}
id = Column(Integer, ForeignKey('queue.id'), primary_key=True)
imdb_id = Column(String)
tmdb_id = Column(Integer)
quality = Column('quality', String)
quality_req = quality_requirement_property('quality')
class FilterMovieQueue(queue_base.FilterQueueBase):
def matches(self, task, config, entry):
# Tell tmdb_lookup to add lazy lookup fields if not already present
try:
plugin.get_plugin_by_name('imdb_lookup').instance.register_lazy_fields(entry)
except plugin.DependencyError:
log.debug('imdb_lookup is not available, queue will not work if movie ids are not populated')
try:
plugin.get_plugin_by_name('tmdb_lookup').instance.lookup(entry)
except plugin.DependencyError:
log.debug('tmdb_lookup is not available, queue will not work if movie ids are not populated')
conditions = []
# Check if a movie id is already populated before incurring a lazy lookup
for lazy in [False, True]:
if entry.get('imdb_id', eval_lazy=lazy):
conditions.append(QueuedMovie.imdb_id == entry['imdb_id'])
if entry.get('tmdb_id', eval_lazy=lazy and not conditions):
conditions.append(QueuedMovie.tmdb_id == entry['tmdb_id'])
if conditions:
break
if not conditions:
log_once('IMDB and TMDB lookups failed for %s.' % entry['title'], log, logging.WARN)
return
quality = entry.get('quality', qualities.Quality())
movie = task.session.query(QueuedMovie).filter(QueuedMovie.downloaded == None). \
filter(or_(*conditions)).first()
if movie and movie.quality_req.allows(quality):
return movie
class QueueError(Exception):
"""Exception raised if there is an error with a queue operation"""
    # TODO: `message` was removed from the exception base class and is now masked;
    # another custom exception (DependencyError) needed some tweaks to make this work.
def __init__(self, message, errno=0):
self.message = message
self.errno = errno
@with_session
def parse_what(what, lookup=True, session=None):
"""
Determines what information was provided by the search string `what`.
If `lookup` is true, will fill in other information from tmdb.
:param what: Can be one of:
<Movie Title>: Search based on title
imdb_id=<IMDB id>: search based on imdb id
tmdb_id=<TMDB id>: search based on tmdb id
:param bool lookup: Whether missing info should be filled in from tmdb.
:param session: An existing session that will be used for lookups if provided.
:rtype: dict
:return: A dictionary with 'title', 'imdb_id' and 'tmdb_id' keys
"""
tmdb_lookup = plugin.get_plugin_by_name('api_tmdb').instance.lookup
result = {'title': None, 'imdb_id': None, 'tmdb_id': None}
result['imdb_id'] = extract_id(what)
if not result['imdb_id']:
if what.startswith('tmdb_id='):
result['tmdb_id'] = what[8:]
else:
result['title'] = what
if not lookup:
# If not doing an online lookup we can return here
return result
search_entry = Entry(title=result['title'] or '')
for field in ['imdb_id', 'tmdb_id']:
if result.get(field):
search_entry[field] = result[field]
# Put lazy lookup fields on the search entry
plugin.get_plugin_by_name('imdb_lookup').instance.register_lazy_fields(search_entry)
plugin.get_plugin_by_name('tmdb_lookup').instance.lookup(search_entry)
try:
# Both ids are optional, but if movie_name was populated at least one of them will be there
return {'title': search_entry['movie_name'], 'imdb_id': search_entry.get('imdb_id'),
'tmdb_id': search_entry.get('tmdb_id')}
except KeyError as e:
raise QueueError(e.message)
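# A sketch of the accepted `what` formats (hypothetical ids):
#   parse_what('The Matrix')          # search by title
#   parse_what('imdb_id=tt0133093')   # search by imdb id
#   parse_what('tmdb_id=603')         # search by tmdb id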
# API functions to edit queue
@with_session
def queue_add(title=None, imdb_id=None, tmdb_id=None, quality=None, session=None):
"""
Add an item to the queue with the specified quality requirements.
One or more of `title` `imdb_id` or `tmdb_id` must be specified when calling this function.
:param title: Title of the movie. (optional)
:param imdb_id: IMDB id for the movie. (optional)
:param tmdb_id: TMDB id for the movie. (optional)
:param quality: A QualityRequirements object defining acceptable qualities.
:param session: Optional session to use for database updates
"""
quality = quality or qualities.Requirements('any')
if not title or not (imdb_id or tmdb_id):
# We don't have all the info we need to add movie, do a lookup for more info
result = parse_what(imdb_id or title, session=session)
title = result['title']
imdb_id = result['imdb_id']
tmdb_id = result['tmdb_id']
# check if the item is already queued
item = session.query(QueuedMovie).filter(or_(and_(QueuedMovie.imdb_id != None, QueuedMovie.imdb_id == imdb_id),
and_(QueuedMovie.tmdb_id != None, QueuedMovie.tmdb_id == tmdb_id))). \
first()
if not item:
item = QueuedMovie(title=title, imdb_id=imdb_id, tmdb_id=tmdb_id, quality=quality.text)
session.add(item)
log.info('Adding %s to movie queue with quality=%s.' % (title, quality))
return {'title': title, 'imdb_id': imdb_id, 'tmdb_id': tmdb_id, 'quality': quality}
else:
if item.downloaded:
raise QueueError('ERROR: %s has already been queued and downloaded' % title)
else:
raise QueueError('ERROR: %s is already in the queue' % title, errno=1)
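# A minimal usage sketch (hypothetical id; the session argument is supplied
# automatically by the @with_session decorator):
#   queue_add(imdb_id='tt0133093', quality=qualities.Requirements('720p+'))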
@with_session
def queue_del(title=None, imdb_id=None, tmdb_id=None, session=None):
"""
Delete the given item from the queue.
:param title: Movie title
:param imdb_id: Imdb id
:param tmdb_id: Tmdb id
:param session: Optional session to use, new session used otherwise
:return: Title of forgotten movie
:raises QueueError: If queued item could not be found with given arguments
"""
log.debug('queue_del - title=%s, imdb_id=%s, tmdb_id=%s' % (title, imdb_id, tmdb_id))
query = session.query(QueuedMovie)
if imdb_id:
query = query.filter(QueuedMovie.imdb_id == imdb_id)
elif tmdb_id:
query = query.filter(QueuedMovie.tmdb_id == tmdb_id)
elif title:
query = query.filter(QueuedMovie.title == title)
try:
item = query.one()
title = item.title
session.delete(item)
return title
except NoResultFound as e:
        raise QueueError('title=%s, imdb_id=%s, tmdb_id=%s not found in queue' % (title, imdb_id, tmdb_id))
@with_session
def queue_forget(title=None, imdb_id=None, tmdb_id=None, session=None):
"""
Forget movie download from the queue.
:param title: Movie title
:param imdb_id: Imdb id
:param tmdb_id: Tmdb id
:param session: Optional session to use, new session used otherwise
:return: Title of forgotten movie
:raises QueueError: If queued item could not be found with given arguments
"""
log.debug('queue_forget - title=%s, imdb_id=%s, tmdb_id=%s' % (title, imdb_id, tmdb_id))
query = session.query(QueuedMovie)
if imdb_id:
query = query.filter(QueuedMovie.imdb_id == imdb_id)
elif tmdb_id:
query = query.filter(QueuedMovie.tmdb_id == tmdb_id)
elif title:
query = query.filter(QueuedMovie.title == title)
try:
item = query.one()
title = item.title
if not item.downloaded:
raise QueueError('%s is not marked as downloaded' % title)
item.downloaded = None
return title
except NoResultFound as e:
        raise QueueError('title=%s, imdb_id=%s, tmdb_id=%s not found in queue' % (title, imdb_id, tmdb_id))
@with_session
def queue_edit(quality, imdb_id=None, tmdb_id=None, session=None):
"""
:param quality: Change the required quality for a movie in the queue
:param imdb_id: Imdb id
:param tmdb_id: Tmdb id
:param session: Optional session to use, new session used otherwise
:return: Title of edited item
:raises QueueError: If queued item could not be found with given arguments
"""
# check if the item is queued
try:
        query = session.query(QueuedMovie)
        if imdb_id:
            query = query.filter(QueuedMovie.imdb_id == imdb_id)
        else:
            query = query.filter(QueuedMovie.tmdb_id == tmdb_id)
        item = query.one()
item.quality = quality
return item.title
except NoResultFound as e:
        raise QueueError('imdb_id=%s, tmdb_id=%s not found in queue' % (imdb_id, tmdb_id))
@with_session
def queue_get(session=None, downloaded=False):
"""
Get the current movie queue.
    :param session: A new session is used if not given
    :param bool downloaded: Whether to return only downloaded movies
:return: List of QueuedMovie objects (detached from session)
"""
if not downloaded:
return session.query(QueuedMovie).filter(QueuedMovie.downloaded == None).all()
else:
return session.query(QueuedMovie).filter(QueuedMovie.downloaded != None).all()
@event('plugin.register')
def register_plugin():
plugin.register(FilterMovieQueue, 'movie_queue', api_ver=2)
|
mit
|
indevgr/django
|
django/contrib/admin/sites.py
|
3
|
19794
|
from functools import update_wrapper
from django.apps import apps
from django.conf import settings
from django.contrib.admin import ModelAdmin, actions
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.core.exceptions import ImproperlyConfigured, PermissionDenied
from django.db.models.base import ModelBase
from django.http import Http404, HttpResponseRedirect
from django.template.response import TemplateResponse
from django.urls import NoReverseMatch, reverse
from django.utils import six
from django.utils.text import capfirst
from django.utils.translation import ugettext as _, ugettext_lazy
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
system_check_errors = []
class AlreadyRegistered(Exception):
pass
class NotRegistered(Exception):
pass
class AdminSite(object):
"""
An AdminSite object encapsulates an instance of the Django admin application, ready
to be hooked in to your URLconf. Models are registered with the AdminSite using the
register() method, and the get_urls() method can then be used to access Django view
functions that present a full admin interface for the collection of registered
models.
"""
# Text to put at the end of each page's <title>.
site_title = ugettext_lazy('Django site admin')
# Text to put in each page's <h1>.
site_header = ugettext_lazy('Django administration')
# Text to put at the top of the admin index page.
index_title = ugettext_lazy('Site administration')
# URL for the "View site" link at the top of each admin page.
site_url = '/'
_empty_value_display = '-'
login_form = None
index_template = None
app_index_template = None
login_template = None
logout_template = None
password_change_template = None
password_change_done_template = None
def __init__(self, name='admin'):
self._registry = {} # model_class class -> admin_class instance
self.name = name
self._actions = {'delete_selected': actions.delete_selected}
self._global_actions = self._actions.copy()
def register(self, model_or_iterable, admin_class=None, **options):
"""
Registers the given model(s) with the given admin class.
The model(s) should be Model classes, not instances.
If an admin class isn't given, it will use ModelAdmin (the default
admin options). If keyword arguments are given -- e.g., list_display --
they'll be applied as options to the admin class.
If a model is already registered, this will raise AlreadyRegistered.
If a model is abstract, this will raise ImproperlyConfigured.
"""
if not admin_class:
admin_class = ModelAdmin
if isinstance(model_or_iterable, ModelBase):
model_or_iterable = [model_or_iterable]
for model in model_or_iterable:
if model._meta.abstract:
raise ImproperlyConfigured(
'The model %s is abstract, so it cannot be registered with admin.' % model.__name__
)
if model in self._registry:
raise AlreadyRegistered('The model %s is already registered' % model.__name__)
# Ignore the registration if the model has been
# swapped out.
if not model._meta.swapped:
# If we got **options then dynamically construct a subclass of
# admin_class with those **options.
if options:
# For reasons I don't quite understand, without a __module__
# the created class appears to "live" in the wrong place,
# which causes issues later on.
options['__module__'] = __name__
admin_class = type("%sAdmin" % model.__name__, (admin_class,), options)
# Instantiate the admin class to save in the registry
admin_obj = admin_class(model, self)
if admin_class is not ModelAdmin and settings.DEBUG:
system_check_errors.extend(admin_obj.check())
self._registry[model] = admin_obj
def unregister(self, model_or_iterable):
"""
Unregisters the given model(s).
If a model isn't already registered, this will raise NotRegistered.
"""
if isinstance(model_or_iterable, ModelBase):
model_or_iterable = [model_or_iterable]
for model in model_or_iterable:
if model not in self._registry:
raise NotRegistered('The model %s is not registered' % model.__name__)
del self._registry[model]
def is_registered(self, model):
"""
Check if a model class is registered with this `AdminSite`.
"""
return model in self._registry
def add_action(self, action, name=None):
"""
Register an action to be available globally.
"""
name = name or action.__name__
self._actions[name] = action
self._global_actions[name] = action
def disable_action(self, name):
"""
Disable a globally-registered action. Raises KeyError for invalid names.
"""
del self._actions[name]
def get_action(self, name):
"""
Explicitly get a registered global action whether it's enabled or
not. Raises KeyError for invalid names.
"""
return self._global_actions[name]
@property
def actions(self):
"""
Get all the enabled actions as an iterable of (name, func).
"""
return six.iteritems(self._actions)
@property
def empty_value_display(self):
return self._empty_value_display
@empty_value_display.setter
def empty_value_display(self, empty_value_display):
self._empty_value_display = empty_value_display
def has_permission(self, request):
"""
Returns True if the given HttpRequest has permission to view
*at least one* page in the admin site.
"""
return request.user.is_active and request.user.is_staff
def admin_view(self, view, cacheable=False):
"""
Decorator to create an admin view attached to this ``AdminSite``. This
wraps the view and provides permission checking by calling
``self.has_permission``.
You'll want to use this from within ``AdminSite.get_urls()``:
class MyAdminSite(AdminSite):
def get_urls(self):
from django.conf.urls import url
urls = super(MyAdminSite, self).get_urls()
urls += [
url(r'^my_view/$', self.admin_view(some_view))
]
return urls
By default, admin_views are marked non-cacheable using the
``never_cache`` decorator. If the view can be safely cached, set
cacheable=True.
"""
def inner(request, *args, **kwargs):
if not self.has_permission(request):
if request.path == reverse('admin:logout', current_app=self.name):
index_path = reverse('admin:index', current_app=self.name)
return HttpResponseRedirect(index_path)
# Inner import to prevent django.contrib.admin (app) from
# importing django.contrib.auth.models.User (unrelated model).
from django.contrib.auth.views import redirect_to_login
return redirect_to_login(
request.get_full_path(),
reverse('admin:login', current_app=self.name)
)
return view(request, *args, **kwargs)
if not cacheable:
inner = never_cache(inner)
# We add csrf_protect here so this function can be used as a utility
# function for any view, without having to repeat 'csrf_protect'.
if not getattr(view, 'csrf_exempt', False):
inner = csrf_protect(inner)
return update_wrapper(inner, view)
def get_urls(self):
from django.conf.urls import url, include
# Since this module gets imported in the application's root package,
# it cannot import models from other applications at the module level,
# and django.contrib.contenttypes.views imports ContentType.
from django.contrib.contenttypes import views as contenttype_views
def wrap(view, cacheable=False):
def wrapper(*args, **kwargs):
return self.admin_view(view, cacheable)(*args, **kwargs)
wrapper.admin_site = self
return update_wrapper(wrapper, view)
# Admin-site-wide views.
urlpatterns = [
url(r'^$', wrap(self.index), name='index'),
url(r'^login/$', self.login, name='login'),
url(r'^logout/$', wrap(self.logout), name='logout'),
url(r'^password_change/$', wrap(self.password_change, cacheable=True), name='password_change'),
url(r'^password_change/done/$', wrap(self.password_change_done, cacheable=True),
name='password_change_done'),
url(r'^jsi18n/$', wrap(self.i18n_javascript, cacheable=True), name='jsi18n'),
url(r'^r/(?P<content_type_id>\d+)/(?P<object_id>.+)/$', wrap(contenttype_views.shortcut),
name='view_on_site'),
]
# Add in each model's views, and create a list of valid URLS for the
# app_index
valid_app_labels = []
for model, model_admin in self._registry.items():
urlpatterns += [
url(r'^%s/%s/' % (model._meta.app_label, model._meta.model_name), include(model_admin.urls)),
]
if model._meta.app_label not in valid_app_labels:
valid_app_labels.append(model._meta.app_label)
# If there were ModelAdmins registered, we should have a list of app
        # labels for which we need to allow access to the app_index view.
if valid_app_labels:
regex = r'^(?P<app_label>' + '|'.join(valid_app_labels) + ')/$'
urlpatterns += [
url(regex, wrap(self.app_index), name='app_list'),
]
return urlpatterns
@property
def urls(self):
return self.get_urls(), 'admin', self.name
def each_context(self, request):
"""
Returns a dictionary of variables to put in the template context for
*every* page in the admin site.
For sites running on a subpath, use the SCRIPT_NAME value if site_url
hasn't been customized.
"""
script_name = request.META['SCRIPT_NAME']
site_url = script_name if self.site_url == '/' and script_name else self.site_url
return {
'site_title': self.site_title,
'site_header': self.site_header,
'site_url': site_url,
'has_permission': self.has_permission(request),
'available_apps': self.get_app_list(request),
}
def password_change(self, request, extra_context=None):
"""
Handles the "change password" task -- both form display and validation.
"""
from django.contrib.admin.forms import AdminPasswordChangeForm
from django.contrib.auth.views import password_change
url = reverse('admin:password_change_done', current_app=self.name)
defaults = {
'password_change_form': AdminPasswordChangeForm,
'post_change_redirect': url,
'extra_context': dict(self.each_context(request), **(extra_context or {})),
}
if self.password_change_template is not None:
defaults['template_name'] = self.password_change_template
request.current_app = self.name
return password_change(request, **defaults)
def password_change_done(self, request, extra_context=None):
"""
Displays the "success" page after a password change.
"""
from django.contrib.auth.views import password_change_done
defaults = {
'extra_context': dict(self.each_context(request), **(extra_context or {})),
}
if self.password_change_done_template is not None:
defaults['template_name'] = self.password_change_done_template
request.current_app = self.name
return password_change_done(request, **defaults)
def i18n_javascript(self, request):
"""
Displays the i18n JavaScript that the Django admin requires.
This takes into account the USE_I18N setting. If it's set to False, the
generated JavaScript will be leaner and faster.
"""
if settings.USE_I18N:
from django.views.i18n import javascript_catalog
else:
from django.views.i18n import null_javascript_catalog as javascript_catalog
return javascript_catalog(request, packages=['django.conf', 'django.contrib.admin'])
@never_cache
def logout(self, request, extra_context=None):
"""
Logs out the user for the given HttpRequest.
This should *not* assume the user is already logged in.
"""
from django.contrib.auth.views import logout
defaults = {
'extra_context': dict(
self.each_context(request),
# Since the user isn't logged out at this point, the value of
# has_permission must be overridden.
has_permission=False,
**(extra_context or {})
),
}
if self.logout_template is not None:
defaults['template_name'] = self.logout_template
request.current_app = self.name
return logout(request, **defaults)
@never_cache
def login(self, request, extra_context=None):
"""
Displays the login form for the given HttpRequest.
"""
if request.method == 'GET' and self.has_permission(request):
# Already logged-in, redirect to admin index
index_path = reverse('admin:index', current_app=self.name)
return HttpResponseRedirect(index_path)
from django.contrib.auth.views import login
# Since this module gets imported in the application's root package,
# it cannot import models from other applications at the module level,
# and django.contrib.admin.forms eventually imports User.
from django.contrib.admin.forms import AdminAuthenticationForm
context = dict(
self.each_context(request),
title=_('Log in'),
app_path=request.get_full_path(),
)
if (REDIRECT_FIELD_NAME not in request.GET and
REDIRECT_FIELD_NAME not in request.POST):
context[REDIRECT_FIELD_NAME] = reverse('admin:index', current_app=self.name)
context.update(extra_context or {})
defaults = {
'extra_context': context,
'authentication_form': self.login_form or AdminAuthenticationForm,
'template_name': self.login_template or 'admin/login.html',
}
request.current_app = self.name
return login(request, **defaults)
def _build_app_dict(self, request, label=None):
"""
        Builds the app dictionary. Takes an optional label parameter to filter
models of a specific app.
"""
app_dict = {}
if label:
models = {
m: m_a for m, m_a in self._registry.items()
if m._meta.app_label == label
}
else:
models = self._registry
for model, model_admin in models.items():
app_label = model._meta.app_label
has_module_perms = model_admin.has_module_permission(request)
if not has_module_perms:
if label:
raise PermissionDenied
continue
perms = model_admin.get_model_perms(request)
# Check whether user has any perm for this module.
# If so, add the module to the model_list.
if True not in perms.values():
continue
info = (app_label, model._meta.model_name)
model_dict = {
'name': capfirst(model._meta.verbose_name_plural),
'object_name': model._meta.object_name,
'perms': perms,
}
if perms.get('change'):
try:
model_dict['admin_url'] = reverse('admin:%s_%s_changelist' % info, current_app=self.name)
except NoReverseMatch:
pass
if perms.get('add'):
try:
model_dict['add_url'] = reverse('admin:%s_%s_add' % info, current_app=self.name)
except NoReverseMatch:
pass
if app_label in app_dict:
app_dict[app_label]['models'].append(model_dict)
else:
app_dict[app_label] = {
'name': apps.get_app_config(app_label).verbose_name,
'app_label': app_label,
'app_url': reverse(
'admin:app_list',
kwargs={'app_label': app_label},
current_app=self.name,
),
'has_module_perms': has_module_perms,
'models': [model_dict],
}
if label:
return app_dict.get(label)
return app_dict
def get_app_list(self, request):
"""
Returns a sorted list of all the installed apps that have been
registered in this site.
"""
app_dict = self._build_app_dict(request)
# Sort the apps alphabetically.
app_list = sorted(app_dict.values(), key=lambda x: x['name'].lower())
# Sort the models alphabetically within each app.
for app in app_list:
app['models'].sort(key=lambda x: x['name'])
return app_list
@never_cache
def index(self, request, extra_context=None):
"""
Displays the main admin index page, which lists all of the installed
apps that have been registered in this site.
"""
app_list = self.get_app_list(request)
context = dict(
self.each_context(request),
title=self.index_title,
app_list=app_list,
)
context.update(extra_context or {})
request.current_app = self.name
return TemplateResponse(request, self.index_template or 'admin/index.html', context)
def app_index(self, request, app_label, extra_context=None):
app_dict = self._build_app_dict(request, app_label)
if not app_dict:
raise Http404('The requested admin page does not exist.')
# Sort the models alphabetically within each app.
app_dict['models'].sort(key=lambda x: x['name'])
app_name = apps.get_app_config(app_label).verbose_name
context = dict(
self.each_context(request),
title=_('%(app)s administration') % {'app': app_name},
app_list=[app_dict],
app_label=app_label,
)
context.update(extra_context or {})
request.current_app = self.name
return TemplateResponse(request, self.app_index_template or [
'admin/%s/app_index.html' % app_label,
'admin/app_index.html'
], context)
# This global object represents the default admin site, for the common case.
# You can instantiate AdminSite in your own code to create a custom admin site.
site = AdminSite()
|
bsd-3-clause
|
pplatek/odoo
|
addons/base_report_designer/plugin/openerp_report_designer/test/test_fields.py
|
391
|
1308
|
#
# Use this module to retrieve the fields you need according to the type
# of the OpenOffice operation:
# * Insert a Field
# * Insert a RepeatIn
#
import xmlrpclib
import time
sock = xmlrpclib.ServerProxy('http://localhost:8069/xmlrpc/object')
def get(object, level=3, ending=None, ending_excl=None, recur=None, root=''):
if ending is None:
ending = []
if ending_excl is None:
ending_excl = []
if recur is None:
recur = []
    res = sock.execute('terp', 3, 'admin', object, 'fields_get')
key = res.keys()
key.sort()
for k in key:
if (not ending or res[k]['type'] in ending) and ((not ending_excl) or not (res[k]['type'] in ending_excl)):
print root+'/'+k
if res[k]['type'] in recur:
print root+'/'+k
if (res[k]['type'] in recur) and (level>0):
get(res[k]['relation'], level-1, ending, ending_excl, recur, root+'/'+k)
print 'Field selection for a Field', '='*40
get('account.invoice', level=0, ending_excl=['one2many','many2one','many2many','reference'], recur=['many2one'])
print
print 'Field selection for a repeatIn', '='*40
get('account.invoice', level=0, ending=['one2many','many2many'], recur=['many2one'])
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
vivekkodu/robotframework-selenium2library
|
doc/buildhtml.py
|
72
|
9935
|
#!/usr/bin/env python
# $Id: buildhtml.py 7037 2011-05-19 08:56:27Z milde $
# Author: David Goodger <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
Generates .html from all the .txt files in a directory.
Ordinary .txt files are understood to be standalone reStructuredText.
Files named ``pep-*.txt`` are interpreted as reStructuredText PEPs.
"""
# Once PySource is here, build .html from .py as well.
__docformat__ = 'reStructuredText'
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
import sys
import os
import os.path
import copy
from fnmatch import fnmatch
import docutils
from docutils import ApplicationError
from docutils import core, frontend, utils
from docutils.error_reporting import ErrorOutput, ErrorString
from docutils.parsers import rst
from docutils.readers import standalone, pep
from docutils.writers import html4css1, pep_html
usage = '%prog [options] [<directory> ...]'
description = ('Generates .html from all the reStructuredText .txt files '
'(including PEPs) in each <directory> '
'(default is the current directory).')
class SettingsSpec(docutils.SettingsSpec):
"""
Runtime settings & command-line options for the front end.
"""
# Can't be included in OptionParser below because we don't want to
# override the base class.
settings_spec = (
'Build-HTML Options',
None,
(('Recursively scan subdirectories for files to process. This is '
'the default.',
['--recurse'],
{'action': 'store_true', 'default': 1,
'validator': frontend.validate_boolean}),
('Do not scan subdirectories for files to process.',
['--local'], {'dest': 'recurse', 'action': 'store_false'}),
('BROKEN Do not process files in <directory>. This option may be used '
'more than once to specify multiple directories.',
['--prune'],
{'metavar': '<directory>', 'action': 'append',
'validator': frontend.validate_colon_separated_string_list}),
('BROKEN Recursively ignore files or directories matching any of the given '
'wildcard (shell globbing) patterns (separated by colons). '
'Default: ".svn:CVS"',
['--ignore'],
{'metavar': '<patterns>', 'action': 'append',
'default': ['.svn', 'CVS'],
'validator': frontend.validate_colon_separated_string_list}),
('Work silently (no progress messages). Independent of "--quiet".',
['--silent'],
{'action': 'store_true', 'validator': frontend.validate_boolean}),
('Do not process files, show files that would be processed.',
['--dry-run'],
{'action': 'store_true', 'validator': frontend.validate_boolean}),))
relative_path_settings = ('prune',)
config_section = 'buildhtml application'
config_section_dependencies = ('applications',)
class OptionParser(frontend.OptionParser):
"""
Command-line option processing for the ``buildhtml.py`` front end.
"""
def check_values(self, values, args):
frontend.OptionParser.check_values(self, values, args)
values._source = None
return values
def check_args(self, args):
source = destination = None
if args:
self.values._directories = args
else:
self.values._directories = [os.getcwd()]
return source, destination
class Struct:
"""Stores data attributes for dotted-attribute access."""
def __init__(self, **keywordargs):
self.__dict__.update(keywordargs)
class Builder:
def __init__(self):
self.publishers = {
'': Struct(components=(pep.Reader, rst.Parser, pep_html.Writer,
SettingsSpec)),
'.txt': Struct(components=(rst.Parser, standalone.Reader,
html4css1.Writer, SettingsSpec),
reader_name='standalone',
writer_name='html'),
'PEPs': Struct(components=(rst.Parser, pep.Reader,
pep_html.Writer, SettingsSpec),
reader_name='pep',
writer_name='pep_html')}
"""Publisher-specific settings. Key '' is for the front-end script
itself. ``self.publishers[''].components`` must contain a superset of
all components used by individual publishers."""
self.setup_publishers()
def setup_publishers(self):
"""
Manage configurations for individual publishers.
Each publisher (combination of parser, reader, and writer) may have
its own configuration defaults, which must be kept separate from those
of the other publishers. Setting defaults are combined with the
config file settings and command-line options by
`self.get_settings()`.
"""
for name, publisher in self.publishers.items():
option_parser = OptionParser(
components=publisher.components, read_config_files=1,
usage=usage, description=description)
publisher.option_parser = option_parser
publisher.setting_defaults = option_parser.get_default_values()
frontend.make_paths_absolute(publisher.setting_defaults.__dict__,
option_parser.relative_path_settings)
publisher.config_settings = (
option_parser.get_standard_config_settings())
self.settings_spec = self.publishers[''].option_parser.parse_args(
values=frontend.Values()) # no defaults; just the cmdline opts
self.initial_settings = self.get_settings('')
def get_settings(self, publisher_name, directory=None):
"""
Return a settings object, from multiple sources.
Copy the setting defaults, overlay the startup config file settings,
then the local config file settings, then the command-line options.
Assumes the current directory has been set.
"""
publisher = self.publishers[publisher_name]
settings = frontend.Values(publisher.setting_defaults.__dict__)
settings.update(publisher.config_settings, publisher.option_parser)
if directory:
local_config = publisher.option_parser.get_config_file_settings(
os.path.join(directory, 'docutils.conf'))
frontend.make_paths_absolute(
local_config, publisher.option_parser.relative_path_settings,
directory)
settings.update(local_config, publisher.option_parser)
settings.update(self.settings_spec.__dict__, publisher.option_parser)
return settings
def run(self, directory=None, recurse=1):
recurse = recurse and self.initial_settings.recurse
if directory:
self.directories = [directory]
elif self.settings_spec._directories:
self.directories = self.settings_spec._directories
else:
self.directories = [os.getcwd()]
for directory in self.directories:
for root, dirs, files in os.walk(directory):
                # os.walk recurses down the tree by default;
                # influence this by modifying dirs in place.
if not recurse:
del dirs[:]
self.visit(root, files)
def visit(self, directory, names):
# BUG prune and ignore do not work
settings = self.get_settings('', directory)
errout = ErrorOutput(encoding=settings.error_encoding)
if settings.prune and (os.path.abspath(directory) in settings.prune):
print >>errout, ('/// ...Skipping directory (pruned): %s' %
directory)
sys.stderr.flush()
names[:] = []
return
if not self.initial_settings.silent:
print >>errout, '/// Processing directory: %s' % directory
sys.stderr.flush()
# settings.ignore grows many duplicate entries as we recurse
# if we add patterns in config files or on the command line.
for pattern in utils.uniq(settings.ignore):
for i in range(len(names) - 1, -1, -1):
if fnmatch(names[i], pattern):
# Modify in place!
del names[i]
prune = 0
for name in names:
if name.endswith('.txt'):
prune = self.process_txt(directory, name)
if prune:
break
def process_txt(self, directory, name):
if name.startswith('pep-'):
publisher = 'PEPs'
else:
publisher = '.txt'
settings = self.get_settings(publisher, directory)
errout = ErrorOutput(encoding=settings.error_encoding)
pub_struct = self.publishers[publisher]
if settings.prune and (directory in settings.prune):
return 1
settings._source = os.path.normpath(os.path.join(directory, name))
settings._destination = settings._source[:-4]+'.html'
if not self.initial_settings.silent:
print >>errout, ' ::: Processing: %s' % name
sys.stderr.flush()
try:
if not settings.dry_run:
core.publish_file(source_path=settings._source,
destination_path=settings._destination,
reader_name=pub_struct.reader_name,
parser_name='restructuredtext',
writer_name=pub_struct.writer_name,
settings=settings)
except ApplicationError, error:
print >>errout, ' %s' % ErrorString(error)
if __name__ == "__main__":
Builder().run()
|
apache-2.0
|
JoshWu/linux-at91
|
tools/perf/scripts/python/netdev-times.py
|
1544
|
15191
|
# Display the per-packet processing timeline and processing times.
# It helps us investigate networking and network device behavior.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
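# Example invocations (a sketch; assumes the script is driven through
# perf's bundled record/report helpers and that perf.data contains the
# net/irq/skb tracepoints handled below):
#   perf script record netdev-times
#   perf script report netdev-times tx dev=eth0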
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
all_event_list = []; # list of all tracepoint events related to this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
# which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value include time of NET_RX softirq-entry
# and a list which stacks receive
receive_hunk_list = []; # a list which include a sequence of receive events
rx_skb_list = []; # received packet list for matching
# skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
# tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which is freed
# options
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;
# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
# Calculate a time interval(msec) from src(nsec) to dst(nsec)
def diff_msec(src, dst):
return (dst - src) / 1000000.0
# Display a process of transmitting a packet
def print_transmit(hunk):
if dev != 0 and hunk['dev'].find(dev) < 0:
return
print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
(hunk['dev'], hunk['len'],
nsecs_secs(hunk['queue_t']),
nsecs_nsecs(hunk['queue_t'])/1000,
diff_msec(hunk['queue_t'], hunk['xmit_t']),
diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format for displaying rx packet processing
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display a process of received packets and interrupts associated with
# a NET_RX softirq
def print_receive(hunk):
show_hunk = 0
irq_list = hunk['irq_list']
cpu = irq_list[0]['cpu']
base_t = irq_list[0]['irq_ent_t']
# check if this hunk should be showed
if dev != 0:
for i in range(len(irq_list)):
if irq_list[i]['name'].find(dev) >= 0:
show_hunk = 1
break
else:
show_hunk = 1
if show_hunk == 0:
return
print "%d.%06dsec cpu=%d" % \
(nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
for i in range(len(irq_list)):
print PF_IRQ_ENTRY % \
(diff_msec(base_t, irq_list[i]['irq_ent_t']),
irq_list[i]['irq'], irq_list[i]['name'])
print PF_JOINT
irq_event_list = irq_list[i]['event_list']
for j in range(len(irq_event_list)):
irq_event = irq_event_list[j]
if irq_event['event'] == 'netif_rx':
print PF_NET_RX % \
(diff_msec(base_t, irq_event['time']),
irq_event['skbaddr'])
print PF_JOINT
print PF_SOFT_ENTRY % \
diff_msec(base_t, hunk['sirq_ent_t'])
print PF_JOINT
event_list = hunk['event_list']
for i in range(len(event_list)):
event = event_list[i]
if event['event_name'] == 'napi_poll':
print PF_NAPI_POLL % \
(diff_msec(base_t, event['event_t']), event['dev'])
if i == len(event_list) - 1:
print ""
else:
print PF_JOINT
else:
print PF_NET_RECV % \
(diff_msec(base_t, event['event_t']), event['skbaddr'],
event['len'])
if 'comm' in event.keys():
print PF_WJOINT
print PF_CPY_DGRAM % \
(diff_msec(base_t, event['comm_t']),
event['pid'], event['comm'])
elif 'handle' in event.keys():
print PF_WJOINT
if event['handle'] == "kfree_skb":
print PF_KFREE_SKB % \
(diff_msec(base_t,
event['comm_t']),
event['location'])
elif event['handle'] == "consume_skb":
print PF_CONS_SKB % \
diff_msec(base_t,
event['comm_t'])
print PF_JOINT
def trace_begin():
global show_tx
global show_rx
global dev
global debug
for i in range(len(sys.argv)):
if i == 0:
continue
arg = sys.argv[i]
if arg == 'tx':
show_tx = 1
elif arg =='rx':
show_rx = 1
elif arg.find('dev=',0, 4) >= 0:
dev = arg[4:]
elif arg == 'debug':
debug = 1
if show_tx == 0 and show_rx == 0:
show_tx = 1
show_rx = 1
def trace_end():
# order all events in time
all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
b[EINFO_IDX_TIME]))
# process all events
for i in range(len(all_event_list)):
event_info = all_event_list[i]
name = event_info[EINFO_IDX_NAME]
if name == 'irq__softirq_exit':
handle_irq_softirq_exit(event_info)
elif name == 'irq__softirq_entry':
handle_irq_softirq_entry(event_info)
elif name == 'irq__softirq_raise':
handle_irq_softirq_raise(event_info)
elif name == 'irq__irq_handler_entry':
handle_irq_handler_entry(event_info)
elif name == 'irq__irq_handler_exit':
handle_irq_handler_exit(event_info)
elif name == 'napi__napi_poll':
handle_napi_poll(event_info)
elif name == 'net__netif_receive_skb':
handle_netif_receive_skb(event_info)
elif name == 'net__netif_rx':
handle_netif_rx(event_info)
elif name == 'skb__skb_copy_datagram_iovec':
handle_skb_copy_datagram_iovec(event_info)
elif name == 'net__net_dev_queue':
handle_net_dev_queue(event_info)
elif name == 'net__net_dev_xmit':
handle_net_dev_xmit(event_info)
elif name == 'skb__kfree_skb':
handle_kfree_skb(event_info)
elif name == 'skb__consume_skb':
handle_consume_skb(event_info)
# display receive hunks
if show_rx:
for i in range(len(receive_hunk_list)):
print_receive(receive_hunk_list[i])
# display transmit hunks
if show_tx:
print " dev len Qdisc " \
" netdevice free"
for i in range(len(tx_free_list)):
print_transmit(tx_free_list[i])
if debug:
print "debug buffer status"
print "----------------------------"
print "xmit Qdisc:remain:%d overflow:%d" % \
(len(tx_queue_list), of_count_tx_queue_list)
print "xmit netdevice:remain:%d overflow:%d" % \
(len(tx_xmit_list), of_count_tx_xmit_list)
print "receive:remain:%d overflow:%d" % \
(len(rx_skb_list), of_count_rx_skb_list)
# called from perf, when it finds a corresponding event
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
callchain, irq, irq_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
irq, irq_name)
all_event_list.append(event_info)
def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, callchain, irq, ret):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
all_event_list.append(event_info)
def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, callchain, napi, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
napi, dev_name)
all_event_list.append(event_info)
def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm, callchain,
skbaddr, skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm, callchain,
skbaddr, skblen, rc, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, rc ,dev_name)
all_event_list.append(event_info)
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, callchain,
skbaddr, protocol, location):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, protocol, location)
all_event_list.append(event_info)
def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr)
all_event_list.append(event_info)
def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm, callchain,
skbaddr, skblen):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen)
all_event_list.append(event_info)
def handle_irq_handler_entry(event_info):
(name, context, cpu, time, pid, comm, irq, irq_name) = event_info
if cpu not in irq_dic.keys():
irq_dic[cpu] = []
irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
irq_dic[cpu].append(irq_record)
def handle_irq_handler_exit(event_info):
(name, context, cpu, time, pid, comm, irq, ret) = event_info
if cpu not in irq_dic.keys():
return
irq_record = irq_dic[cpu].pop()
if irq != irq_record['irq']:
return
irq_record.update({'irq_ext_t':time})
# if an irq doesn't include a NET_RX softirq, drop it.
if 'event_list' in irq_record.keys():
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_raise(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'sirq_raise'})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_entry(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}
def handle_irq_softirq_exit(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
irq_list = []
event_list = 0
if cpu in irq_dic.keys():
irq_list = irq_dic[cpu]
del irq_dic[cpu]
if cpu in net_rx_dic.keys():
sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
event_list = net_rx_dic[cpu]['event_list']
del net_rx_dic[cpu]
if irq_list == [] or event_list == 0:
return
rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
'irq_list':irq_list, 'event_list':event_list}
# merge information related to a NET_RX softirq
receive_hunk_list.append(rec_data)
def handle_napi_poll(event_info):
(name, context, cpu, time, pid, comm, napi, dev_name) = event_info
if cpu in net_rx_dic.keys():
event_list = net_rx_dic[cpu]['event_list']
rec_data = {'event_name':'napi_poll',
'dev':dev_name, 'event_t':time}
event_list.append(rec_data)
def handle_netif_rx(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'netif_rx',
'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_netif_receive_skb(event_info):
global of_count_rx_skb_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu in net_rx_dic.keys():
rec_data = {'event_name':'netif_receive_skb',
'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
event_list = net_rx_dic[cpu]['event_list']
event_list.append(rec_data)
rx_skb_list.insert(0, rec_data)
if len(rx_skb_list) > buffer_budget:
rx_skb_list.pop()
of_count_rx_skb_list += 1
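# (added commentary) rx_skb_list, tx_queue_list and tx_xmit_list act as
# bounded buffers of at most buffer_budget entries; once full, the oldest
# entry is dropped and the matching of_count_* overflow counter is bumped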
def handle_net_dev_queue(event_info):
global of_count_tx_queue_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
tx_queue_list.insert(0, skb)
if len(tx_queue_list) > buffer_budget:
tx_queue_list.pop()
of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
global of_count_tx_xmit_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, rc, dev_name) = event_info
if rc == 0: # NETDEV_TX_OK
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
skb['xmit_t'] = time
tx_xmit_list.insert(0, skb)
del tx_queue_list[i]
if len(tx_xmit_list) > buffer_budget:
tx_xmit_list.pop()
of_count_tx_xmit_list += 1
return
def handle_kfree_skb(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, protocol, location) = event_info
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
del tx_queue_list[i]
return
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if rec_data['skbaddr'] == skbaddr:
rec_data.update({'handle':"kfree_skb",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
def handle_consume_skb(event_info):
(name, context, cpu, time, pid, comm, skbaddr) = event_info
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
def handle_skb_copy_datagram_iovec(event_info):
(name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if skbaddr == rec_data['skbaddr']:
rec_data.update({'handle':"skb_copy_datagram_iovec",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
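# Usage sketch (an assumption, not part of the original script): this file is
# meant to be driven by perf's Python scripting engine, roughly:
#   perf record -e irq:softirq_entry -e irq:softirq_exit -e irq:softirq_raise \
#       -e irq:irq_handler_entry -e irq:irq_handler_exit -e napi:napi_poll \
#       -e net:netif_receive_skb -e net:net_dev_queue -e net:net_dev_xmit \
#       -e skb:kfree_skb -e skb:consume_skb -e skb:skb_copy_datagram_iovec -a
#   perf script -s <this-script>.py
# (the show_tx/show_rx/debug globals referenced above are assumed to be set
# by option parsing earlier in the script)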
|
gpl-2.0
|
gtko/CouchPotatoServer
|
libs/xmpp/simplexml.py
|
198
|
22791
|
## simplexml.py based on Mattew Allum's xmlstream.py
##
## Copyright (C) 2003-2005 Alexey "Snake" Nezhdanov
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2, or (at your option)
## any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
# $Id: simplexml.py,v 1.34 2009/03/03 10:24:02 normanr Exp $
"""Simplexml module provides xmpppy library with all needed tools to handle XML nodes and XML streams.
I'm personally using it in many other separate projects. It is designed to be as standalone as possible."""
import xml.parsers.expat
def XMLescape(txt):
"""Returns provided string with symbols & < > " replaced by their respective XML entities."""
# replace also FORM FEED and ESC, because they are not valid XML chars
return txt.replace("&", "&").replace("<", "<").replace(">", ">").replace('"', """).replace(u'\x0C', "").replace(u'\x1B', "")
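# Illustrative check (added commentary, not part of the original module): the
# ampersand must be replaced first, otherwise the "&" introduced by the other
# entity replacements would itself be escaped again.
# >>> XMLescape(u'a & b < "c"')
# u'a &amp; b &lt; &quot;c&quot;'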
ENCODING='utf-8'
def ustr(what):
"""Converts object "what" to unicode string using it's own __str__ method if accessible or unicode method otherwise."""
if isinstance(what, unicode): return what
try: r=what.__str__()
except AttributeError: r=str(what)
if not isinstance(r, unicode): return unicode(r,ENCODING)
return r
class Node(object):
""" Node class describes syntax of separate XML Node. It have a constructor that permits node creation
from set of "namespace name", attributes and payload of text strings and other nodes.
It does not natively support building node from text string and uses NodeBuilder class for that purpose.
After creation node can be mangled in many ways so it can be completely changed.
Also node can be serialised into string in one of two modes: default (where the textual representation
of node describes it exactly) and "fancy" - with whitespace added to make indentation and thus make
result more readable by human.
Node class have attribute FORCE_NODE_RECREATION that is defaults to False thus enabling fast node
replication from the some other node. The drawback of the fast way is that new node shares some
info with the "original" node that is changing the one node may influence the other. Though it is
rarely needed (in xmpppy it is never needed at all since I'm usually never using original node after
replication (and using replication only to move upwards on the classes tree).
"""
FORCE_NODE_RECREATION=0
def __init__(self, tag=None, attrs={}, payload=[], parent=None, nsp=None, node_built=False, node=None):
""" Takes "tag" argument as the name of node (prepended by namespace, if needed and separated from it
by a space), attrs dictionary as the set of arguments, payload list as the set of textual strings
and child nodes that this node carries within itself and "parent" argument that is another node
that this one will be the child of. Also the __init__ can be provided with "node" argument that is
either a text string containing exactly one node or another Node instance to begin with. If both
"node" and other arguments is provided then the node initially created as replica of "node"
provided and then modified to be compliant with other arguments."""
if node:
if self.FORCE_NODE_RECREATION and isinstance(node, Node):
node=str(node)
if not isinstance(node, Node):
node=NodeBuilder(node,self)
node_built = True
else:
self.name,self.namespace,self.attrs,self.data,self.kids,self.parent,self.nsd = node.name,node.namespace,{},[],[],node.parent,{}
for key in node.attrs.keys(): self.attrs[key]=node.attrs[key]
for data in node.data: self.data.append(data)
for kid in node.kids: self.kids.append(kid)
for k,v in node.nsd.items(): self.nsd[k] = v
else: self.name,self.namespace,self.attrs,self.data,self.kids,self.parent,self.nsd = 'tag','',{},[],[],None,{}
if parent:
self.parent = parent
self.nsp_cache = {}
if nsp:
for k,v in nsp.items(): self.nsp_cache[k] = v
for attr,val in attrs.items():
if attr == 'xmlns':
self.nsd[u''] = val
elif attr.startswith('xmlns:'):
self.nsd[attr[6:]] = val
self.attrs[attr]=attrs[attr]
if tag:
if node_built:
pfx,self.name = (['']+tag.split(':'))[-2:]
self.namespace = self.lookup_nsp(pfx)
else:
if ' ' in tag:
self.namespace,self.name = tag.split()
else:
self.name = tag
if isinstance(payload, basestring): payload=[payload]
for i in payload:
if isinstance(i, Node): self.addChild(node=i)
else: self.data.append(ustr(i))
def lookup_nsp(self,pfx=''):
ns = self.nsd.get(pfx,None)
if ns is None:
ns = self.nsp_cache.get(pfx,None)
if ns is None:
if self.parent:
ns = self.parent.lookup_nsp(pfx)
self.nsp_cache[pfx] = ns
else:
return 'http://www.gajim.org/xmlns/undeclared'
return ns
def __str__(self,fancy=0):
""" Method used to dump node into textual representation.
if "fancy" argument is set to True produces indented output for readability."""
s = (fancy-1) * 2 * ' ' + "<" + self.name
if self.namespace:
if not self.parent or self.parent.namespace!=self.namespace:
if 'xmlns' not in self.attrs:
s = s + ' xmlns="%s"'%self.namespace
for key in self.attrs.keys():
val = ustr(self.attrs[key])
s = s + ' %s="%s"' % ( key, XMLescape(val) )
s = s + ">"
cnt = 0
if self.kids:
if fancy: s = s + "\n"
for a in self.kids:
if not fancy and (len(self.data)-1)>=cnt: s=s+XMLescape(self.data[cnt])
elif (len(self.data)-1)>=cnt: s=s+XMLescape(self.data[cnt].strip())
if isinstance(a, Node):
s = s + a.__str__(fancy and fancy+1)
elif a:
s = s + a.__str__()
cnt=cnt+1
if not fancy and (len(self.data)-1) >= cnt: s = s + XMLescape(self.data[cnt])
elif (len(self.data)-1) >= cnt: s = s + XMLescape(self.data[cnt].strip())
if not self.kids and s.endswith('>'):
s=s[:-1]+' />'
if fancy: s = s + "\n"
else:
if fancy and not self.data: s = s + (fancy-1) * 2 * ' '
s = s + "</" + self.name + ">"
if fancy: s = s + "\n"
return s
def getCDATA(self):
""" Serialise node, dropping all tags and leaving CDATA intact.
That is effectively kills all formatiing, leaving only text were contained in XML.
"""
s = ""
cnt = 0
if self.kids:
for a in self.kids:
s=s+self.data[cnt]
if a: s = s + a.getCDATA()
cnt=cnt+1
if (len(self.data)-1) >= cnt: s = s + self.data[cnt]
return s
def addChild(self, name=None, attrs={}, payload=[], namespace=None, node=None):
""" If "node" argument is provided, adds it as child node. Else creates new node from
the other arguments' values and adds it as well."""
if 'xmlns' in attrs:
raise AttributeError("Use namespace=x instead of attrs={'xmlns':x}")
if node:
newnode=node
node.parent = self
else: newnode=Node(tag=name, parent=self, attrs=attrs, payload=payload)
if namespace:
newnode.setNamespace(namespace)
self.kids.append(newnode)
self.data.append(u'')
return newnode
def addData(self, data):
""" Adds some CDATA to node. """
self.data.append(ustr(data))
self.kids.append(None)
def clearData(self):
""" Removes all CDATA from the node. """
self.data=[]
def delAttr(self, key):
""" Deletes an attribute "key" """
del self.attrs[key]
def delChild(self, node, attrs={}):
""" Deletes the "node" from the node's childs list, if "node" is an instance.
Else deletes the first node that have specified name and (optionally) attributes. """
if not isinstance(node, Node): node=self.getTag(node,attrs)
self.kids[self.kids.index(node)]=None
return node
def getAttrs(self):
""" Returns all node's attributes as dictionary. """
return self.attrs
def getAttr(self, key):
""" Returns value of specified attribute. """
try: return self.attrs[key]
except: return None
def getChildren(self):
""" Returns all node's child nodes as list. """
return self.kids
def getData(self):
""" Returns all node CDATA as string (concatenated). """
return ''.join(self.data)
def getName(self):
""" Returns the name of node """
return self.name
def getNamespace(self):
""" Returns the namespace of node """
return self.namespace
def getParent(self):
""" Returns the parent of node (if present). """
return self.parent
def getPayload(self):
""" Return the payload of node i.e. list of child nodes and CDATA entries.
F.e. for "<node>text1<nodea/><nodeb/> text2</node>" will be returned list:
['text1', <nodea instance>, <nodeb instance>, ' text2']. """
ret=[]
for i in range(max(len(self.data),len(self.kids))):
if i < len(self.data) and self.data[i]: ret.append(self.data[i])
if i < len(self.kids) and self.kids[i]: ret.append(self.kids[i])
return ret
def getTag(self, name, attrs={}, namespace=None):
""" Filters all child nodes using specified arguments as filter.
Returns the first found or None if not found. """
return self.getTags(name, attrs, namespace, one=1)
def getTagAttr(self,tag,attr):
""" Returns attribute value of the child with specified name (or None if no such attribute)."""
try: return self.getTag(tag).attrs[attr]
except: return None
def getTagData(self,tag):
""" Returns cocatenated CDATA of the child with specified name."""
try: return self.getTag(tag).getData()
except: return None
def getTags(self, name, attrs={}, namespace=None, one=0):
""" Filters all child nodes using specified arguments as filter.
Returns the list of nodes found. """
nodes=[]
for node in self.kids:
if not node: continue
if namespace and namespace!=node.getNamespace(): continue
if node.getName() == name:
for key in attrs.keys():
if key not in node.attrs or node.attrs[key]!=attrs[key]: break
else: nodes.append(node)
if one and nodes: return nodes[0]
if not one: return nodes
def iterTags(self, name, attrs={}, namespace=None):
""" Iterate over all children using specified arguments as filter. """
for node in self.kids:
if not node: continue
if namespace is not None and namespace!=node.getNamespace(): continue
if node.getName() == name:
for key in attrs.keys():
if key not in node.attrs or \
node.attrs[key]!=attrs[key]: break
else:
yield node
def setAttr(self, key, val):
""" Sets attribute "key" with the value "val". """
self.attrs[key]=val
def setData(self, data):
""" Sets node's CDATA to provided string. Resets all previous CDATA!"""
self.data=[ustr(data)]
def setName(self,val):
""" Changes the node name. """
self.name = val
def setNamespace(self, namespace):
""" Changes the node namespace. """
self.namespace=namespace
def setParent(self, node):
""" Sets node's parent to "node". WARNING: do not checks if the parent already present
and not removes the node from the list of childs of previous parent. """
self.parent = node
def setPayload(self,payload,add=0):
""" Sets node payload according to the list specified. WARNING: completely replaces all node's
previous content. If you wish just to add child or CDATA - use addData or addChild methods. """
if isinstance(payload, basestring): payload=[payload]
if add: self.kids+=payload
else: self.kids=payload
def setTag(self, name, attrs={}, namespace=None):
""" Same as getTag but if the node with specified namespace/attributes not found, creates such
node and returns it. """
node=self.getTags(name, attrs, namespace=namespace, one=1)
if node: return node
else: return self.addChild(name, attrs, namespace=namespace)
def setTagAttr(self,tag,attr,val):
""" Creates new node (if not already present) with name "tag"
and sets it's attribute "attr" to value "val". """
try: self.getTag(tag).attrs[attr]=val
except: self.addChild(tag,attrs={attr:val})
def setTagData(self,tag,val,attrs={}):
""" Creates new node (if not already present) with name "tag" and (optionally) attributes "attrs"
and sets it's CDATA to string "val". """
try: self.getTag(tag,attrs).setData(ustr(val))
except: self.addChild(tag,attrs,payload=[ustr(val)])
def has_attr(self,key):
""" Checks if node have attribute "key"."""
return key in self.attrs
def __getitem__(self,item):
""" Returns node's attribute "item" value. """
return self.getAttr(item)
def __setitem__(self,item,val):
""" Sets node's attribute "item" value. """
return self.setAttr(item,val)
def __delitem__(self,item):
""" Deletes node's attribute "item". """
return self.delAttr(item)
def __getattr__(self,attr):
""" Reduce memory usage caused by T/NT classes - use memory only when needed. """
if attr=='T':
self.T=T(self)
return self.T
if attr=='NT':
self.NT=NT(self)
return self.NT
raise AttributeError
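# Usage sketch (illustrative, not part of the original module):
# >>> n = Node('message', attrs={'to': 'user@server'}, payload=['hi'])
# >>> n.__str__()
# u'<message to="user@server">hi</message>'
# >>> n.__str__(fancy=1)   # same tree, serialised with indentation/newlines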
class T:
""" Auxiliary class used to quick access to node's child nodes. """
def __init__(self,node): self.__dict__['node']=node
def __getattr__(self,attr): return self.node.getTag(attr)
def __setattr__(self,attr,val):
if isinstance(val,Node): Node.__init__(self.node.setTag(attr),node=val)
else: return self.node.setTagData(attr,val)
def __delattr__(self,attr): return self.node.delChild(attr)
class NT(T):
""" Auxiliary class used to quick create node's child nodes. """
def __getattr__(self,attr): return self.node.addChild(attr)
def __setattr__(self,attr,val):
if isinstance(val,Node): self.node.addChild(attr,node=val)
else: return self.node.addChild(attr,payload=[val])
DBG_NODEBUILDER = 'nodebuilder'
class NodeBuilder:
""" Builds a Node class minidom from data parsed to it. This class used for two purposes:
1. Creation an XML Node from a textual representation. F.e. reading a config file. See an XML2Node method.
2. Handling an incoming XML stream. This is done by mangling
the __dispatch_depth parameter and redefining the dispatch method.
You do not need to use this class directly if you do not designing your own XML handler."""
def __init__(self,data=None,initial_node=None):
""" Takes two optional parameters: "data" and "initial_node".
By default class initialised with empty Node class instance.
Though, if "initial_node" is provided it used as "starting point".
You can think about it as of "node upgrade".
"data" (if provided) feeded to parser immidiatedly after instance init.
"""
self.DEBUG(DBG_NODEBUILDER, "Preparing to handle incoming XML stream.", 'start')
self._parser = xml.parsers.expat.ParserCreate()
self._parser.StartElementHandler = self.starttag
self._parser.EndElementHandler = self.endtag
self._parser.CharacterDataHandler = self.handle_cdata
self._parser.StartNamespaceDeclHandler = self.handle_namespace_start
self._parser.buffer_text = True
self.Parse = self._parser.Parse
self.__depth = 0
self.__last_depth = 0
self.__max_depth = 0
self._dispatch_depth = 1
self._document_attrs = None
self._document_nsp = None
self._mini_dom=initial_node
self.last_is_data = 1
self._ptr=None
self.data_buffer = None
self.streamError = ''
if data:
self._parser.Parse(data,1)
def check_data_buffer(self):
if self.data_buffer:
self._ptr.data.append(''.join(self.data_buffer))
del self.data_buffer[:]
self.data_buffer = None
def destroy(self):
""" Method used to allow class instance to be garbage-collected. """
self.check_data_buffer()
self._parser.StartElementHandler = None
self._parser.EndElementHandler = None
self._parser.CharacterDataHandler = None
self._parser.StartNamespaceDeclHandler = None
def starttag(self, tag, attrs):
"""XML Parser callback. Used internally"""
self.check_data_buffer()
self._inc_depth()
self.DEBUG(DBG_NODEBUILDER, "DEPTH -> %i , tag -> %s, attrs -> %s" % (self.__depth, tag, `attrs`), 'down')
if self.__depth == self._dispatch_depth:
if not self._mini_dom :
self._mini_dom = Node(tag=tag, attrs=attrs, nsp = self._document_nsp, node_built=True)
else:
Node.__init__(self._mini_dom,tag=tag, attrs=attrs, nsp = self._document_nsp, node_built=True)
self._ptr = self._mini_dom
elif self.__depth > self._dispatch_depth:
self._ptr.kids.append(Node(tag=tag,parent=self._ptr,attrs=attrs, node_built=True))
self._ptr = self._ptr.kids[-1]
if self.__depth == 1:
self._document_attrs = {}
self._document_nsp = {}
nsp, name = (['']+tag.split(':'))[-2:]
for attr,val in attrs.items():
if attr == 'xmlns':
self._document_nsp[u''] = val
elif attr.startswith('xmlns:'):
self._document_nsp[attr[6:]] = val
else:
self._document_attrs[attr] = val
ns = self._document_nsp.get(nsp, 'http://www.gajim.org/xmlns/undeclared-root')
try:
self.stream_header_received(ns, name, attrs)
except ValueError, e:
self._document_attrs = None
raise ValueError(str(e))
if not self.last_is_data and self._ptr.parent:
self._ptr.parent.data.append('')
self.last_is_data = 0
def endtag(self, tag ):
"""XML Parser callback. Used internally"""
self.DEBUG(DBG_NODEBUILDER, "DEPTH -> %i , tag -> %s" % (self.__depth, tag), 'up')
self.check_data_buffer()
if self.__depth == self._dispatch_depth:
if self._mini_dom.getName() == 'error':
self.streamError = self._mini_dom.getChildren()[0].getName()
self.dispatch(self._mini_dom)
elif self.__depth > self._dispatch_depth:
self._ptr = self._ptr.parent
else:
self.DEBUG(DBG_NODEBUILDER, "Got higher than dispatch level. Stream terminated?", 'stop')
self._dec_depth()
self.last_is_data = 0
if self.__depth == 0: self.stream_footer_received()
def handle_cdata(self, data):
"""XML Parser callback. Used internally"""
self.DEBUG(DBG_NODEBUILDER, data, 'data')
if self.last_is_data:
if self.data_buffer:
self.data_buffer.append(data)
elif self._ptr:
self.data_buffer = [data]
self.last_is_data = 1
def handle_namespace_start(self, prefix, uri):
"""XML Parser callback. Used internally"""
self.check_data_buffer()
def DEBUG(self, level, text, comment=None):
""" Gets all NodeBuilder walking events. Can be used for debugging if redefined."""
def getDom(self):
""" Returns just built Node. """
self.check_data_buffer()
return self._mini_dom
def dispatch(self,stanza):
""" Gets called when the NodeBuilder reaches some level of depth on it's way up with the built
node as argument. Can be redefined to convert incoming XML stanzas to program events. """
def stream_header_received(self,ns,tag,attrs):
""" Method called when stream just opened. """
self.check_data_buffer()
def stream_footer_received(self):
""" Method called when stream just closed. """
self.check_data_buffer()
def has_received_endtag(self, level=0):
""" Return True if at least one end tag was seen (at level) """
return self.__depth <= level and self.__max_depth > level
def _inc_depth(self):
self.__last_depth = self.__depth
self.__depth += 1
self.__max_depth = max(self.__depth, self.__max_depth)
def _dec_depth(self):
self.__last_depth = self.__depth
self.__depth -= 1
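# Stream-handling sketch (illustrative, not from the original file): subclass
# NodeBuilder and redefine dispatch() to receive each complete child of the
# stream root while feeding the parser incrementally:
# class StanzaBuilder(NodeBuilder):
#     def dispatch(self, stanza):
#         print "stanza received:", stanza.getName()
# builder = StanzaBuilder()
# builder._dispatch_depth = 2           # dispatch children of the root element
# builder.Parse('<stream><message/>')   # dispatch() fires once for <message/>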
def XML2Node(xml):
""" Converts supplied textual string into XML node. Handy f.e. for reading configuration file.
Raises xml.parser.expat.parsererror if provided string is not well-formed XML. """
return NodeBuilder(xml).getDom()
def BadXML2Node(xml):
""" Converts supplied textual string into XML node. Survives if xml data is cutted half way round.
I.e. "<html>some text <br>some more text". Will raise xml.parser.expat.parsererror on misplaced
tags though. F.e. "<b>some text <br>some more text</b>" will not work."""
return NodeBuilder(xml).getDom()
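# Quick round-trip sketch (illustrative, not part of the original file):
# >>> cfg = XML2Node('<config><opt name="x">1</opt></config>')
# >>> cfg.getTag('opt').getAttr('name'), cfg.getTag('opt').getData()
# (u'x', u'1')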
|
gpl-3.0
|
fbarreir/panda-server
|
pandaserver/test/deleteJobs.py
|
2
|
5897
|
import os
import re
import sys
import time
import fcntl
import types
import shelve
import random
import datetime
import commands
import threading
import userinterface.Client as Client
from dataservice.DDM import ddm
from dataservice.DDM import dashBorad
from taskbuffer.OraDBProxy import DBProxy
from taskbuffer.TaskBuffer import taskBuffer
from pandalogger.PandaLogger import PandaLogger
from jobdispatcher.Watcher import Watcher
from brokerage.SiteMapper import SiteMapper
from dataservice.Adder import Adder
from dataservice.Finisher import Finisher
from dataservice.MailUtils import MailUtils
from taskbuffer import ProcessGroups
import brokerage.broker_util
import brokerage.broker
import taskbuffer.ErrorCode
import dataservice.DDM
# password
from config import panda_config
passwd = panda_config.dbpasswd
# logger
_logger = PandaLogger().getLogger('deleteJobs')
_logger.debug("===================== start =====================")
# memory checker
def _memoryCheck(str):
try:
proc_status = '/proc/%d/status' % os.getpid()
procfile = open(proc_status)
name = ""
vmSize = ""
vmRSS = ""
# extract Name,VmSize,VmRSS
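# (added commentary) /proc/<pid>/status lines look like "VmSize:  123456 kB";
# joining the split fields below yields a compact value such as "123456kB"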
for line in procfile:
if line.startswith("Name:"):
name = line.split()[-1]
continue
if line.startswith("VmSize:"):
vmSize = ""
for item in line.split()[1:]:
vmSize += item
continue
if line.startswith("VmRSS:"):
vmRSS = ""
for item in line.split()[1:]:
vmRSS += item
continue
procfile.close()
_logger.debug('MemCheck - %s Name=%s VSZ=%s RSS=%s : %s' % (os.getpid(),name,vmSize,vmRSS,str))
except:
type, value, traceBack = sys.exc_info()
_logger.error("memoryCheck() : %s %s" % (type,value))
_logger.debug('MemCheck - %s unknown : %s' % (os.getpid(),str))
return
_memoryCheck("start")
# kill old process
try:
# time limit
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=2)
# get process list
scriptName = sys.argv[0]
out = commands.getoutput('ps axo user,pid,lstart,args | grep %s' % scriptName)
for line in out.split('\n'):
items = line.split()
# owned process
if not items[0] in ['sm','atlpan','pansrv','root']: # ['os.getlogin()']: doesn't work in cron
continue
# look for python
if re.search('python',line) == None:
continue
# PID
pid = items[1]
# start time
timeM = re.search('(\S+\s+\d+ \d+:\d+:\d+ \d+)',line)
startTime = datetime.datetime(*time.strptime(timeM.group(1),'%b %d %H:%M:%S %Y')[:6])
# kill old process
if startTime < timeLimit:
_logger.debug("old process : %s %s" % (pid,startTime))
_logger.debug(line)
commands.getoutput('kill -9 %s' % pid)
except:
type, value, traceBack = sys.exc_info()
_logger.error("kill process : %s %s" % (type,value))
# instantiate TB
taskBuffer.init(panda_config.dbhost,panda_config.dbpasswd,nDBConnection=1)
# instantiate sitemapper
siteMapper = SiteMapper(taskBuffer)
# table names
jobATableName = "ATLAS_PANDAARCH.jobsArchived"
filesATableName = "ATLAS_PANDAARCH.filesTable_ARCH"
paramATableName = "ATLAS_PANDAARCH.jobParamsTable_ARCH"
metaATableName = "ATLAS_PANDAARCH.metaTable_ARCH"
# time limit
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(days=3)
# delete
_logger.debug("get PandaIDs for Delete")
sql = "SELECT COUNT(*) FROM ATLAS_PANDA.jobsArchived4 WHERE modificationTime<:modificationTime"
varMap = {}
varMap[':modificationTime'] = timeLimit
status,res = taskBuffer.querySQLS(sql,varMap)
if res != None:
tmpTotal = res[0][0]
else:
tmpTotal = None
maxBunch = 1000
nBunch = 500
tmpIndex = 0
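# delete in randomised bunches: each pass fetches up to maxBunch candidate
# rows, shuffles them and deletes at most nBunch; the loop ends once a fetch
# returns fewer than nBunch rows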
while True:
sql = "SELECT PandaID,modificationTime FROM ATLAS_PANDA.jobsArchived4 "
sql += "WHERE modificationTime<:modificationTime AND archivedFlag=:archivedFlag AND rownum<=:rowRange"
varMap = {}
varMap[':modificationTime'] = timeLimit
varMap[':archivedFlag'] = 1
varMap[':rowRange'] = maxBunch
status,res = taskBuffer.querySQLS(sql,varMap)
if res == None:
_logger.error("failed to get PandaIDs to be deleted")
break
else:
_logger.debug("got %s for deletion" % len(res))
if len(res) == 0:
_logger.debug("no jobs left for for deletion")
break
else:
maxBunch = len(res)
random.shuffle(res)
res = res[:nBunch]
# loop over all jobs
for (id,srcEndTime) in res:
tmpIndex += 1
try:
# check
sql = "SELECT PandaID from %s WHERE PandaID=:PandaID" % jobATableName
varMap = {}
varMap[':PandaID'] = id
status,check = taskBuffer.querySQLS(sql,varMap)
if check == None or len(check) == 0:
# no record in ArchivedDB
_logger.error("No backup for %s" % id)
else:
# delete
_logger.debug("DEL %s : endTime %s" % (id,srcEndTime))
proxyS = taskBuffer.proxyPool.getProxy()
proxyS.deleteJobSimple(id)
taskBuffer.proxyPool.putProxy(proxyS)
if tmpIndex % 1000 == 1:
_logger.debug(" deleted %s/%s" % (tmpIndex,tmpTotal))
except:
pass
# terminate
if maxBunch < nBunch:
break
_logger.debug("===================== end =====================")
|
apache-2.0
|
TheMutley/openpilot
|
pyextra/gunicorn/six.py
|
320
|
27344
|
"""Utilities for writing code that runs on Python 2 and 3"""
# Copyright (c) 2010-2014 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
import functools
import operator
import sys
import types
__author__ = "Benjamin Peterson <[email protected]>"
__version__ = "1.8.0"
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
if sys.platform.startswith("java"):
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X(object):
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
def __init__(self, name):
self.name = name
def __get__(self, obj, tp):
result = self._resolve()
setattr(obj, self.name, result) # Invokes __set__.
# This is a bit ugly, but it avoids running this again.
delattr(obj.__class__, self.name)
return result
class MovedModule(_LazyDescr):
def __init__(self, name, old, new=None):
super(MovedModule, self).__init__(name)
if PY3:
if new is None:
new = name
self.mod = new
else:
self.mod = old
def _resolve(self):
return _import_module(self.mod)
def __getattr__(self, attr):
_module = self._resolve()
value = getattr(_module, attr)
setattr(self, attr, value)
return value
class _LazyModule(types.ModuleType):
def __init__(self, name):
super(_LazyModule, self).__init__(name)
self.__doc__ = self.__class__.__doc__
def __dir__(self):
attrs = ["__doc__", "__name__"]
attrs += [attr.name for attr in self._moved_attributes]
return attrs
# Subclasses should override this
_moved_attributes = []
class MovedAttribute(_LazyDescr):
def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
super(MovedAttribute, self).__init__(name)
if PY3:
if new_mod is None:
new_mod = name
self.mod = new_mod
if new_attr is None:
if old_attr is None:
new_attr = name
else:
new_attr = old_attr
self.attr = new_attr
else:
self.mod = old_mod
if old_attr is None:
old_attr = name
self.attr = old_attr
def _resolve(self):
module = _import_module(self.mod)
return getattr(module, self.attr)
class _SixMetaPathImporter(object):
"""
A meta path importer to import six.moves and its submodules.
This class implements a PEP302 finder and loader. It should be compatible
with Python 2.5 and all existing versions of Python3
"""
def __init__(self, six_module_name):
self.name = six_module_name
self.known_modules = {}
def _add_module(self, mod, *fullnames):
for fullname in fullnames:
self.known_modules[self.name + "." + fullname] = mod
def _get_module(self, fullname):
return self.known_modules[self.name + "." + fullname]
def find_module(self, fullname, path=None):
if fullname in self.known_modules:
return self
return None
def __get_module(self, fullname):
try:
return self.known_modules[fullname]
except KeyError:
raise ImportError("This loader does not know module " + fullname)
def load_module(self, fullname):
try:
# in case of a reload
return sys.modules[fullname]
except KeyError:
pass
mod = self.__get_module(fullname)
if isinstance(mod, MovedModule):
mod = mod._resolve()
else:
mod.__loader__ = self
sys.modules[fullname] = mod
return mod
def is_package(self, fullname):
"""
Return true, if the named module is a package.
We need this method to get correct spec objects with
Python 3.4 (see PEP451)
"""
return hasattr(self.__get_module(fullname), "__path__")
def get_code(self, fullname):
"""Return None
Required, if is_package is implemented"""
self.__get_module(fullname) # eventually raises ImportError
return None
get_source = get_code # same as get_code
_importer = _SixMetaPathImporter(__name__)
class _MovedItems(_LazyModule):
"""Lazy loading of moved objects"""
__path__ = [] # mark as package
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("intern", "__builtin__", "sys"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("UserDict", "UserDict", "collections"),
MovedAttribute("UserList", "UserList", "collections"),
MovedAttribute("UserString", "UserString", "collections"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("copyreg", "copy_reg"),
MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("_thread", "thread", "_thread"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
if isinstance(attr, MovedModule):
_importer._add_module(attr, "moves." + attr.name)
del attr
_MovedItems._moved_attributes = _moved_attributes
moves = _MovedItems(__name__ + ".moves")
_importer._add_module(moves, "moves")
class Module_six_moves_urllib_parse(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_parse"""
_urllib_parse_moved_attributes = [
MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
MovedAttribute("urljoin", "urlparse", "urllib.parse"),
MovedAttribute("urlparse", "urlparse", "urllib.parse"),
MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
MovedAttribute("quote", "urllib", "urllib.parse"),
MovedAttribute("quote_plus", "urllib", "urllib.parse"),
MovedAttribute("unquote", "urllib", "urllib.parse"),
MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
MovedAttribute("urlencode", "urllib", "urllib.parse"),
MovedAttribute("splitquery", "urllib", "urllib.parse"),
MovedAttribute("splittag", "urllib", "urllib.parse"),
MovedAttribute("splituser", "urllib", "urllib.parse"),
MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
MovedAttribute("uses_params", "urlparse", "urllib.parse"),
MovedAttribute("uses_query", "urlparse", "urllib.parse"),
MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr
Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
"moves.urllib_parse", "moves.urllib.parse")
class Module_six_moves_urllib_error(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_error"""
_urllib_error_moved_attributes = [
MovedAttribute("URLError", "urllib2", "urllib.error"),
MovedAttribute("HTTPError", "urllib2", "urllib.error"),
MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr
Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
"moves.urllib_error", "moves.urllib.error")
class Module_six_moves_urllib_request(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_request"""
_urllib_request_moved_attributes = [
MovedAttribute("urlopen", "urllib2", "urllib.request"),
MovedAttribute("install_opener", "urllib2", "urllib.request"),
MovedAttribute("build_opener", "urllib2", "urllib.request"),
MovedAttribute("pathname2url", "urllib", "urllib.request"),
MovedAttribute("url2pathname", "urllib", "urllib.request"),
MovedAttribute("getproxies", "urllib", "urllib.request"),
MovedAttribute("Request", "urllib2", "urllib.request"),
MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
MovedAttribute("FileHandler", "urllib2", "urllib.request"),
MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
MovedAttribute("urlretrieve", "urllib", "urllib.request"),
MovedAttribute("urlcleanup", "urllib", "urllib.request"),
MovedAttribute("URLopener", "urllib", "urllib.request"),
MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr
Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
"moves.urllib_request", "moves.urllib.request")
class Module_six_moves_urllib_response(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_response"""
_urllib_response_moved_attributes = [
MovedAttribute("addbase", "urllib", "urllib.response"),
MovedAttribute("addclosehook", "urllib", "urllib.response"),
MovedAttribute("addinfo", "urllib", "urllib.response"),
MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for attr in _urllib_response_moved_attributes:
setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr
Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
"moves.urllib_response", "moves.urllib.response")
class Module_six_moves_urllib_robotparser(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_robotparser"""
_urllib_robotparser_moved_attributes = [
MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for attr in _urllib_robotparser_moved_attributes:
setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr
Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
"moves.urllib_robotparser", "moves.urllib.robotparser")
class Module_six_moves_urllib(types.ModuleType):
"""Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
__path__ = [] # mark as package
parse = _importer._get_module("moves.urllib_parse")
error = _importer._get_module("moves.urllib_error")
request = _importer._get_module("moves.urllib_request")
response = _importer._get_module("moves.urllib_response")
robotparser = _importer._get_module("moves.urllib_robotparser")
def __dir__(self):
return ['parse', 'error', 'request', 'response', 'robotparser']
_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
"moves.urllib")
def add_move(move):
"""Add an item to six.moves."""
setattr(_MovedItems, move.name, move)
def remove_move(name):
"""Remove item from six.moves."""
try:
delattr(_MovedItems, name)
except AttributeError:
try:
del moves.__dict__[name]
except KeyError:
raise AttributeError("no such move, %r" % (name,))
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_closure = "__closure__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_func_globals = "__globals__"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_closure = "func_closure"
_func_code = "func_code"
_func_defaults = "func_defaults"
_func_globals = "func_globals"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
try:
callable = callable
except NameError:
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
if PY3:
def get_unbound_function(unbound):
return unbound
create_bound_method = types.MethodType
Iterator = object
else:
def get_unbound_function(unbound):
return unbound.im_func
def create_bound_method(func, obj):
return types.MethodType(func, obj, obj.__class__)
class Iterator(object):
def next(self):
return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
if PY3:
def iterkeys(d, **kw):
return iter(d.keys(**kw))
def itervalues(d, **kw):
return iter(d.values(**kw))
def iteritems(d, **kw):
return iter(d.items(**kw))
def iterlists(d, **kw):
return iter(d.lists(**kw))
else:
def iterkeys(d, **kw):
return iter(d.iterkeys(**kw))
def itervalues(d, **kw):
return iter(d.itervalues(**kw))
def iteritems(d, **kw):
return iter(d.iteritems(**kw))
def iterlists(d, **kw):
return iter(d.iterlists(**kw))
_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
_add_doc(iteritems,
"Return an iterator over the (key, value) pairs of a dictionary.")
_add_doc(iterlists,
"Return an iterator over the (key, [values]) pairs of a dictionary.")
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
unichr = chr
if sys.version_info[1] <= 1:
def int2byte(i):
return bytes((i,))
else:
# This is about 2x faster than the implementation above on 3.2+
int2byte = operator.methodcaller("to_bytes", 1, "big")
byte2int = operator.itemgetter(0)
indexbytes = operator.getitem
iterbytes = iter
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
else:
def b(s):
return s
# Workaround for standalone backslash
def u(s):
return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
unichr = unichr
int2byte = chr
def byte2int(bs):
return ord(bs[0])
def indexbytes(buf, i):
return ord(buf[i])
def iterbytes(buf):
return (ord(byte) for byte in buf)
import StringIO
StringIO = BytesIO = StringIO.StringIO
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
if PY3:
exec_ = getattr(moves.builtins, "exec")
def reraise(tp, value, tb=None):
if value is None:
value = tp()
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
else:
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
exec_("""def reraise(tp, value, tb=None):
raise tp, value, tb
""")
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
def print_(*args, **kwargs):
"""The new-style print function for Python 2.4 and 2.5."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
# If the file has an encoding, encode unicode with it.
if (isinstance(fp, file) and
isinstance(data, unicode) and
fp.encoding is not None):
errors = getattr(fp, "errors", None)
if errors is None:
errors = "strict"
data = data.encode(fp.encoding, errors)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
_add_doc(reraise, """Reraise an exception.""")
if sys.version_info[0:2] < (3, 4):
def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
def wrapper(f):
f = functools.wraps(wrapped)(f)
f.__wrapped__ = wrapped
return f
return wrapper
else:
wraps = functools.wraps
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
class metaclass(meta):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
return type.__new__(metaclass, 'temporary_class', (), {})
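# Usage sketch (illustrative):
# class Meta(type): pass
# class Base(object): pass
# class MyClass(with_metaclass(Meta, Base)):
#     pass
# # on both Python 2 and 3, type(MyClass) is Meta and MyClass subclasses Base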
def add_metaclass(metaclass):
"""Class decorator for creating a class with a metaclass."""
def wrapper(cls):
orig_vars = cls.__dict__.copy()
slots = orig_vars.get('__slots__')
if slots is not None:
if isinstance(slots, str):
slots = [slots]
for slots_var in slots:
orig_vars.pop(slots_var)
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
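# Equivalent decorator form (illustrative):
# @add_metaclass(Meta)
# class MyOtherClass(Base):
#     pass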
# Complete the moves implementation.
# This code is at the end of this module to speed up module loading.
# Turn this module into a package.
__path__ = [] # required for PEP 302 and PEP 451
__package__ = __name__ # see PEP 366 @ReservedAssignment
if globals().get("__spec__") is not None:
__spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
# Remove other six meta path importers, since they cause problems. This can
# happen if six is removed from sys.modules and then reloaded. (Setuptools does
# this for some reason.)
if sys.meta_path:
for i, importer in enumerate(sys.meta_path):
# Here's some real nastiness: Another "instance" of the six module might
# be floating around. Therefore, we can't use isinstance() to check for
# the six meta path importer, since the other six instance will have
# inserted an importer with different class.
if (type(importer).__name__ == "_SixMetaPathImporter" and
importer.name == __name__):
del sys.meta_path[i]
break
del i, importer
# Finally, add the importer to the meta path import hook.
sys.meta_path.append(_importer)
|
mit
|
savoirfairelinux/OpenUpgrade
|
addons/website/models/ir_http.py
|
20
|
10228
|
# -*- coding: utf-8 -*-
import datetime
import hashlib
import logging
import re
import traceback
import werkzeug
import werkzeug.routing
import openerp
from openerp.addons.base import ir
from openerp.addons.base.ir import ir_qweb
from openerp.addons.website.models.website import slug, url_for
from openerp.http import request
from openerp.osv import orm
logger = logging.getLogger(__name__)
class RequestUID(object):
def __init__(self, **kw):
self.__dict__.update(kw)
class ir_http(orm.AbstractModel):
_inherit = 'ir.http'
rerouting_limit = 10
def _get_converters(self):
return dict(
super(ir_http, self)._get_converters(),
model=ModelConverter,
page=PageConverter,
)
def _auth_method_public(self):
# TODO: select user_id from matching website
if not request.session.uid:
request.uid = self.pool['ir.model.data'].xmlid_to_res_id(request.cr, openerp.SUPERUSER_ID, 'base.public_user')
else:
request.uid = request.session.uid
def _dispatch(self):
first_pass = not hasattr(request, 'website')
request.website = None
func = None
try:
func, arguments = self._find_handler()
request.website_enabled = func.routing.get('website', False)
except werkzeug.exceptions.NotFound:
# either we have a language-prefixed route, or a real 404;
# in all cases, the website processes them
request.website_enabled = True
request.website_multilang = request.website_enabled and func and func.routing.get('multilang', True)
if request.website_enabled:
if func:
self._authenticate(func.routing['auth'])
else:
self._auth_method_public()
request.redirect = lambda url: werkzeug.utils.redirect(url_for(url))
request.website = request.registry['website'].get_current_website(request.cr, request.uid, context=request.context)
if first_pass:
request.lang = request.website.default_lang_code
request.context['lang'] = request.lang
if not func:
path = request.httprequest.path.split('/')
langs = [lg[0] for lg in request.website.get_languages()]
if path[1] in langs:
request.lang = request.context['lang'] = path.pop(1)
path = '/'.join(path) or '/'
if request.lang == request.website.default_lang_code:
# If language is in the url and it is the default language, redirect
# to url without language so google doesn't see duplicate content
return request.redirect(path + '?' + request.httprequest.query_string)
return self.reroute(path)
return self._handle_exception(code=404)
return super(ir_http, self)._dispatch()
def reroute(self, path):
if not hasattr(request, 'rerouting'):
request.rerouting = [request.httprequest.path]
if path in request.rerouting:
raise Exception("Rerouting loop is forbidden")
request.rerouting.append(path)
if len(request.rerouting) > self.rerouting_limit:
raise Exception("Rerouting limit exceeded")
request.httprequest.environ['PATH_INFO'] = path
# void werkzeug cached_property. TODO: find a proper way to do this
for key in ('path', 'full_path', 'url', 'base_url'):
request.httprequest.__dict__.pop(key, None)
return self._dispatch()
def _postprocess_args(self, arguments, rule):
if not getattr(request, 'website_enabled', False):
return super(ir_http, self)._postprocess_args(arguments, rule)
for arg, val in arguments.items():
# Replace uid placeholder by the current request.uid
if isinstance(val, orm.browse_record) and isinstance(val._uid, RequestUID):
val._uid = request.uid
try:
_, path = rule.build(arguments)
assert path is not None
except Exception:
return self._handle_exception(werkzeug.exceptions.NotFound())
if request.httprequest.method in ('GET', 'HEAD'):
generated_path = werkzeug.url_unquote_plus(path)
current_path = werkzeug.url_unquote_plus(request.httprequest.path)
if generated_path != current_path:
if request.lang != request.website.default_lang_code:
path = '/' + request.lang + path
if request.httprequest.query_string:
path += '?' + request.httprequest.query_string
return werkzeug.utils.redirect(path)
def _serve_attachment(self):
domain = [('type', '=', 'binary'), ('url', '=', request.httprequest.path)]
attach = self.pool['ir.attachment'].search_read(request.cr, openerp.SUPERUSER_ID, domain, ['__last_update', 'datas', 'mimetype'], context=request.context)
if attach:
wdate = attach[0]['__last_update']
datas = attach[0]['datas']
response = werkzeug.wrappers.Response()
server_format = openerp.tools.misc.DEFAULT_SERVER_DATETIME_FORMAT
try:
response.last_modified = datetime.datetime.strptime(wdate, server_format + '.%f')
except ValueError:
# just in case we have a timestamp without microseconds
response.last_modified = datetime.datetime.strptime(wdate, server_format)
response.set_etag(hashlib.sha1(datas).hexdigest())
response.make_conditional(request.httprequest)
if response.status_code == 304:
return response
response.mimetype = attach[0]['mimetype']
response.data = datas.decode('base64')
return response
def _handle_exception(self, exception=None, code=500):
try:
return super(ir_http, self)._handle_exception(exception)
except Exception:
attach = self._serve_attachment()
if attach:
return attach
if getattr(request, 'website_enabled', False) and request.website:
values = dict(
exception=exception,
traceback=traceback.format_exc(exception),
)
if exception:
code = getattr(exception, 'code', code)
if isinstance(exception, ir_qweb.QWebException):
values.update(qweb_exception=exception)
if isinstance(exception.qweb.get('cause'), openerp.exceptions.AccessError):
code = 403
if code == 500:
logger.error("500 Internal Server Error:\n\n%s", values['traceback'])
if 'qweb_exception' in values:
view = request.registry.get("ir.ui.view")
views = view._views_get(request.cr, request.uid, exception.qweb['template'], request.context)
to_reset = [v for v in views if v.model_data_id.noupdate is True]
values['views'] = to_reset
elif code == 403:
logger.warn("403 Forbidden:\n\n%s", values['traceback'])
values.update(
status_message=werkzeug.http.HTTP_STATUS_CODES[code],
status_code=code,
)
if not request.uid:
self._auth_method_public()
try:
html = request.website._render('website.%s' % code, values)
except Exception:
html = request.website._render('website.http_error', values)
return werkzeug.wrappers.Response(html, status=code, content_type='text/html;charset=utf-8')
raise
class ModelConverter(ir.ir_http.ModelConverter):
def __init__(self, url_map, model=False, domain='[]'):
super(ModelConverter, self).__init__(url_map, model)
self.domain = domain
self.regex = r'(?:[A-Za-z0-9-_]+?-)?(\d+)(?=$|/)'
def to_url(self, value):
return slug(value)
def to_python(self, value):
m = re.match(self.regex, value)
_uid = RequestUID(value=value, match=m, converter=self)
return request.registry[self.model].browse(
request.cr, _uid, int(m.group(1)), context=request.context)
def generate(self, cr, uid, query=None, args=None, context=None):
obj = request.registry[self.model]
        domain = eval(self.domain, (args or {}).copy())
if query:
domain.append((obj._rec_name, 'ilike', '%'+query+'%'))
for record in obj.search_read(cr, uid, domain=domain, fields=['write_date',obj._rec_name], context=context):
if record.get(obj._rec_name, False):
yield {'loc': (record['id'], record[obj._rec_name])}
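# Illustrative sketch (not part of the original module): the ModelConverter
# regex above accepts both bare ids and slugged paths, capturing only the id.
# A standalone check, assuming the same pattern:
#
#   import re
#   pattern = re.compile(r'(?:[A-Za-z0-9-_]+?-)?(\d+)(?=$|/)')
#   assert pattern.match('my-record-42').group(1) == '42'
#   assert pattern.match('42').group(1) == '42'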
class PageConverter(werkzeug.routing.PathConverter):
""" Only point of this converter is to bundle pages enumeration logic """
def generate(self, cr, uid, query=None, args={}, context=None):
View = request.registry['ir.ui.view']
views = View.search_read(cr, uid, [['page', '=', True]],
fields=['xml_id','priority','write_date'], order='name', context=context)
for view in views:
xid = view['xml_id'].startswith('website.') and view['xml_id'][8:] or view['xml_id']
        # the 'page/homepage' url is indexed as '/', avoid having the same page referenced twice
        # once we have a url mapping mechanism, replace this with a rule: page/homepage --> /
if xid=='homepage': continue
if query and query.lower() not in xid.lower():
continue
record = {'loc': xid}
        if view['priority'] != 16:
record['__priority'] = min(round(view['priority'] / 32.0,1), 1)
if view['write_date']:
record['__lastmod'] = view['write_date'][:10]
yield record
|
agpl-3.0
|
XiaosongWei/crosswalk-test-suite
|
webapi/tct-vibration-w3c-tests/inst.xpk.py
|
456
|
6809
|
#!/usr/bin/env python
import os
import shutil
import glob
import time
import sys
import subprocess
import string
from optparse import OptionParser, make_option
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PKG_NAME = os.path.basename(SCRIPT_DIR)
PARAMETERS = None
#XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/5000/dbus/user_bus_socket"
SRC_DIR = ""
PKG_SRC_DIR = ""
def doCMD(cmd):
    # No need to handle timeouts in this short script; let the calling tool do it
print "-->> \"%s\"" % cmd
output = []
cmd_return_code = 1
cmd_proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
while True:
output_line = cmd_proc.stdout.readline().strip("\r\n")
cmd_return_code = cmd_proc.poll()
if output_line == '' and cmd_return_code is not None:
break
sys.stdout.write("%s\n" % output_line)
sys.stdout.flush()
output.append(output_line)
return (cmd_return_code, output)
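# Illustrative usage (matching the calls made further below): run a command
# and collect its streamed output lines.
#   (return_code, output) = doCMD("sdb devices")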
def updateCMD(cmd=None):
if "pkgcmd" in cmd:
cmd = "su - %s -c '%s;%s'" % (PARAMETERS.user, XW_ENV, cmd)
return cmd
def getUSERID():
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell id -u %s" % (
PARAMETERS.device, PARAMETERS.user)
else:
cmd = "ssh %s \"id -u %s\"" % (
PARAMETERS.device, PARAMETERS.user)
return doCMD(cmd)
def getPKGID(pkg_name=None):
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell %s" % (
PARAMETERS.device, updateCMD('pkgcmd -l'))
else:
cmd = "ssh %s \"%s\"" % (
PARAMETERS.device, updateCMD('pkgcmd -l'))
(return_code, output) = doCMD(cmd)
if return_code != 0:
return None
test_pkg_id = None
for line in output:
if line.find("[" + pkg_name + "]") != -1:
pkgidIndex = line.split().index("pkgid")
test_pkg_id = line.split()[pkgidIndex + 1].strip("[]")
break
return test_pkg_id
def doRemoteCMD(cmd=None):
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell %s" % (PARAMETERS.device, updateCMD(cmd))
else:
cmd = "ssh %s \"%s\"" % (PARAMETERS.device, updateCMD(cmd))
return doCMD(cmd)
def doRemoteCopy(src=None, dest=None):
if PARAMETERS.mode == "SDB":
cmd_prefix = "sdb -s %s push" % PARAMETERS.device
cmd = "%s %s %s" % (cmd_prefix, src, dest)
else:
cmd = "scp -r %s %s:/%s" % (src, PARAMETERS.device, dest)
(return_code, output) = doCMD(cmd)
doRemoteCMD("sync")
    if return_code != 0:
        return False
    # Return True on success so callers can treat a falsy result as a failure.
    return True
def uninstPKGs():
action_status = True
for root, dirs, files in os.walk(SCRIPT_DIR):
if root.endswith("mediasrc"):
continue
for file in files:
if file.endswith(".xpk"):
pkg_id = getPKGID(os.path.basename(os.path.splitext(file)[0]))
if not pkg_id:
action_status = False
continue
(return_code, output) = doRemoteCMD(
"pkgcmd -u -t xpk -q -n %s" % pkg_id)
for line in output:
if "Failure" in line:
action_status = False
break
(return_code, output) = doRemoteCMD(
"rm -rf %s" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
return action_status
def instPKGs():
action_status = True
(return_code, output) = doRemoteCMD(
"mkdir -p %s" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
for root, dirs, files in os.walk(SCRIPT_DIR):
if root.endswith("mediasrc"):
continue
for file in files:
if file.endswith(".xpk"):
if not doRemoteCopy(
os.path.join(root, file), "%s/%s" % (SRC_DIR, file)):
action_status = False
(return_code, output) = doRemoteCMD(
"pkgcmd -i -t xpk -q -p %s/%s" % (SRC_DIR, file))
doRemoteCMD("rm -rf %s/%s" % (SRC_DIR, file))
for line in output:
if "Failure" in line:
action_status = False
break
# Do some special copy/delete... steps
'''
(return_code, output) = doRemoteCMD(
"mkdir -p %s/tests" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
if not doRemoteCopy("specname/tests", "%s/tests" % PKG_SRC_DIR):
action_status = False
'''
return action_status
def main():
try:
usage = "usage: inst.py -i"
opts_parser = OptionParser(usage=usage)
opts_parser.add_option(
"-m", dest="mode", action="store", help="Specify mode")
opts_parser.add_option(
"-s", dest="device", action="store", help="Specify device")
opts_parser.add_option(
"-i", dest="binstpkg", action="store_true", help="Install package")
opts_parser.add_option(
"-u", dest="buninstpkg", action="store_true", help="Uninstall package")
opts_parser.add_option(
"-a", dest="user", action="store", help="User name")
global PARAMETERS
(PARAMETERS, args) = opts_parser.parse_args()
except Exception as e:
print "Got wrong option: %s, exit ..." % e
sys.exit(1)
if not PARAMETERS.user:
PARAMETERS.user = "app"
global SRC_DIR, PKG_SRC_DIR
SRC_DIR = "/home/%s/content" % PARAMETERS.user
PKG_SRC_DIR = "%s/tct/opt/%s" % (SRC_DIR, PKG_NAME)
if not PARAMETERS.mode:
PARAMETERS.mode = "SDB"
if PARAMETERS.mode == "SDB":
if not PARAMETERS.device:
(return_code, output) = doCMD("sdb devices")
for line in output:
                if line.find("\tdevice") != -1:
PARAMETERS.device = line.split("\t")[0]
break
else:
PARAMETERS.mode = "SSH"
if not PARAMETERS.device:
print "No device provided"
sys.exit(1)
user_info = getUSERID()
re_code = user_info[0]
if re_code == 0:
global XW_ENV
userid = user_info[1][0]
XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/%s/dbus/user_bus_socket" % str(
userid)
else:
print "[Error] cmd commands error : %s" % str(user_info[1])
sys.exit(1)
if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
print "-i and -u are conflict"
sys.exit(1)
if PARAMETERS.buninstpkg:
if not uninstPKGs():
sys.exit(1)
else:
if not instPKGs():
sys.exit(1)
if __name__ == "__main__":
main()
sys.exit(0)
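# Usage sketch (illustrative; flags as defined in main() above):
#   python inst.xpk.py -i                   # install via sdb, first device found
#   python inst.xpk.py -m SSH -s <host> -u  # uninstall over ssh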
|
bsd-3-clause
|
bsipocz/statsmodels
|
statsmodels/tools/tests/test_web.py
|
27
|
1524
|
from statsmodels.tools.web import _generate_url, webdoc
from statsmodels.regression.linear_model import OLS
from unittest import TestCase
from nose.tools import assert_equal, assert_raises
from numpy import array
class TestWeb(TestCase):
def test_string(self):
url = _generate_url('arch',True)
assert_equal(url, 'http://statsmodels.sourceforge.net/stable/search.html?q=arch&check_keywords=yes&area=default')
url = _generate_url('arch',False)
assert_equal(url, 'http://statsmodels.sourceforge.net/devel/search.html?q=arch&check_keywords=yes&area=default')
url = _generate_url('dickey fuller',False)
assert_equal(url, 'http://statsmodels.sourceforge.net/devel/search.html?q=dickey+fuller&check_keywords=yes&area=default')
def test_function(self):
url = _generate_url(OLS, True)
assert_equal(url, 'http://statsmodels.sourceforge.net/stable/generated/statsmodels.regression.linear_model.OLS.html')
url = _generate_url(OLS, False)
assert_equal(url, 'http://statsmodels.sourceforge.net/devel/generated/statsmodels.regression.linear_model.OLS.html')
def test_nothing(self):
url = _generate_url(None, True)
assert_equal(url, 'http://statsmodels.sourceforge.net/stable/')
url = _generate_url(None, False)
assert_equal(url, 'http://statsmodels.sourceforge.net/devel/')
def test_errors(self):
assert_raises(ValueError, webdoc, array, True)
assert_raises(ValueError, webdoc, 1, False)
|
bsd-3-clause
|
jamestwebber/scipy
|
scipy/sparse/csgraph/tests/test_reordering.py
|
1
|
2686
|
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_equal
from scipy.sparse.csgraph import reverse_cuthill_mckee, structural_rank
from scipy.sparse import diags, csc_matrix, csr_matrix, coo_matrix
def test_graph_reverse_cuthill_mckee():
A = np.array([[1, 0, 0, 0, 1, 0, 0, 0],
[0, 1, 1, 0, 0, 1, 0, 1],
[0, 1, 1, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 1, 0],
[1, 0, 1, 0, 1, 0, 0, 0],
[0, 1, 0, 0, 0, 1, 0, 1],
[0, 0, 0, 1, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 1, 0, 1]], dtype=int)
graph = csr_matrix(A)
perm = reverse_cuthill_mckee(graph)
correct_perm = np.array([6, 3, 7, 5, 1, 2, 4, 0])
assert_equal(perm, correct_perm)
# Test int64 indices input
graph.indices = graph.indices.astype('int64')
graph.indptr = graph.indptr.astype('int64')
perm = reverse_cuthill_mckee(graph, True)
assert_equal(perm, correct_perm)
def test_graph_reverse_cuthill_mckee_ordering():
data = np.ones(63,dtype=int)
rows = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2,
2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5,
6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 9, 9,
9, 10, 10, 10, 10, 10, 11, 11, 11, 11,
12, 12, 12, 13, 13, 13, 13, 14, 14, 14,
14, 15, 15, 15, 15, 15])
cols = np.array([0, 2, 5, 8, 10, 1, 3, 9, 11, 0, 2,
7, 10, 1, 3, 11, 4, 6, 12, 14, 0, 7, 13,
15, 4, 6, 14, 2, 5, 7, 15, 0, 8, 10, 13,
1, 9, 11, 0, 2, 8, 10, 15, 1, 3, 9, 11,
4, 12, 14, 5, 8, 13, 15, 4, 6, 12, 14,
5, 7, 10, 13, 15])
graph = coo_matrix((data, (rows,cols))).tocsr()
perm = reverse_cuthill_mckee(graph)
correct_perm = np.array([12, 14, 4, 6, 10, 8, 2, 15,
0, 13, 7, 5, 9, 11, 1, 3])
assert_equal(perm, correct_perm)
def test_graph_structural_rank():
# Test square matrix #1
A = csc_matrix([[1, 1, 0],
[1, 0, 1],
[0, 1, 0]])
assert_equal(structural_rank(A), 3)
# Test square matrix #2
rows = np.array([0,0,0,0,0,1,1,2,2,3,3,3,3,3,3,4,4,5,5,6,6,7,7])
cols = np.array([0,1,2,3,4,2,5,2,6,0,1,3,5,6,7,4,5,5,6,2,6,2,4])
data = np.ones_like(rows)
B = coo_matrix((data,(rows,cols)), shape=(8,8))
assert_equal(structural_rank(B), 6)
#Test non-square matrix
C = csc_matrix([[1, 0, 2, 0],
[2, 0, 4, 0]])
assert_equal(structural_rank(C), 2)
#Test tall matrix
assert_equal(structural_rank(C.T), 2)
|
bsd-3-clause
|
Lemma1/MAC-POSTS
|
doc_builder/sphinx-contrib/httpdomain/test/bottle_test.py
|
4
|
1934
|
import unittest
from sphinxcontrib.autohttp.bottle import get_routes
from bottle import Bottle, Route
def create_app():
app = Bottle()
@app.route("/bottle")
def bottle_bottle():
return 12
@app.post("/bottle/post/")
def bottle_bottle_post():
return 23
return app
def create_app_mount():
app = create_app()
another_app = Bottle()
@another_app.route("/mount/")
def another_mount():
pass
app.mount("/mount/", another_app)
return app
def create_app_filter():
app = Bottle()
@app.route("/hello/<name>")
def bottle_hello_name(name):
return name
return app
class BottleTest(unittest.TestCase):
def test_get_routes(self):
routes = list(get_routes(create_app()))
# order is not deterministic:
routes = sorted(routes, key=lambda x: x[1])
self.assertEqual(len(routes), 2)
self.assertEqual(len(routes[0]), 3)
self.assertEqual(routes[0][0], "GET")
self.assertEqual(routes[0][1], "/bottle")
self.assertEqual(routes[0][2].callback(), 12)
self.assertEqual(type(routes[0][2]), Route)
self.assertEqual(len(routes[1]), 3)
self.assertEqual(routes[1][0], "POST")
self.assertEqual(routes[1][1], "/bottle/post/")
self.assertEqual(routes[1][2].callback(), 23)
self.assertEqual(type(routes[1][2]), Route)
def test_get_routes_mount(self):
routes = list(get_routes(create_app_mount()))
routes = sorted(routes, key=lambda x: x[1])
self.assertEqual(len(routes), 3)
# not sure about this:
self.assertEqual(routes[2][1], "/mount/(:re:.*)")
def test_get_routes_filter(self):
routes = list(get_routes(create_app_filter()))
routes = sorted(routes, key=lambda x: x[1])
self.assertEqual(len(routes), 1)
self.assertEqual(routes[0][1], "/hello/(name)")
|
mit
|
ratoaq2/knowit
|
knowit/core.py
|
1
|
7027
|
import typing
from logging import NullHandler, getLogger
logger = getLogger(__name__)
logger.addHandler(NullHandler())
T = typing.TypeVar('T')
_visible_chars_table = dict.fromkeys(range(32))
def _is_unknown(value: typing.Any) -> bool:
return isinstance(value, str) and (not value or value.lower() == 'unknown')
class Reportable(typing.Generic[T]):
"""Reportable abstract class."""
def __init__(
self,
*args: str,
description: typing.Optional[str] = None,
reportable: bool = True,
):
"""Initialize the object."""
self.names = args
self._description = description
self.reportable = reportable
@property
def description(self) -> str:
"""Rule description."""
return self._description or '|'.join(self.names)
def report(self, value: typing.Union[str, T], context: typing.MutableMapping) -> None:
"""Report unknown value."""
if not value or not self.reportable:
return
if 'report' in context:
report_map = context['report'].setdefault(self.description, {})
if value not in report_map:
report_map[value] = context['path']
logger.info('Invalid %s: %r', self.description, value)
class Property(Reportable[T]):
"""Property class."""
def __init__(
self,
*args: str,
default: typing.Optional[T] = None,
private: bool = False,
description: typing.Optional[str] = None,
delimiter: str = ' / ',
**kwargs,
):
"""Init method."""
super().__init__(*args, description=description, **kwargs)
self.default = default
self.private = private
# Used to detect duplicated values. e.g.: en / en or [email protected] / [email protected] or Progressive / Progressive
self.delimiter = delimiter
def extract_value(
self,
track: typing.Mapping,
context: typing.MutableMapping,
) -> typing.Optional[T]:
"""Extract the property value from a given track."""
for name in self.names:
names = name.split('.')
value = track.get(names[0], {}).get(names[1]) if len(names) == 2 else track.get(name)
if value is None:
if self.default is None:
continue
value = self.default
if isinstance(value, bytes):
value = value.decode()
if isinstance(value, str):
value = value.translate(_visible_chars_table).strip()
if _is_unknown(value):
continue
value = self._deduplicate(value)
result = self.handle(value, context)
if result is not None and not _is_unknown(result):
return result
return None
@classmethod
def _deduplicate(cls, value: str) -> str:
values = value.split(' / ')
if len(values) == 2 and values[0] == values[1]:
return values[0]
return value
def handle(self, value: T, context: typing.MutableMapping) -> typing.Optional[T]:
"""Return the value without any modification."""
return value
class Configurable(Property[T]):
"""Configurable property where values are in a config mapping."""
def __init__(self, config: typing.Mapping[str, typing.Mapping], *args: str,
config_key: typing.Optional[str] = None, **kwargs):
"""Init method."""
super().__init__(*args, **kwargs)
self.mapping = getattr(config, config_key or self.__class__.__name__) if config else {}
@classmethod
def _extract_key(cls, value: str) -> typing.Union[str, bool]:
return value.upper()
@classmethod
def _extract_fallback_key(cls, value: str, key: str) -> typing.Optional[T]:
return None
def _lookup(
self,
key: str,
context: typing.MutableMapping,
) -> typing.Union[T, None, bool]:
result = self.mapping.get(key)
if result is not None:
result = getattr(result, context.get('profile') or 'default')
return result if result != '__ignored__' else False
return None
def handle(self, value, context):
"""Return Variable or Constant."""
key = self._extract_key(value)
if key is False:
return
result = self._lookup(key, context)
if result is False:
return
while not result and key:
key = self._extract_fallback_key(value, key)
result = self._lookup(key, context)
if result is False:
return
if not result:
self.report(value, context)
return result
class MultiValue(Property):
"""Property with multiple values."""
def __init__(self, prop: typing.Optional[Property] = None, delimiter='/', single=False,
handler=None, name=None, **kwargs):
"""Init method."""
super().__init__(*(prop.names if prop else (name,)), **kwargs)
self.prop = prop
self.delimiter = delimiter
self.single = single
self.handler = handler
def handle(
self,
value: str,
context: typing.MutableMapping,
) -> typing.Union[T, typing.List[T]]:
"""Handle properties with multiple values."""
if self.handler:
call = self.handler
elif self.prop:
call = self.prop.handle
else:
raise NotImplementedError('No handler available')
result = call(value, context)
if result is not None:
return result
if isinstance(value, list):
if len(value) == 1:
values = self._split(value[0], self.delimiter)
else:
values = value
else:
values = self._split(value, self.delimiter)
if values is None:
return call(values, context)
if len(values) > 1 and not self.single:
results = [call(item, context) if not _is_unknown(item) else None for item in values]
results = [r for r in results if r is not None]
if results:
return results
return call(values[0], context)
@classmethod
def _split(
cls,
value: typing.Optional[T],
delimiter: str = '/',
) -> typing.Optional[typing.List[str]]:
if value is None:
return None
return [x.strip() for x in str(value).split(delimiter)]
class Rule(Reportable[T]):
"""Rule abstract class."""
def __init__(self, name: str, override=False, **kwargs):
"""Initialize the object."""
super().__init__(name, **kwargs)
self.override = override
def execute(self, props, pv_props, context: typing.Mapping):
"""How to execute a rule."""
raise NotImplementedError
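# Illustrative sketch (not part of knowit itself): a hypothetical Property
# subclass showing the handle() extension point; the class and field names
# below are made up for the example.
#
#   class UpperCodec(Property[str]):
#       def handle(self, value, context):
#           # Normalize the raw track value before it is returned.
#           return value.upper()
#
#   prop = UpperCodec('codec_id', description='codec')
#   prop.extract_value({'codec_id': 'avc1'}, {})  # -> 'AVC1'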
|
mit
|
mclumd/swarm-simulator
|
tests/vectors_tests.py
|
1
|
6590
|
# tests.vectors_tests.py
# Tests for the vectors package
#
# Author: Benjamin Bengfort <[email protected]>
# Created: Thu Apr 24 09:57:20 2014 -0400
#
# Copyright (C) 2014 Bengfort.com
# For license information, see LICENSE.txt
#
# ID: vectors_tests.py [] [email protected] $
"""
Tests for the vectors package
"""
##########################################################################
## Imports
##########################################################################
import math
import unittest
import numpy as np
from swarm.vectors import *
##########################################################################
## Vectors Test Case
##########################################################################
class VectorsTests(unittest.TestCase):
def assertArrayNotWritable(self, arr):
"""
Ensure that an array is not writable.
"""
with self.assertRaisesRegexp(ValueError, "assignment destination is read-only"):
arr[0] = 1.0
def test_arr_view(self):
"""
        Test vector construction from a np.array
"""
vec = Vector.arr(np.array([10, 10]))
self.assertTrue(isinstance(vec, Vector))
self.assertTrue(isinstance(vec, np.ndarray))
self.assertEqual(vec.x, 10)
self.assertEqual(vec.y, 10)
self.assertArrayNotWritable(vec)
def test_zero_view(self):
"""
        Test the zero vector construction
"""
vec = Vector.zero()
self.assertTrue(isinstance(vec, Vector))
self.assertTrue(isinstance(vec, np.ndarray))
self.assertEqual(vec.x, 0)
self.assertEqual(vec.y, 0)
self.assertArrayNotWritable(vec)
def test_arrp_view(self):
"""
Test the python arr vector construction
"""
vec = Vector.arrp(10,0)
self.assertTrue(isinstance(vec, Vector))
self.assertTrue(isinstance(vec, np.ndarray))
self.assertEqual(vec.x, 10)
self.assertEqual(vec.y, 0)
self.assertArrayNotWritable(vec)
def test_rand_high_view(self):
"""
Test the random vector constructor with high limit
"""
vec = Vector.rand(12)
self.assertTrue(isinstance(vec, Vector))
self.assertTrue(isinstance(vec, np.ndarray))
self.assertLess(vec.x, 12)
self.assertLess(vec.y, 12)
self.assertGreaterEqual(vec.x, 0)
self.assertGreaterEqual(vec.y, 0)
self.assertArrayNotWritable(vec)
def test_rand_range_view(self):
"""
Test the random vector constructor with range
"""
vec = Vector.rand(6, 12)
self.assertTrue(isinstance(vec, Vector))
self.assertTrue(isinstance(vec, np.ndarray))
self.assertLess(vec.x, 12)
self.assertLess(vec.y, 12)
self.assertGreaterEqual(vec.x, 6)
self.assertGreaterEqual(vec.y, 6)
self.assertArrayNotWritable(vec)
def test_unit(self):
"""
Test the computation of the unit vector
"""
cases = (
(Vector.arrp(0, 10), Vector.arrp(0,1)),
(Vector.arrp(10, 0), Vector.arrp(1,0)),
(Vector.arrp(10, 10), Vector.arrp( 0.70710678, 0.70710678)),
(Vector.zero(), Vector.zero()),
)
for case, expected in cases:
self.assertEqual(expected, case.unit)
def test_length(self):
"""
Test computation of the vector length
"""
cases = (
(Vector.arrp(0, 10), 10),
(Vector.arrp(10, 0), 10),
(Vector.arrp(10, 10), 14.142135623730951),
(Vector.zero(), 0.0)
)
for case, expected in cases:
self.assertEqual(expected, case.length)
def test_orthogonal(self):
"""
Test the computation of the orthogonal vector
"""
cases = (
(Vector.arrp(0, 10), Vector.arrp(-1,0)),
(Vector.arrp(10, 0), Vector.arrp(0,1)),
(Vector.arrp(10, 10), Vector.arrp(-0.70710678, 0.70710678)),
(Vector.arrp(-10, -10), Vector.arrp(0.70710678, -0.70710678)),
)
for case, expected in cases:
self.assertEqual(expected, case.orthogonal)
def test_angle_degrees(self):
"""
Test computation of the angle in degrees
Are these angles correct?
"""
A = Vector.arrp(10, 0)
B = Vector.arrp(0, 10)
E = Vector.arrp(10, 10)
C = Vector.arrp(-10, 0)
D = Vector.arrp(0, -10)
F = Vector.arrp(-10,-10)
cases = (
(A.angle(B), 90.0),
(B.angle(A), 90.0),
(A.angle(E), 45.0),
(E.angle(F), 180.0),
(E.angle(C), 135.0),
(E.angle(D), 135.0),
(B.angle(B), 0.0)
)
for case, expected in cases:
self.assertAlmostEqual(case, expected, places=4)
def test_angle_radians(self):
"""
Test computation of the angle in radians
"""
A = Vector.arrp(10, 0)
B = Vector.arrp(0, 10)
E = Vector.arrp(10, 10)
C = Vector.arrp(-10, 0)
D = Vector.arrp(0, -10)
F = Vector.arrp(-10,-10)
cases = (
(A.angle(B, False), 0.5*np.pi),
(B.angle(A, False), 0.5*np.pi),
(A.angle(E, False), 0.25*np.pi),
(E.angle(F, False), np.pi),
(E.angle(C, False), .75*np.pi),
(E.angle(D, False), .75*np.pi),
(B.angle(B, False), 0.0)
)
for case, expected in cases:
self.assertAlmostEqual(case, expected, places=4)
def test_distance(self):
"""
Test vector distance computations
"""
A = Vector.arrp(23, 7)
cases = (
Vector.arrp(27, 10),
Vector.arrp(27, 4),
Vector.arrp(19, 4),
Vector.arrp(19, 10),
)
for case in cases:
self.assertEqual(A.distance(case), 5.0)
def test_equality(self):
"""
Test two vectors are equal or not equal
"""
A = Vector.arrp(42.0000000000000, 13.000000000000)
B = Vector.arrp(42.0000000000001, 12.999999999999)
C = Vector.arrp(7.0, 7.0)
self.assertIsNot(A, B)
self.assertEqual(A, B)
self.assertNotEqual(A, C)
def test_copy(self):
"""
Check that you can copy a readonly vector
"""
A = Vector.arrp(23, 52)
B = A.copy()
self.assertIsNot(A,B)
self.assertEqual(A,B)
|
mit
|
vmlaker/mpipe
|
src/OrderedWorker.py
|
1
|
7319
|
"""Implements OrderedWorker class."""
import multiprocessing
from .TubeP import TubeP
class OrderedWorker(multiprocessing.Process):
"""An OrderedWorker object operates in a stage where the order
of output results always matches that of corresponding input tasks.
A worker is linked to its two nearest neighbors -- the previous
worker and the next -- all workers in the stage thusly connected
in circular fashion.
Input tasks are fetched in this order. Before publishing its result,
a worker first waits for its previous neighbor to do the same."""
def __init__(self):
pass
def init2(
self,
input_tube, # Read task from the input tube.
output_tubes, # Send result on all the output tubes.
num_workers, # Total number of workers in the stage.
disable_result, # Whether to override any result with None.
do_stop_task, # Whether to call doTask() on "stop" request.
):
"""Create *num_workers* worker objects with *input_tube* and
an iterable of *output_tubes*. The worker reads a task from *input_tube*
and writes the result to *output_tubes*."""
super(OrderedWorker, self).__init__()
self._tube_task_input = input_tube
self._tubes_result_output = output_tubes
self._num_workers = num_workers
# Serializes reading from input tube.
self._lock_prev_input = None
self._lock_next_input = None
# Serializes writing to output tube.
self._lock_prev_output = None
self._lock_next_output = None
self._disable_result = disable_result
self._do_stop_task = do_stop_task
@staticmethod
def getTubeClass():
"""Return the tube class implementation."""
return TubeP
@classmethod
def assemble(
cls,
args,
input_tube,
output_tubes,
size,
disable_result=False,
do_stop_task=False,
):
"""Create, assemble and start workers.
Workers are created of class *cls*, initialized with *args*, and given
task/result communication channels *input_tube* and *output_tubes*.
The number of workers created is according to *size* parameter.
*do_stop_task* indicates whether doTask() will be called for "stop" request.
"""
# Create the workers.
workers = []
for ii in range(size):
worker = cls(**args)
worker.init2(
input_tube,
output_tubes,
size,
disable_result,
do_stop_task,
)
workers.append(worker)
# Connect the workers.
for ii in range(size):
worker_this = workers[ii]
worker_prev = workers[ii-1]
worker_prev._link(
worker_this,
next_is_first=(ii==0), # Designate 0th worker as the first.
)
# Start the workers.
for worker in workers:
worker.start()
def _link(self, next_worker, next_is_first=False):
"""Link the worker to the given next worker object,
connecting the two workers with communication tubes."""
lock = multiprocessing.Lock()
next_worker._lock_prev_input = lock
self._lock_next_input = lock
lock.acquire()
lock = multiprocessing.Lock()
next_worker._lock_prev_output = lock
self._lock_next_output = lock
lock.acquire()
# If the next worker is the first one, trigger it now.
if next_is_first:
self._lock_next_input.release()
self._lock_next_output.release()
def putResult(self, result):
"""Register the *result* by putting it on all the output tubes."""
self._lock_prev_output.acquire()
for tube in self._tubes_result_output:
tube.put((result, 0))
self._lock_next_output.release()
def run(self):
# Run implementation's initialization.
self.doInit()
while True:
try:
# Wait on permission from the previous worker that
# it is okay to retrieve the input task.
self._lock_prev_input.acquire()
# Retrieve the input task.
(task, count) = self._tube_task_input.get()
# Permit the next worker to retrieve the input task.
self._lock_next_input.release()
except:
(task, count) = (None, 0)
# In case the task is None, it represents the "stop" request,
# the count being the number of workers in this stage that had
# already stopped.
if task is None:
# If this worker is the last one (of its stage) to receive the
# "stop" request, propagate "stop" to the next stage. Otherwise,
# maintain the "stop" signal in this stage for another worker that
# will pick it up.
count += 1
if count == self._num_workers:
# Propagating the "stop" to the next stage does not require
# synchronization with previous and next worker because we're
# guaranteed (from the count value) that this is the last worker alive.
# Therefore, just put the "stop" signal on the result tube.
for tube in self._tubes_result_output:
tube.put((None, 0))
else:
self._tube_task_input.put((None, count))
# In case we're calling doTask() on a "stop" request, do so now.
if self._do_stop_task:
self.doTask(None)
# Honor the "stop" request by exiting the process.
break
# The task is not None, meaning that it is an actual task to
# be processed. Therefore let's call doTask().
result = self.doTask(task)
# Unless result is disabled,
# if doTask() actually returns a result (and the result is not None),
# it indicates that it did not call putResult(), instead intending
# it to be called now.
if not self._disable_result and result is not None:
self.putResult(result)
def doTask(self, task):
"""Implement this method in the subclass with work functionality
to be executed on each *task* object.
The implementation can publish the output result in one of two ways,
either by 1) calling :meth:`putResult` and returning ``None``, or
2) returning the result (other than ``None``)."""
return True
def doInit(self):
"""Implement this method in the subclass in case there's need
for additional initialization after process startup.
Since this class inherits from :class:`multiprocessing.Process`,
its constructor executes in the spawning process.
This method allows additional code to be run in the forked process,
before the worker begins processing input tasks.
"""
return None
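# Illustrative usage sketch (not part of the original module): a minimal
# OrderedWorker subclass; the class name and tube wiring are hypothetical.
#
#   class Doubler(OrderedWorker):
#       def doTask(self, task):
#           # Returning a non-None value publishes it as this task's result.
#           return task * 2
#
# Workers are normally created and started through assemble(), e.g.:
#   Doubler.assemble({}, input_tube, [output_tube], size=4)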
|
mit
|
zuowang/voltdb
|
tests/scripts/examples/sql_coverage/partial-covering-schema.py
|
7
|
1943
|
#!/usr/bin/env python
# This file is part of VoltDB.
# Copyright (C) 2008-2015 VoltDB Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
{
"PARTIAL_COVERING_TREE" : {
"columns": (("ID", FastSerializer.VOLTTYPE_INTEGER),
("A1", FastSerializer.VOLTTYPE_INTEGER),
("A2", FastSerializer.VOLTTYPE_INTEGER),
("A3", FastSerializer.VOLTTYPE_INTEGER),
("A4", FastSerializer.VOLTTYPE_INTEGER)),
"partitions": (),
"indexes": ("ID",)
},
"PARTIAL_COVERING_HASH" : {
"columns": (("ID", FastSerializer.VOLTTYPE_INTEGER),
("A1", FastSerializer.VOLTTYPE_INTEGER),
("A2", FastSerializer.VOLTTYPE_INTEGER),
("A3", FastSerializer.VOLTTYPE_INTEGER),
("A4", FastSerializer.VOLTTYPE_INTEGER)),
"partitions": (),
"indexes": ("ID",)
}
}
|
agpl-3.0
|
gangadharkadam/v6_erp
|
erpnext/patches/v5_0/rename_total_fields.py
|
101
|
2313
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.utils.rename_field import rename_field
from frappe.modules import scrub, get_doctype_module
selling_doctypes = ("Quotation", "Sales Order", "Delivery Note", "Sales Invoice")
buying_doctypes = ("Supplier Quotation", "Purchase Order", "Purchase Receipt", "Purchase Invoice")
selling_renamed_fields = (
("net_total", "base_net_total"),
("net_total_export", "net_total"),
("other_charges_total", "base_total_taxes_and_charges"),
("other_charges_total_export", "total_taxes_and_charges"),
("grand_total", "base_grand_total"),
("grand_total_export", "grand_total"),
("rounded_total", "base_rounded_total"),
("rounded_total_export", "rounded_total"),
("in_words", "base_in_words"),
("in_words_export", "in_words")
)
buying_renamed_fields = (
("net_total", "base_net_total"),
("net_total_import", "net_total"),
("grand_total", "base_grand_total"),
("grand_total_import", "grand_total"),
("rounded_total", "base_rounded_total"),
("in_words", "base_in_words"),
("in_words_import", "in_words"),
("other_charges_added", "base_taxes_and_charges_added"),
("other_charges_added_import", "taxes_and_charges_added"),
("other_charges_deducted", "base_taxes_and_charges_deducted"),
("other_charges_deducted_import", "taxes_and_charges_deducted"),
("total_tax", "base_total_taxes_and_charges")
)
def execute():
for doctypes, fields in [[selling_doctypes, selling_renamed_fields], [buying_doctypes, buying_renamed_fields]]:
for dt in doctypes:
frappe.reload_doc(get_doctype_module(dt), "doctype", scrub(dt))
table_columns = frappe.db.get_table_columns(dt)
base_net_total = frappe.db.sql("select sum(ifnull({0}, 0)) from `tab{1}`".format(fields[0][1], dt))[0][0]
if not base_net_total:
for f in fields:
if f[0] in table_columns:
rename_field(dt, f[0], f[1])
# Added new field "total_taxes_and_charges" in buying cycle, updating value
if dt in ("Supplier Quotation", "Purchase Order", "Purchase Receipt", "Purchase Invoice"):
frappe.db.sql("""update `tab{0}` set total_taxes_and_charges =
round(base_total_taxes_and_charges/conversion_rate, 2)""".format(dt))
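# Illustrative note (not part of the patch): the ordering of the tuples above
# matters. "net_total" must be renamed to "base_net_total" before
# "net_total_export" can be renamed to "net_total", otherwise the second
# rename would collide with the still-existing column.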
|
agpl-3.0
|
davenpcj5542009/eucalyptus
|
tools/imaging/eucatoolkit/stages/downloadimage.py
|
1
|
20080
|
#!/usr/bin/python -tt
# Copyright 2011-2012 Eucalyptus Systems, Inc.
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
from argparse import ArgumentTypeError, ArgumentError
from urlparse import urlparse
import os
import sys
import subprocess
import traceback
from io import BytesIO
from downloadmanifest import DownloadManifest
from processutils import close_all_fds, open_pipe_fileobjs, spawn_process
from processutils import monitor_subprocess_io, wait_process_in_thread
from eucatoolkit import stages
class DownloadImage(object):
def __init__(self, dest_file=None, **kwargs):
parser = argparse.ArgumentParser(description=
"Download parts from manifest")
parser.add_argument('-m', '--manifest', dest='manifest', required=True,
                            help='''Path to the download-manifest. Use '-' to read
                            the manifest from stdin''')
parser.add_argument('-d', '--dest', dest='destination',
help='''Destination path to write image to.
Use '-' for stdout.''')
parser.add_argument('-k', '--privatekey', dest='privatekey',
help='''file containing the private key to decrypt
the bundle with.''')
parser.add_argument('-c', '--cloudcert', dest='cloudcert',
required=True,
help='''file containing the cloud cert used
to verify manifest signature.''')
parser.add_argument('-x', '--xsd', dest='xsd', default=None,
                            help='''Path to the download-manifest xsd used
                            to validate manifest xml.''')
parser.add_argument('--toolspath', dest='toolspath', default=None,
help='''Local path to euca2ools.''')
parser.add_argument('--debug', dest='debug', default=False,
action='store_true',
help='''Enable debug to a log file''')
parser.add_argument('--logfile', dest='logfile', default=None,
help='''log file path to write to''')
parser.add_argument('--loglevel', dest='loglevel', default='INFO',
help='''log level for output''')
parser.add_argument('--dumpmanifest', dest='dumpmanifest',
action='store_true', default=False,
help='''Get and show manifest then exit''')
parser.add_argument('--reportprogress', dest='reportprogress',
default=False, action='store_true',
help='''Output progress information to stderr''')
parser.add_argument('--destispipe', dest='destispipe',
default=False, action='store_true',
help='''Indicate that destination is a pipe''')
#Set any kwargs from init to default values for parsed args
#Handle the cli arguments...
if not kwargs:
arg_list = sys.argv[1:]
else:
arg_list = []
self.parser = parser
#Handle any kwargs at __init__ assign to argparse...
for kwarg in kwargs:
for key in parser._option_string_actions:
if parser._option_string_actions[key].dest == str(kwarg):
option = parser._option_string_actions[key]
arg_value = [option.option_strings[0]]
#Is there a better way to handle this for consts?
if not option.const:
arg_value.append(kwargs[kwarg])
arg_list.extend(arg_value)
self.args = parser.parse_args(arg_list)
if dest_file is not None:
self.args.destination = dest_file
if self.args.destination == "-":
force_stderr = True
else:
force_stderr = False
self.log = stages.get_logger(self.args.loglevel,
logfile=self.args.logfile,
force_stderr=force_stderr,
debug=self.args.debug)
self.log.debug('Parsed Args: ' + str(self.args))
self._setup()
def _setup(self):
'''
Basic setup of this module from args provided at init.
'''
self.log.debug('Starting configure...')
#Get optional destination directory...
dest_file = self.args.destination
if not isinstance(dest_file, file) and not (dest_file == "-"):
dest_file = os.path.expanduser(os.path.abspath(dest_file))
self.args.destination = dest_file
xsd_file = self.args.xsd
if xsd_file:
if not isinstance(xsd_file, file):
xsd_file = os.path.expanduser(os.path.abspath(xsd_file))
self.args.xsd = xsd_file
if not self.args.cloudcert:
raise argparse.ArgumentError(self.args.cloudcert,
"Cloud cert must be provided to "
"verify manifest signature")
#Read the manifest from src provided into a manifest obj...
self._get_download_manifest_obj()
def _read_manifest_from_stdin(self, read_fileobj=None, chunk_size=None):
'''
Attempts to read xml provided to stdin and convert it to a
downloadmanifest obj.
:returns downloadmanifest obj.
'''
chunk_size = chunk_size or stages._chunk_size
read_fileobj = read_fileobj or sys.stdin
self.log.debug('Reading Manifest from stdin')
fileobj = BytesIO()
while True:
chunk = read_fileobj.read(chunk_size)
if not chunk:
break
self.log.debug('Chunk:' + str(chunk))
fileobj.write(chunk)
fileobj.flush()
fileobj.seek(0)
with fileobj:
manifest = DownloadManifest._read_from_fileobj(
manifest_fileobj=fileobj,
xsd=self.args.xsd,
key_filename=self.args.privatekey,
sig_key_filename=self.args.cloudcert)
return manifest
def _read_manifest_from_file(self, filepath=None):
'''
        Attempts to read xml contained at a local file path and convert it to a
downloadmanifest obj.
:returns downloadmanifest obj.
'''
filepath = filepath or self.args.manifest
self.log.debug('Reading from local manifest file:' + str(filepath))
#Read manifest into BundleManifest obj...
return DownloadManifest.read_from_file(
filepath,
self.args.xsd,
key_filename=self.args.privatekey,
sig_key_filename=self.args.cloudcert)
def _read_manifest_from_url(self, url=None):
'''
        Attempts to read the xml at the provided url and convert it to a
downloadmanifest obj.
:returns downloadmanifest obj.
'''
url = url or self.args.manifest
self.log.debug('Reading from remote manifest from url: ' + str(url))
return DownloadManifest.read_from_url(
manifest_url=url,
xsd=self.args.xsd,
key_filename=self.args.privatekey,
sig_key_filename=self.args.cloudcert)
def _get_download_manifest_obj(self, manifest_input=None):
'''
Helper method to return a downloadmanifest obj by determining which
format the manifest input was provided; stdin, filepath, or url. The
manifest is stored in self.args.manifest and returned as a result.
:param manifest_input: filepath, fileobj, URL, or downloadmanifest obj
:returns downloadmanifest
'''
self.log.debug('Create DownloadManifest obj from the manifest '
'argument...')
manifest = manifest_input or self.args.manifest
if manifest:
if not isinstance(manifest, DownloadManifest):
                if manifest == '-' or isinstance(manifest, file):
self.args.manifest = self._read_manifest_from_stdin()
else:
#see if manifest is a url or local path
try:
parsed_url = urlparse(str(manifest))
except Exception as pe:
self.log.debug('Error parsing manifest argument as'
' url, trying local path:' + str(pe))
if not parsed_url or not parsed_url.scheme:
self.args.manifest = self._read_manifest_from_file()
else:
                        # Read the manifest from the remote url.
                        # For now limit urls to http(s)...
                        if parsed_url.scheme not in ['http', 'https']:
raise ArgumentTypeError('Manifest url only '
'supports http, https at '
'this time')
self.args.manifest = self._read_manifest_from_url()
else:
raise argparse.ArgumentError(None, 'Manifest is required (-m)')
return self.args.manifest
def _download_parts_to_fileobj(self, manifest, dest_fileobj):
'''
Attempts to iterate through all parts contained in 'manifest' and
download and concatenate each part to 'dest_fileobj'. If the
manifest contains the intended image size, and the resulting bytes
downloaded does not match this size, ValueError is raised.
:param manifest: downloadmanifest obj
:param dest_fileobj: file like object to write downloaded parts to
:returns bytes downloaded
'''
bytes = 0
for part_index in xrange(0, manifest.part_count):
part = manifest.get_part_by_index(part_index)
self.log.debug('Downloading part#:' + str(part.part_index))
bytes += part.download(dest_fileobj=dest_fileobj) or 0
self.log.debug('Wrote bytes:' + str(bytes) + "/"
+ str(manifest.download_image_size) + ", digest:"
+ str(part.written_digest))
if self.args.reportprogress:
stages.report_status('"bytes_downloaded":%d' % bytes)
if manifest.download_image_size is not None:
if bytes != manifest.download_image_size:
raise ValueError('Bytes Downloaded:"{0}" does not equal '
'manifest image size:"{1}"'
.format(bytes, manifest.download_image_size))
return bytes
def _download_parts_pipe_wrapper(self,
manifest,
dest_fileobj,
close_fd_excludes=[]):
close_fd_excludes.extend([dest_fileobj])
close_all_fds(close_fd_excludes)
return self._download_parts_to_fileobj(manifest=manifest,
dest_fileobj=dest_fileobj)
def _download_to_unbundlestream(self,
dest_fileobj,
manifest=None,
tools_path=None,
inactivity_timeout=120):
'''
Attempts to iterate through all parts contained in 'manifest' and
download and concatenate each part to euca2ools unbundle stream.
:params manifest: downloadmanifest obj
:tools_path: optional path to euca2ools euca-bundle-stream cmd
'''
download_r = None
download_w = None
monitor_w = None
monitor_r = None
unbundle_ps = None
download_ps = None
wait_threads = []
if not dest_fileobj or isinstance(dest_fileobj, basestring):
raise AttributeError('Dest fileobj must be file like obj, value:'
'"{0}"'.format(str(dest_fileobj)))
manifest = manifest or self.args.manifest
if tools_path is None:
tools_path = self.args.toolspath or ""
unbundle_tool_path = tools_path+'euca-unbundle-stream'
unbundle_ps_args = [unbundle_tool_path,
'--enc-key', str(manifest.enc_key),
'--enc-iv', str(manifest.enc_iv)]
#Enable debug on this subprocess if local arg is set
if self.args.debug:
unbundle_ps_args.append('--debug')
self.log.debug('Running "' + str(unbundle_tool_path) + '" with '
'args:' + ",".join(str(x) for x in unbundle_ps_args))
try:
download_r, download_w = open_pipe_fileobjs()
monitor_r, monitor_w = open_pipe_fileobjs()
#Begin the unbundle portion of this pipeline...
unbundle_ps = subprocess.Popen(unbundle_ps_args,
stdin=download_r,
stdout=monitor_w,
stderr=subprocess.PIPE,
close_fds=True,
bufsize=-1)
download_r.close()
monitor_w.close()
self.log.debug('Starting download parts process to feed unbundle')
#Iterate through all parts in manifest and download to unbundle
download_ps = spawn_process(self._download_parts_pipe_wrapper,
manifest=manifest,
dest_fileobj=download_w)
download_w.close()
self.log.debug('Starting process monitor')
# Process io monitor sits on top/end of unbundle pipe
# It attempts to gather information on the progress of the
# unbundle pipeline and provide information as to the bytes
# written to the destination file obj.
bytes = monitor_subprocess_io(infile=monitor_r,
outfile=dest_fileobj,
sub_stderr=unbundle_ps.stderr,
log_method=self.log.debug,
inactivity_timeout=inactivity_timeout)
self.log.debug('Done with unbundle pipeline...')
if self.args.reportprogress:
stages.report_status('"bytes_unbundled":%d' % bytes)
#Do some final wait/cleanup...
for ps in [unbundle_ps, download_ps]:
if ps:
wait_thread = wait_process_in_thread(ps.pid)
if wait_thread:
wait_threads.append(wait_thread)
# Monitor the subprocess pids in a separate threads, use join()
# timeout to kill processes if needed
for wait_thread in wait_threads:
if wait_thread:
wait_thread.join(timeout=inactivity_timeout)
except Exception, UBE:
if not self.args.reportprogress:
traceback.print_exc()
for ps in [unbundle_ps, download_ps]:
if ps:
try:
ps.terminate()
except:
pass
raise UBE
finally:
for f_pipe in [monitor_r, monitor_w, download_r, download_w]:
if f_pipe:
try:
f_pipe.close()
except:
pass
return bytes
def main(self):
manifest = self.args.manifest
        #Dump manifest obj to screen and exit, if the dumpmanifest arg is given.
if self.args.dumpmanifest:
print str(manifest)
os.sys.exit(0)
dest_file = self.args.destination
dest_file_name = self.args.destination
bytes = 0
#If this image is bundled, download parts to unbundle stream
#All other formats can be downloaded directly to destination
try:
expected_size = manifest.download_image_size
if isinstance(dest_file, file):
dest_file_name = '<stdout>'
dest_fileobj = dest_file
elif dest_file == "-":
dest_file_name = '<stdout>'
dest_fileobj = os.fdopen(os.dup(os.sys.stdout.fileno()), 'w')
else:
dest_file_name = str(dest_file)
dest_fileobj = open(dest_file, 'w')
if manifest.file_format == 'BUNDLE':
expected_size = manifest.unbundled_image_size
if not self.args.privatekey:
raise ArgumentError(self.args.privatekey,
'Bundle type needs privatekey -k')
#Download and unbundle...
bytes = self._download_to_unbundlestream(
dest_fileobj=dest_fileobj,
manifest=manifest)
else:
#Download raw parts...
with dest_fileobj:
bytes = self._download_parts_to_fileobj(
manifest=manifest, dest_fileobj=dest_fileobj)
#Done with the download, now check the resulting image size.
self.log.debug('Downloaded bytes:"{0}"'.format(str(bytes)))
self.log.debug('manifest download image size:'
+ str(manifest.download_image_size))
self.log.debug('manifest unbundled size:'
+ str(manifest.unbundled_image_size))
if bytes != expected_size:
raise ValueError('Bytes written:"{0}" does not equal '
'expected:"{1}"'.format(bytes, expected_size))
if dest_file != "-" and not self.args.destispipe:
self.log.info('Download Image wrote "{0}" bytes to: {1}'
.format(str(bytes), str(dest_file_name)))
else:
self.log.debug('Download Image wrote "{0}" bytes to: {1}'
.format(str(bytes), str(dest_file_name)))
except Exception, E:
if self.args.reportprogress:
stages.report_error(str(E))
else:
raise E
if __name__ == '__main__':
try:
di = DownloadImage().main()
os.sys.exit(0)
except Exception, E:
print >> sys.stderr, "Caught exception:'" + str(E) + "'"
traceback.print_exc()
os.sys.exit(1)
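# Hedged usage sketch (not in the original file); flag names come from the
# argparse definitions in __init__ above, file names are illustrative:
#   python downloadimage.py -m manifest.xml -c cloud-cert.pem -d image.raw
#   python downloadimage.py -m - -c cloud.pem -k pk.pem -d - < manifest.xml > img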
|
gpl-3.0
|
andreif/django
|
tests/migrations/test_executor.py
|
80
|
26645
|
from django.apps.registry import apps as global_apps
from django.db import connection
from django.db.migrations.exceptions import InvalidMigrationPlan
from django.db.migrations.executor import MigrationExecutor
from django.db.migrations.graph import MigrationGraph
from django.db.migrations.recorder import MigrationRecorder
from django.db.utils import DatabaseError
from django.test import TestCase, modify_settings, override_settings
from .test_base import MigrationTestBase
@modify_settings(INSTALLED_APPS={'append': 'migrations2'})
class ExecutorTests(MigrationTestBase):
"""
Tests the migration executor (full end-to-end running).
Bear in mind that if these are failing you should fix the other
test failures first, as they may be propagating into here.
"""
available_apps = ["migrations", "migrations2", "django.contrib.auth", "django.contrib.contenttypes"]
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_run(self):
"""
Tests running a simple set of migrations.
"""
executor = MigrationExecutor(connection)
# Let's look at the plan first and make sure it's up to scratch
plan = executor.migration_plan([("migrations", "0002_second")])
self.assertEqual(
plan,
[
(executor.loader.graph.nodes["migrations", "0001_initial"], False),
(executor.loader.graph.nodes["migrations", "0002_second"], False),
],
)
# Were the tables there before?
self.assertTableNotExists("migrations_author")
self.assertTableNotExists("migrations_book")
# Alright, let's try running it
executor.migrate([("migrations", "0002_second")])
# Are the tables there now?
self.assertTableExists("migrations_author")
self.assertTableExists("migrations_book")
# Rebuild the graph to reflect the new DB state
executor.loader.build_graph()
# Alright, let's undo what we did
plan = executor.migration_plan([("migrations", None)])
self.assertEqual(
plan,
[
(executor.loader.graph.nodes["migrations", "0002_second"], True),
(executor.loader.graph.nodes["migrations", "0001_initial"], True),
],
)
executor.migrate([("migrations", None)])
# Are the tables gone?
self.assertTableNotExists("migrations_author")
self.assertTableNotExists("migrations_book")
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"})
def test_run_with_squashed(self):
"""
Tests running a squashed migration from zero (should ignore what it replaces)
"""
executor = MigrationExecutor(connection)
# Check our leaf node is the squashed one
leaves = [key for key in executor.loader.graph.leaf_nodes() if key[0] == "migrations"]
self.assertEqual(leaves, [("migrations", "0001_squashed_0002")])
# Check the plan
plan = executor.migration_plan([("migrations", "0001_squashed_0002")])
self.assertEqual(
plan,
[
(executor.loader.graph.nodes["migrations", "0001_squashed_0002"], False),
],
)
# Were the tables there before?
self.assertTableNotExists("migrations_author")
self.assertTableNotExists("migrations_book")
# Alright, let's try running it
executor.migrate([("migrations", "0001_squashed_0002")])
# Are the tables there now?
self.assertTableExists("migrations_author")
self.assertTableExists("migrations_book")
# Rebuild the graph to reflect the new DB state
executor.loader.build_graph()
# Alright, let's undo what we did. Should also just use squashed.
plan = executor.migration_plan([("migrations", None)])
self.assertEqual(
plan,
[
(executor.loader.graph.nodes["migrations", "0001_squashed_0002"], True),
],
)
executor.migrate([("migrations", None)])
# Are the tables gone?
self.assertTableNotExists("migrations_author")
self.assertTableNotExists("migrations_book")
@override_settings(MIGRATION_MODULES={
"migrations": "migrations.test_migrations",
"migrations2": "migrations2.test_migrations_2",
})
def test_empty_plan(self):
"""
Tests that re-planning a full migration of a fully-migrated set doesn't
perform spurious unmigrations and remigrations.
There was previously a bug where the executor just always performed the
backwards plan for applied migrations - which even for the most recent
migration in an app, might include other, dependent apps, and these
were being unmigrated.
"""
# Make the initial plan, check it
executor = MigrationExecutor(connection)
plan = executor.migration_plan([
("migrations", "0002_second"),
("migrations2", "0001_initial"),
])
self.assertEqual(
plan,
[
(executor.loader.graph.nodes["migrations", "0001_initial"], False),
(executor.loader.graph.nodes["migrations", "0002_second"], False),
(executor.loader.graph.nodes["migrations2", "0001_initial"], False),
],
)
# Fake-apply all migrations
executor.migrate([
("migrations", "0002_second"),
("migrations2", "0001_initial")
], fake=True)
# Rebuild the graph to reflect the new DB state
executor.loader.build_graph()
# Now plan a second time and make sure it's empty
plan = executor.migration_plan([
("migrations", "0002_second"),
("migrations2", "0001_initial"),
])
self.assertEqual(plan, [])
# Erase all the fake records
executor.recorder.record_unapplied("migrations2", "0001_initial")
executor.recorder.record_unapplied("migrations", "0002_second")
executor.recorder.record_unapplied("migrations", "0001_initial")
@override_settings(MIGRATION_MODULES={
"migrations": "migrations.test_migrations",
"migrations2": "migrations2.test_migrations_2_no_deps",
})
def test_mixed_plan_not_supported(self):
"""
Although the MigrationExecutor interfaces allows for mixed migration
plans (combined forwards and backwards migrations) this is not
supported.
"""
# Prepare for mixed plan
executor = MigrationExecutor(connection)
plan = executor.migration_plan([("migrations", "0002_second")])
self.assertEqual(
plan,
[
(executor.loader.graph.nodes["migrations", "0001_initial"], False),
(executor.loader.graph.nodes["migrations", "0002_second"], False),
],
)
executor.migrate(None, plan)
# Rebuild the graph to reflect the new DB state
executor.loader.build_graph()
self.assertIn(('migrations', '0001_initial'), executor.loader.applied_migrations)
self.assertIn(('migrations', '0002_second'), executor.loader.applied_migrations)
self.assertNotIn(('migrations2', '0001_initial'), executor.loader.applied_migrations)
# Generate mixed plan
plan = executor.migration_plan([
("migrations", None),
("migrations2", "0001_initial"),
])
msg = (
'Migration plans with both forwards and backwards migrations are '
'not supported. Please split your migration process into separate '
'plans of only forwards OR backwards migrations.'
)
with self.assertRaisesMessage(InvalidMigrationPlan, msg) as cm:
executor.migrate(None, plan)
self.assertEqual(
cm.exception.args[1],
[
(executor.loader.graph.nodes["migrations", "0002_second"], True),
(executor.loader.graph.nodes["migrations", "0001_initial"], True),
(executor.loader.graph.nodes["migrations2", "0001_initial"], False),
],
)
# Rebuild the graph to reflect the new DB state
executor.loader.build_graph()
executor.migrate([
("migrations", None),
("migrations2", None),
])
# Are the tables gone?
self.assertTableNotExists("migrations_author")
self.assertTableNotExists("migrations_book")
self.assertTableNotExists("migrations2_otherauthor")
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_soft_apply(self):
"""
Tests detection of initial migrations already having been applied.
"""
state = {"faked": None}
def fake_storer(phase, migration=None, fake=None):
state["faked"] = fake
executor = MigrationExecutor(connection, progress_callback=fake_storer)
# Were the tables there before?
self.assertTableNotExists("migrations_author")
self.assertTableNotExists("migrations_tribble")
# Run it normally
self.assertEqual(
executor.migration_plan([("migrations", "0001_initial")]),
[
(executor.loader.graph.nodes["migrations", "0001_initial"], False),
],
)
executor.migrate([("migrations", "0001_initial")])
# Are the tables there now?
self.assertTableExists("migrations_author")
self.assertTableExists("migrations_tribble")
# We shouldn't have faked that one
self.assertEqual(state["faked"], False)
# Rebuild the graph to reflect the new DB state
executor.loader.build_graph()
# Fake-reverse that
executor.migrate([("migrations", None)], fake=True)
# Are the tables still there?
self.assertTableExists("migrations_author")
self.assertTableExists("migrations_tribble")
# Make sure that was faked
self.assertEqual(state["faked"], True)
# Finally, migrate forwards; this should fake-apply our initial migration
executor.loader.build_graph()
self.assertEqual(
executor.migration_plan([("migrations", "0001_initial")]),
[
(executor.loader.graph.nodes["migrations", "0001_initial"], False),
],
)
# Applying the migration should raise a database level error
# because we haven't given the --fake-initial option
with self.assertRaises(DatabaseError):
executor.migrate([("migrations", "0001_initial")])
# Reset the faked state
state = {"faked": None}
# Allow faking of initial CreateModel operations
executor.migrate([("migrations", "0001_initial")], fake_initial=True)
self.assertEqual(state["faked"], True)
# And migrate back to clean up the database
executor.loader.build_graph()
executor.migrate([("migrations", None)])
self.assertTableNotExists("migrations_author")
self.assertTableNotExists("migrations_tribble")
@override_settings(
MIGRATION_MODULES={
"migrations": "migrations.test_migrations_custom_user",
"django.contrib.auth": "django.contrib.auth.migrations",
},
AUTH_USER_MODEL="migrations.Author",
)
def test_custom_user(self):
"""
Regression test for #22325 - references to a custom user model defined in the
same app are not resolved correctly.
"""
executor = MigrationExecutor(connection)
self.assertTableNotExists("migrations_author")
self.assertTableNotExists("migrations_tribble")
# Migrate forwards
executor.migrate([("migrations", "0001_initial")])
self.assertTableExists("migrations_author")
self.assertTableExists("migrations_tribble")
# Make sure the soft-application detection works (#23093)
# Change table_names to not return auth_user during this as
# it wouldn't be there in a normal run, and ensure migrations.Author
# exists in the global app registry temporarily.
old_table_names = connection.introspection.table_names
connection.introspection.table_names = lambda c: [x for x in old_table_names(c) if x != "auth_user"]
migrations_apps = executor.loader.project_state(("migrations", "0001_initial")).apps
global_apps.get_app_config("migrations").models["author"] = migrations_apps.get_model("migrations", "author")
try:
migration = executor.loader.get_migration("auth", "0001_initial")
self.assertEqual(executor.detect_soft_applied(None, migration)[0], True)
finally:
connection.introspection.table_names = old_table_names
del global_apps.get_app_config("migrations").models["author"]
# And migrate back to clean up the database
executor.loader.build_graph()
executor.migrate([("migrations", None)])
self.assertTableNotExists("migrations_author")
self.assertTableNotExists("migrations_tribble")
@override_settings(
INSTALLED_APPS=[
"migrations.migrations_test_apps.lookuperror_a",
"migrations.migrations_test_apps.lookuperror_b",
"migrations.migrations_test_apps.lookuperror_c"
]
)
def test_unrelated_model_lookups_forwards(self):
"""
#24123 - Tests that all models of apps already applied which are
unrelated to the first app being applied are part of the initial model
state.
"""
try:
executor = MigrationExecutor(connection)
self.assertTableNotExists("lookuperror_a_a1")
self.assertTableNotExists("lookuperror_b_b1")
self.assertTableNotExists("lookuperror_c_c1")
executor.migrate([("lookuperror_b", "0003_b3")])
self.assertTableExists("lookuperror_b_b3")
# Rebuild the graph to reflect the new DB state
executor.loader.build_graph()
# Migrate forwards -- this used to raise a LookupError because
# lookuperror_b.B2 is already applied
executor.migrate([
("lookuperror_a", "0004_a4"),
("lookuperror_c", "0003_c3"),
])
self.assertTableExists("lookuperror_a_a4")
self.assertTableExists("lookuperror_c_c3")
# Rebuild the graph to reflect the new DB state
executor.loader.build_graph()
finally:
# Cleanup
executor.migrate([
("lookuperror_a", None),
("lookuperror_b", None),
("lookuperror_c", None),
])
self.assertTableNotExists("lookuperror_a_a1")
self.assertTableNotExists("lookuperror_b_b1")
self.assertTableNotExists("lookuperror_c_c1")
@override_settings(
INSTALLED_APPS=[
"migrations.migrations_test_apps.lookuperror_a",
"migrations.migrations_test_apps.lookuperror_b",
"migrations.migrations_test_apps.lookuperror_c"
]
)
def test_unrelated_model_lookups_backwards(self):
"""
#24123 - Tests that all models of apps being unapplied which are
unrelated to the first app being unapplied are part of the initial
model state.
"""
try:
executor = MigrationExecutor(connection)
self.assertTableNotExists("lookuperror_a_a1")
self.assertTableNotExists("lookuperror_b_b1")
self.assertTableNotExists("lookuperror_c_c1")
executor.migrate([
("lookuperror_a", "0004_a4"),
("lookuperror_b", "0003_b3"),
("lookuperror_c", "0003_c3"),
])
self.assertTableExists("lookuperror_b_b3")
self.assertTableExists("lookuperror_a_a4")
self.assertTableExists("lookuperror_c_c3")
# Rebuild the graph to reflect the new DB state
executor.loader.build_graph()
# Migrate backwards -- this used to raise a LookupError because
# lookuperror_b.B2 is not in the initial state (unrelated to app c)
executor.migrate([("lookuperror_a", None)])
# Rebuild the graph to reflect the new DB state
executor.loader.build_graph()
finally:
# Cleanup
executor.migrate([
("lookuperror_b", None),
("lookuperror_c", None)
])
self.assertTableNotExists("lookuperror_a_a1")
self.assertTableNotExists("lookuperror_b_b1")
self.assertTableNotExists("lookuperror_c_c1")
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_process_callback(self):
"""
#24129 - Tests the migration progress callback.
"""
call_args_list = []
def callback(*args):
call_args_list.append(args)
executor = MigrationExecutor(connection, progress_callback=callback)
# Were the tables there before?
self.assertTableNotExists("migrations_author")
self.assertTableNotExists("migrations_tribble")
executor.migrate([
("migrations", "0001_initial"),
("migrations", "0002_second"),
])
# Rebuild the graph to reflect the new DB state
executor.loader.build_graph()
executor.migrate([
("migrations", None),
("migrations", None),
])
self.assertTableNotExists("migrations_author")
self.assertTableNotExists("migrations_tribble")
migrations = executor.loader.graph.nodes
expected = [
("render_start", ),
("render_success", ),
("apply_start", migrations['migrations', '0001_initial'], False),
("apply_success", migrations['migrations', '0001_initial'], False),
("apply_start", migrations['migrations', '0002_second'], False),
("apply_success", migrations['migrations', '0002_second'], False),
("render_start", ),
("render_success", ),
("unapply_start", migrations['migrations', '0002_second'], False),
("unapply_success", migrations['migrations', '0002_second'], False),
("unapply_start", migrations['migrations', '0001_initial'], False),
("unapply_success", migrations['migrations', '0001_initial'], False),
]
self.assertEqual(call_args_list, expected)
@override_settings(
INSTALLED_APPS=[
"migrations.migrations_test_apps.alter_fk.author_app",
"migrations.migrations_test_apps.alter_fk.book_app",
]
)
def test_alter_id_type_with_fk(self):
try:
executor = MigrationExecutor(connection)
self.assertTableNotExists("author_app_author")
self.assertTableNotExists("book_app_book")
# Apply initial migrations
executor.migrate([
("author_app", "0001_initial"),
("book_app", "0001_initial"),
])
self.assertTableExists("author_app_author")
self.assertTableExists("book_app_book")
# Rebuild the graph to reflect the new DB state
executor.loader.build_graph()
# Apply PK type alteration
executor.migrate([("author_app", "0002_alter_id")])
# Rebuild the graph to reflect the new DB state
executor.loader.build_graph()
finally:
# We can't simply unapply the migrations here because there is no
# implicit cast from VARCHAR to INT on the database level.
with connection.schema_editor() as editor:
editor.execute(editor.sql_delete_table % {"table": "book_app_book"})
editor.execute(editor.sql_delete_table % {"table": "author_app_author"})
self.assertTableNotExists("author_app_author")
self.assertTableNotExists("book_app_book")
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"})
def test_apply_all_replaced_marks_replacement_as_applied(self):
"""
Applying all replaced migrations marks replacement as applied (#24628).
"""
recorder = MigrationRecorder(connection)
# Place the database in a state where the replaced migrations are
# partially applied: 0001 is applied, 0002 is not.
recorder.record_applied("migrations", "0001_initial")
executor = MigrationExecutor(connection)
# Use fake because we don't actually have the first migration
# applied, so the second will fail. And there's no need to actually
# create/modify tables here; we're just testing the
# MigrationRecorder, which works the same with or without fake.
executor.migrate([("migrations", "0002_second")], fake=True)
# Because we've now applied 0001 and 0002 both, their squashed
# replacement should be marked as applied.
self.assertIn(
("migrations", "0001_squashed_0002"),
recorder.applied_migrations(),
)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"})
def test_migrate_marks_replacement_applied_even_if_it_did_nothing(self):
"""
A new squash migration will be marked as applied even if all its
replaced migrations were previously already applied (#24628).
"""
recorder = MigrationRecorder(connection)
# Record all replaced migrations as applied
recorder.record_applied("migrations", "0001_initial")
recorder.record_applied("migrations", "0002_second")
executor = MigrationExecutor(connection)
executor.migrate([("migrations", "0001_squashed_0002")])
# Because 0001 and 0002 are both applied, even though this migrate run
# didn't apply anything new, their squashed replacement should be
# marked as applied.
self.assertIn(
("migrations", "0001_squashed_0002"),
recorder.applied_migrations(),
)
class FakeLoader(object):
def __init__(self, graph, applied):
self.graph = graph
self.applied_migrations = applied
class FakeMigration(object):
"""Really all we need is any object with a debug-useful repr."""
def __init__(self, name):
self.name = name
def __repr__(self):
return 'M<%s>' % self.name
class ExecutorUnitTests(TestCase):
"""(More) isolated unit tests for executor methods."""
def test_minimize_rollbacks(self):
"""
Minimize unnecessary rollbacks in connected apps.
When you say "./manage.py migrate appA 0001", rather than migrating to
just after appA-0001 in the linearized migration plan (which could roll
back migrations in other apps that depend on appA 0001, but don't need
to be rolled back since we're not rolling back appA 0001), we migrate
to just before appA-0002.
"""
a1_impl = FakeMigration('a1')
a1 = ('a', '1')
a2_impl = FakeMigration('a2')
a2 = ('a', '2')
b1_impl = FakeMigration('b1')
b1 = ('b', '1')
graph = MigrationGraph()
graph.add_node(a1, a1_impl)
graph.add_node(a2, a2_impl)
graph.add_node(b1, b1_impl)
graph.add_dependency(None, b1, a1)
graph.add_dependency(None, a2, a1)
executor = MigrationExecutor(None)
executor.loader = FakeLoader(graph, {a1, b1, a2})
plan = executor.migration_plan({a1})
self.assertEqual(plan, [(a2_impl, True)])
def test_minimize_rollbacks_branchy(self):
"""
Minimize rollbacks when target has multiple in-app children.
a: 1 <---- 3 <--\
      \ \- 2 <--- 4
       \ \
b:      \- 1 <--- 2
"""
a1_impl = FakeMigration('a1')
a1 = ('a', '1')
a2_impl = FakeMigration('a2')
a2 = ('a', '2')
a3_impl = FakeMigration('a3')
a3 = ('a', '3')
a4_impl = FakeMigration('a4')
a4 = ('a', '4')
b1_impl = FakeMigration('b1')
b1 = ('b', '1')
b2_impl = FakeMigration('b2')
b2 = ('b', '2')
graph = MigrationGraph()
graph.add_node(a1, a1_impl)
graph.add_node(a2, a2_impl)
graph.add_node(a3, a3_impl)
graph.add_node(a4, a4_impl)
graph.add_node(b1, b1_impl)
graph.add_node(b2, b2_impl)
graph.add_dependency(None, a2, a1)
graph.add_dependency(None, a3, a1)
graph.add_dependency(None, a4, a2)
graph.add_dependency(None, a4, a3)
graph.add_dependency(None, b2, b1)
graph.add_dependency(None, b1, a1)
graph.add_dependency(None, b2, a2)
executor = MigrationExecutor(None)
executor.loader = FakeLoader(graph, {a1, b1, a2, b2, a3, a4})
plan = executor.migration_plan({a1})
should_be_rolled_back = [b2_impl, a4_impl, a2_impl, a3_impl]
exp = [(m, True) for m in should_be_rolled_back]
self.assertEqual(plan, exp)
def test_backwards_nothing_to_do(self):
"""
If the current state satisfies the given target, do nothing.
a: 1 <--- 2
b:   \- 1
c:    \- 1
If a1 is applied already and a2 is not, and we're asked to migrate to
a1, don't apply or unapply b1 or c1, regardless of their current state.
"""
a1_impl = FakeMigration('a1')
a1 = ('a', '1')
a2_impl = FakeMigration('a2')
a2 = ('a', '2')
b1_impl = FakeMigration('b1')
b1 = ('b', '1')
c1_impl = FakeMigration('c1')
c1 = ('c', '1')
graph = MigrationGraph()
graph.add_node(a1, a1_impl)
graph.add_node(a2, a2_impl)
graph.add_node(b1, b1_impl)
graph.add_node(c1, c1_impl)
graph.add_dependency(None, a2, a1)
graph.add_dependency(None, b1, a1)
graph.add_dependency(None, c1, a1)
executor = MigrationExecutor(None)
executor.loader = FakeLoader(graph, {a1, b1})
plan = executor.migration_plan({a1})
self.assertEqual(plan, [])
|
bsd-3-clause
|
ammaradil/fibonacci
|
Lib/site-packages/django/conf/locale/fi/formats.py
|
504
|
1390
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. E Y'
TIME_FORMAT = 'G.i'
DATETIME_FORMAT = r'j. E Y \k\e\l\l\o G.i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'j.n.Y'
SHORT_DATETIME_FORMAT = 'j.n.Y G.i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%d.%m.%Y', # '20.3.2014'
'%d.%m.%y', # '20.3.14'
]
DATETIME_INPUT_FORMATS = [
'%d.%m.%Y %H.%M.%S', # '20.3.2014 14.30.59'
'%d.%m.%Y %H.%M.%S.%f', # '20.3.2014 14.30.59.000200'
'%d.%m.%Y %H.%M', # '20.3.2014 14.30'
'%d.%m.%Y', # '20.3.2014'
'%d.%m.%y %H.%M.%S', # '20.3.14 14.30.59'
'%d.%m.%y %H.%M.%S.%f', # '20.3.14 14.30.59.000200'
'%d.%m.%y %H.%M', # '20.3.14 14.30'
'%d.%m.%y', # '20.3.14'
]
TIME_INPUT_FORMATS = [
'%H.%M.%S', # '14.30.59'
'%H.%M.%S.%f', # '14.30.59.000200'
'%H.%M', # '14.30'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0' # Non-breaking space
NUMBER_GROUPING = 3
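# A minimal usage sketch, assuming plain ``datetime.strptime`` in place of
# Django's form/localization machinery (which consumes these patterns the
# same way):
#
#     from datetime import datetime
#     datetime.strptime('20.3.2014 14.30', '%d.%m.%Y %H.%M')
#     # -> datetime(2014, 3, 20, 14, 30)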
|
mit
|
zoni/errbot
|
errbot/backend_plugin_manager.py
|
2
|
1705
|
import logging
import sys
from pathlib import Path
from typing import Any, Type
from errbot.plugin_info import PluginInfo
from .utils import collect_roots
log = logging.getLogger(__name__)
class PluginNotFoundException(Exception):
pass
class BackendPluginManager:
"""
This is a one-shot plugin manager for Backend and Storage plugins.
"""
def __init__(self, bot_config, base_module: str, plugin_name: str, base_class: Type,
base_search_dir, extra_search_dirs=()):
self._config = bot_config
self._base_module = base_module
self._base_class = base_class
self.plugin_info = None
all_plugins_paths = collect_roots((base_search_dir, extra_search_dirs))
plugin_places = [Path(root) for root in all_plugins_paths]
for path in plugin_places:
plugfiles = path.glob('**/*.plug')
for plugfile in plugfiles:
plugin_info = PluginInfo.load(plugfile)
if plugin_info.name == plugin_name:
self.plugin_info = plugin_info
return
raise PluginNotFoundException('Could not find the plugin named %s in %s.' % (plugin_name, all_plugins_paths))
def load_plugin(self) -> Any:
plugin_path = self.plugin_info.location.parent
if plugin_path not in sys.path:
sys.path.append(plugin_path)
plugin_classes = self.plugin_info.load_plugin_classes(self._base_module, self._base_class)
if len(plugin_classes) != 1:
raise PluginNotFoundException('Found more than one plugin for %s.' % self._base_class)
_, clazz = plugin_classes[0]
return clazz(self._config)
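# Hypothetical usage sketch (``ErrBot`` and ``CORE_BACKENDS`` are assumed
# names, not exports of this module):
#
#     manager = BackendPluginManager(bot_config, 'errbot.backends', 'Text',
#                                    ErrBot, CORE_BACKENDS)
#     backend = manager.load_plugin()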
|
gpl-3.0
|
gangadharkadam/smrtfrappe
|
frappe/templates/pages/contact.py
|
32
|
1561
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import now
def get_context(context):
doc = frappe.get_doc("Contact Us Settings", "Contact Us Settings")
if doc.query_options:
query_options = [opt.strip() for opt in doc.query_options.replace(",", "\n").split("\n") if opt]
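# e.g. "Sales, Support" and "Sales\nSupport" both yield ["Sales", "Support"]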
else:
query_options = ["Sales", "Support", "General"]
address = None
if doc.get("address"):
address = frappe.get_doc("Address", doc.address)
out = {
"query_options": query_options
}
out.update(doc.as_dict())
return out
max_communications_per_hour = 1000
@frappe.whitelist(allow_guest=True)
def send_message(subject="Website Query", message="", sender=""):
if not message:
frappe.response["message"] = 'Please write something'
return
if not sender:
frappe.response["message"] = 'Email Id Required'
return
# guest method, cap max writes per hour
if frappe.db.sql("""select count(*) from `tabCommunication`
where `sent_or_received`="Received"
and TIMEDIFF(%s, modified) < '01:00:00'""", now())[0][0] > max_communications_per_hour:
frappe.response["message"] = "Sorry: we believe we have received an unreasonably high number of requests of this kind. Please try later"
return
# send email
forward_to_email = frappe.db.get_value("Contact Us Settings", None, "forward_to_email")
if forward_to_email:
from frappe.utils.email_lib import sendmail
sendmail(forward_to_email, sender, message, subject)
return "okay"
|
mit
|
DelazJ/QGIS
|
tests/src/python/test_qgsproject.py
|
23
|
57311
|
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsProject.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
from builtins import chr
from builtins import range
__author__ = 'Sebastian Dietrich'
__date__ = '19/11/2015'
__copyright__ = 'Copyright 2015, The QGIS Project'
import os
import re
from osgeo import ogr
import codecs
from io import BytesIO
from zipfile import ZipFile
from tempfile import TemporaryDirectory
import qgis # NOQA
from qgis.core import (QgsProject,
QgsCoordinateTransformContext,
QgsProjectDirtyBlocker,
QgsApplication,
QgsUnitTypes,
QgsCoordinateReferenceSystem,
QgsLabelingEngineSettings,
QgsVectorLayer,
QgsRasterLayer,
QgsMapLayer,
QgsExpressionContextUtils,
QgsProjectColorScheme,
QgsSettings)
from qgis.gui import (QgsLayerTreeMapCanvasBridge,
QgsMapCanvas)
from qgis.PyQt.QtTest import QSignalSpy
from qgis.PyQt.QtCore import QT_VERSION_STR, QTemporaryDir, QTemporaryFile
from qgis.PyQt.QtGui import QColor
from qgis.PyQt import sip
from qgis.testing import start_app, unittest
from utilities import (unitTestDataPath)
from shutil import copyfile
app = start_app()
TEST_DATA_DIR = unitTestDataPath()
def createLayer(name):
return QgsVectorLayer("Point?field=x:string", name, "memory")
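# A throwaway memory-provider point layer, used as a lightweight fixture
# throughout these tests.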
class TestQgsProject(unittest.TestCase):
def __init__(self, methodName):
"""Run once on class initialization."""
unittest.TestCase.__init__(self, methodName)
self.messageCaught = False
def test_makeKeyTokens_(self):
# see http://www.w3.org/TR/REC-xml/#d0e804 for a list of valid characters
invalidTokens = []
validTokens = []
# all test tokens will be generated by prepending or inserting characters to this token
validBase = "valid"
# some invalid characters, not allowed anywhere in a token
# note that '/' must not be added here because it is taken as a separator by makeKeyTokens_()
invalidChars = "+*,;<>|!$%()=?#\x01"
# generate the characters that are allowed at the start of a token (and at every other position)
validStartChars = ":_"
charRanges = [
(ord('a'), ord('z')),
(ord('A'), ord('Z')),
(0x00F8, 0x02FF),
(0x0370, 0x037D),
(0x037F, 0x1FFF),
(0x200C, 0x200D),
(0x2070, 0x218F),
(0x2C00, 0x2FEF),
(0x3001, 0xD7FF),
(0xF900, 0xFDCF),
(0xFDF0, 0xFFFD),
# (0x10000, 0xEFFFF), while actually valid, these are not yet accepted by makeKeyTokens_()
]
for r in charRanges:
for c in range(r[0], r[1]):
validStartChars += chr(c)
# generate the characters that are only allowed inside a token, not at the start
validInlineChars = "-.\xB7"
charRanges = [
(ord('0'), ord('9')),
(0x0300, 0x036F),
(0x203F, 0x2040),
]
for r in charRanges:
for c in range(r[0], r[1]):
validInlineChars += chr(c)
# test forbidden start characters
for c in invalidChars + validInlineChars:
invalidTokens.append(c + validBase)
# test forbidden inline characters
for c in invalidChars:
invalidTokens.append(validBase[:4] + c + validBase[4:])
# test each allowed start character
for c in validStartChars:
validTokens.append(c + validBase)
# test each allowed inline character
for c in validInlineChars:
validTokens.append(validBase[:4] + c + validBase[4:])
logger = QgsApplication.messageLog()
logger.messageReceived.connect(self.catchMessage)
prj = QgsProject.instance()
for token in validTokens:
self.messageCaught = False
prj.readEntry("test", token)
myMessage = "valid token '%s' not accepted" % (token)
assert not self.messageCaught, myMessage
for token in invalidTokens:
self.messageCaught = False
prj.readEntry("test", token)
myMessage = "invalid token '%s' accepted" % (token)
assert self.messageCaught, myMessage
logger.messageReceived.disconnect(self.catchMessage)
def catchMessage(self):
self.messageCaught = True
def testClear(self):
prj = QgsProject.instance()
prj.setTitle('xxx')
spy = QSignalSpy(prj.cleared)
prj.clear()
self.assertEqual(len(spy), 1)
self.assertFalse(prj.title())
def testCrs(self):
prj = QgsProject.instance()
prj.clear()
self.assertFalse(prj.crs().isValid())
prj.setCrs(QgsCoordinateReferenceSystem.fromOgcWmsCrs('EPSG:3111'))
self.assertEqual(prj.crs().authid(), 'EPSG:3111')
def testEllipsoid(self):
prj = QgsProject.instance()
prj.clear()
prj.setCrs(QgsCoordinateReferenceSystem.fromOgcWmsCrs('EPSG:3111'))
prj.setEllipsoid('WGS84')
self.assertEqual(prj.ellipsoid(), 'WGS84')
# if project has NO crs, then ellipsoid should always be none
prj.setCrs(QgsCoordinateReferenceSystem())
self.assertEqual(prj.ellipsoid(), 'NONE')
def testDistanceUnits(self):
prj = QgsProject.instance()
prj.clear()
prj.setDistanceUnits(QgsUnitTypes.DistanceFeet)
self.assertEqual(prj.distanceUnits(), QgsUnitTypes.DistanceFeet)
def testAreaUnits(self):
prj = QgsProject.instance()
prj.clear()
prj.setAreaUnits(QgsUnitTypes.AreaSquareFeet)
self.assertEqual(prj.areaUnits(), QgsUnitTypes.AreaSquareFeet)
def testReadEntry(self):
prj = QgsProject.instance()
prj.read(os.path.join(TEST_DATA_DIR, 'labeling/test-labeling.qgs'))
# add a test entry list
prj.writeEntry("TestScope", "/TestListProperty", ["Entry1", "Entry2"])
# valid key, valid value
self.assertEqual(prj.readNumEntry("SpatialRefSys", "/ProjectionsEnabled", -1), (0, True))
self.assertEqual(prj.readEntry("SpatialRefSys", "/ProjectCrs"), ("EPSG:32613", True))
self.assertEqual(prj.readBoolEntry("PAL", "/ShowingCandidates"), (False, True))
self.assertEqual(prj.readNumEntry("PAL", "/CandidatesPolygon"), (8., True))
self.assertEqual(prj.readListEntry("TestScope", "/TestListProperty"), (["Entry1", "Entry2"], True))
# invalid key
self.assertEqual(prj.readNumEntry("SpatialRefSys", "/InvalidKey", -1), (-1, False))
self.assertEqual(prj.readEntry("SpatialRefSys", "/InvalidKey", "wrong"), ("wrong", False))
self.assertEqual(prj.readBoolEntry("PAL", "/InvalidKey", True), (True, False))
self.assertEqual(prj.readDoubleEntry("PAL", "/InvalidKey", 42.), (42., False))
self.assertEqual(prj.readListEntry("TestScope", "/InvalidKey", ["Default1", "Default2"]), (["Default1", "Default2"], False))
def testEmbeddedGroup(self):
testdata_path = unitTestDataPath('embedded_groups') + '/'
prj_path = os.path.join(testdata_path, "project2.qgs")
prj = QgsProject()
prj.read(prj_path)
layer_tree_group = prj.layerTreeRoot()
self.assertEqual(len(layer_tree_group.findLayerIds()), 2)
for layer_id in layer_tree_group.findLayerIds():
name = prj.mapLayer(layer_id).name()
self.assertTrue(name in ['polys', 'lines'])
if name == 'polys':
self.assertTrue(layer_tree_group.findLayer(layer_id).itemVisibilityChecked())
elif name == 'lines':
self.assertFalse(layer_tree_group.findLayer(layer_id).itemVisibilityChecked())
def testInstance(self):
""" test retrieving global instance """
self.assertTrue(QgsProject.instance())
# register a layer to the singleton
QgsProject.instance().addMapLayer(createLayer('test'))
# check that the same instance is returned
self.assertEqual(len(QgsProject.instance().mapLayersByName('test')), 1)
QgsProject.instance().removeAllMapLayers()
def test_addMapLayer(self):
""" test adding individual map layers to registry """
QgsProject.instance().removeAllMapLayers()
l1 = createLayer('test')
self.assertEqual(QgsProject.instance().addMapLayer(l1), l1)
self.assertEqual(len(QgsProject.instance().mapLayersByName('test')), 1)
self.assertEqual(QgsProject.instance().count(), 1)
# adding a second layer should leave existing layers intact
l2 = createLayer('test2')
self.assertEqual(QgsProject.instance().addMapLayer(l2), l2)
self.assertEqual(len(QgsProject.instance().mapLayersByName('test')), 1)
self.assertEqual(len(QgsProject.instance().mapLayersByName('test2')), 1)
self.assertEqual(QgsProject.instance().count(), 2)
QgsProject.instance().removeAllMapLayers()
def test_addMapLayerAlreadyAdded(self):
""" test that already added layers can't be readded to registry """
QgsProject.instance().removeAllMapLayers()
l1 = createLayer('test')
QgsProject.instance().addMapLayer(l1)
self.assertEqual(len(QgsProject.instance().mapLayersByName('test')), 1)
self.assertEqual(QgsProject.instance().count(), 1)
self.assertEqual(QgsProject.instance().addMapLayer(l1), None)
self.assertEqual(len(QgsProject.instance().mapLayersByName('test')), 1)
self.assertEqual(QgsProject.instance().count(), 1)
QgsProject.instance().removeAllMapLayers()
def test_addMapLayerInvalid(self):
""" test that invalid map layers can be added to registry """
QgsProject.instance().removeAllMapLayers()
vl = QgsVectorLayer("Point?field=x:string", 'test', "xxx")
self.assertEqual(QgsProject.instance().addMapLayer(vl), vl)
self.assertFalse(vl in QgsProject.instance().mapLayers(True).values())
self.assertEqual(len(QgsProject.instance().mapLayersByName('test')), 1)
self.assertEqual(QgsProject.instance().count(), 1)
self.assertEqual(QgsProject.instance().validCount(), 0)
self.assertEqual(len(QgsProject.instance().mapLayers(True)), 0)
QgsProject.instance().removeAllMapLayers()
def test_addMapLayerSignals(self):
""" test that signals are correctly emitted when adding map layer"""
QgsProject.instance().removeAllMapLayers()
layer_was_added_spy = QSignalSpy(QgsProject.instance().layerWasAdded)
layers_added_spy = QSignalSpy(QgsProject.instance().layersAdded)
legend_layers_added_spy = QSignalSpy(QgsProject.instance().legendLayersAdded)
l1 = createLayer('test')
QgsProject.instance().addMapLayer(l1)
# can't seem to actually test the data which was emitted, so the best we can do is test
# the signal count
self.assertEqual(len(layer_was_added_spy), 1)
self.assertEqual(len(layers_added_spy), 1)
self.assertEqual(len(legend_layers_added_spy), 1)
# layer not added to legend
QgsProject.instance().addMapLayer(createLayer('test2'), False)
self.assertEqual(len(layer_was_added_spy), 2)
self.assertEqual(len(layers_added_spy), 2)
self.assertEqual(len(legend_layers_added_spy), 1)
# try re-adding a layer already in the registry
QgsProject.instance().addMapLayer(l1)
# should be no extra signals emitted
self.assertEqual(len(layer_was_added_spy), 2)
self.assertEqual(len(layers_added_spy), 2)
self.assertEqual(len(legend_layers_added_spy), 1)
def test_addMapLayers(self):
""" test adding multiple map layers to registry """
QgsProject.instance().removeAllMapLayers()
l1 = createLayer('test')
l2 = createLayer('test2')
self.assertEqual(set(QgsProject.instance().addMapLayers([l1, l2])), set([l1, l2]))
self.assertEqual(len(QgsProject.instance().mapLayersByName('test')), 1)
self.assertEqual(len(QgsProject.instance().mapLayersByName('test2')), 1)
self.assertEqual(QgsProject.instance().count(), 2)
# adding more layers should leave existing layers intact
l3 = createLayer('test3')
l4 = createLayer('test4')
self.assertEqual(set(QgsProject.instance().addMapLayers([l3, l4])), set([l3, l4]))
self.assertEqual(len(QgsProject.instance().mapLayersByName('test')), 1)
self.assertEqual(len(QgsProject.instance().mapLayersByName('test2')), 1)
self.assertEqual(len(QgsProject.instance().mapLayersByName('test3')), 1)
self.assertEqual(len(QgsProject.instance().mapLayersByName('test4')), 1)
self.assertEqual(QgsProject.instance().count(), 4)
QgsProject.instance().removeAllMapLayers()
def test_addMapLayersInvalid(self):
""" test that invalid map layers can be added to registry """
QgsProject.instance().removeAllMapLayers()
vl = QgsVectorLayer("Point?field=x:string", 'test', "xxx")
self.assertEqual(QgsProject.instance().addMapLayers([vl]), [vl])
self.assertFalse(vl in QgsProject.instance().mapLayers(True).values())
self.assertEqual(len(QgsProject.instance().mapLayersByName('test')), 1)
self.assertEqual(QgsProject.instance().count(), 1)
self.assertEqual(QgsProject.instance().validCount(), 0)
QgsProject.instance().removeAllMapLayers()
def test_addMapLayersAlreadyAdded(self):
""" test that already added layers can't be readded to registry """
QgsProject.instance().removeAllMapLayers()
l1 = createLayer('test')
self.assertEqual(QgsProject.instance().addMapLayers([l1]), [l1])
self.assertEqual(len(QgsProject.instance().mapLayersByName('test')), 1)
self.assertEqual(QgsProject.instance().count(), 1)
self.assertEqual(QgsProject.instance().addMapLayers([l1]), [])
self.assertEqual(len(QgsProject.instance().mapLayersByName('test')), 1)
self.assertEqual(QgsProject.instance().count(), 1)
QgsProject.instance().removeAllMapLayers()
def test_addMapLayersSignals(self):
""" test that signals are correctly emitted when adding map layers"""
QgsProject.instance().removeAllMapLayers()
layer_was_added_spy = QSignalSpy(QgsProject.instance().layerWasAdded)
layers_added_spy = QSignalSpy(QgsProject.instance().layersAdded)
legend_layers_added_spy = QSignalSpy(QgsProject.instance().legendLayersAdded)
l1 = createLayer('test')
l2 = createLayer('test2')
QgsProject.instance().addMapLayers([l1, l2])
# can't seem to actually test the data which was emitted, so the best we can do is test
# the signal count
self.assertEqual(len(layer_was_added_spy), 2)
self.assertEqual(len(layers_added_spy), 1)
self.assertEqual(len(legend_layers_added_spy), 1)
# layer not added to legend
QgsProject.instance().addMapLayers([createLayer('test3'), createLayer('test4')], False)
self.assertEqual(len(layer_was_added_spy), 4)
self.assertEqual(len(layers_added_spy), 2)
self.assertEqual(len(legend_layers_added_spy), 1)
# try re-adding a layer already in the registry
QgsProject.instance().addMapLayers([l1, l2])
# should be no extra signals emitted
self.assertEqual(len(layer_was_added_spy), 4)
self.assertEqual(len(layers_added_spy), 2)
self.assertEqual(len(legend_layers_added_spy), 1)
def test_mapLayerById(self):
""" test retrieving map layer by ID """
QgsProject.instance().removeAllMapLayers()
# test no crash with empty registry
self.assertEqual(QgsProject.instance().mapLayer('bad'), None)
self.assertEqual(QgsProject.instance().mapLayer(None), None)
l1 = createLayer('test')
l2 = createLayer('test2')
QgsProject.instance().addMapLayers([l1, l2])
self.assertEqual(QgsProject.instance().mapLayer('bad'), None)
self.assertEqual(QgsProject.instance().mapLayer(None), None)
self.assertEqual(QgsProject.instance().mapLayer(l1.id()), l1)
self.assertEqual(QgsProject.instance().mapLayer(l2.id()), l2)
def test_mapLayersByName(self):
""" test retrieving map layer by name """
p = QgsProject()
# test no crash with empty registry
self.assertEqual(p.mapLayersByName('bad'), [])
self.assertEqual(p.mapLayersByName(None), [])
l1 = createLayer('test')
l2 = createLayer('test2')
p.addMapLayers([l1, l2])
self.assertEqual(p.mapLayersByName('bad'), [])
self.assertEqual(p.mapLayersByName(None), [])
self.assertEqual(p.mapLayersByName('test'), [l1])
self.assertEqual(p.mapLayersByName('test2'), [l2])
# duplicate name
l3 = createLayer('test')
p.addMapLayer(l3)
self.assertEqual(set(p.mapLayersByName('test')), set([l1, l3]))
def test_mapLayers(self):
""" test retrieving map layers list """
QgsProject.instance().removeAllMapLayers()
# test no crash with empty registry
self.assertEqual(QgsProject.instance().mapLayers(), {})
l1 = createLayer('test')
l2 = createLayer('test2')
QgsProject.instance().addMapLayers([l1, l2])
self.assertEqual(QgsProject.instance().mapLayers(), {l1.id(): l1, l2.id(): l2})
def test_removeMapLayersById(self):
""" test removing map layers by ID """
QgsProject.instance().removeAllMapLayers()
# test no crash with empty registry
QgsProject.instance().removeMapLayers(['bad'])
QgsProject.instance().removeMapLayers([None])
l1 = createLayer('test')
l2 = createLayer('test2')
l3 = createLayer('test3')
QgsProject.instance().addMapLayers([l1, l2, l3])
self.assertEqual(QgsProject.instance().count(), 3)
# remove bad layers
QgsProject.instance().removeMapLayers(['bad'])
self.assertEqual(QgsProject.instance().count(), 3)
QgsProject.instance().removeMapLayers([None])
self.assertEqual(QgsProject.instance().count(), 3)
# remove valid layers
l1_id = l1.id()
QgsProject.instance().removeMapLayers([l1_id])
self.assertEqual(QgsProject.instance().count(), 2)
# double remove
QgsProject.instance().removeMapLayers([l1_id])
self.assertEqual(QgsProject.instance().count(), 2)
# test that layer has been deleted
self.assertTrue(sip.isdeleted(l1))
# remove multiple
QgsProject.instance().removeMapLayers([l2.id(), l3.id()])
self.assertEqual(QgsProject.instance().count(), 0)
self.assertTrue(sip.isdeleted(l2))
# try removing a layer not in the registry
l4 = createLayer('test4')
QgsProject.instance().removeMapLayers([l4.id()])
self.assertFalse(sip.isdeleted(l4))
# fails on qt5 due to removeMapLayers list type conversion - needs a PyName alias
# added to removeMapLayers for QGIS 3.0
@unittest.expectedFailure(QT_VERSION_STR[0] == '5')
def test_removeMapLayersByLayer(self):
""" test removing map layers by layer"""
QgsProject.instance().removeAllMapLayers()
# test no crash with empty registry
QgsProject.instance().removeMapLayers([None])
l1 = createLayer('test')
l2 = createLayer('test2')
l3 = createLayer('test3')
QgsProject.instance().addMapLayers([l1, l2, l3])
self.assertEqual(QgsProject.instance().count(), 3)
# remove bad layers
QgsProject.instance().removeMapLayers([None])
self.assertEqual(QgsProject.instance().count(), 3)
# remove valid layers
QgsProject.instance().removeMapLayers([l1])
self.assertEqual(QgsProject.instance().count(), 2)
# test that layer has been deleted
self.assertTrue(sip.isdeleted(l1))
# remove multiple
QgsProject.instance().removeMapLayers([l2, l3])
self.assertEqual(QgsProject.instance().count(), 0)
self.assertTrue(sip.isdeleted(l2))
self.assertTrue(sip.isdeleted(l3))
def test_removeMapLayerById(self):
""" test removing a map layer by ID """
QgsProject.instance().removeAllMapLayers()
# test no crash with empty registry
QgsProject.instance().removeMapLayer('bad')
QgsProject.instance().removeMapLayer(None)
l1 = createLayer('test')
l2 = createLayer('test2')
QgsProject.instance().addMapLayers([l1, l2])
self.assertEqual(QgsProject.instance().count(), 2)
# remove bad layers
QgsProject.instance().removeMapLayer('bad')
self.assertEqual(QgsProject.instance().count(), 2)
QgsProject.instance().removeMapLayer(None)
self.assertEqual(QgsProject.instance().count(), 2)
# remove valid layers
l1_id = l1.id()
QgsProject.instance().removeMapLayer(l1_id)
self.assertEqual(QgsProject.instance().count(), 1)
# double remove
QgsProject.instance().removeMapLayer(l1_id)
self.assertEqual(QgsProject.instance().count(), 1)
# test that layer has been deleted
self.assertTrue(sip.isdeleted(l1))
# remove second layer
QgsProject.instance().removeMapLayer(l2.id())
self.assertEqual(QgsProject.instance().count(), 0)
self.assertTrue(sip.isdeleted(l2))
# try removing a layer not in the registry
l3 = createLayer('test3')
QgsProject.instance().removeMapLayer(l3.id())
self.assertFalse(sip.isdeleted(l3))
def test_removeMapLayerByLayer(self):
""" test removing a map layer by layer """
QgsProject.instance().removeAllMapLayers()
# test no crash with empty registry
QgsProject.instance().removeMapLayer('bad')
QgsProject.instance().removeMapLayer(None)
l1 = createLayer('test')
l2 = createLayer('test2')
QgsProject.instance().addMapLayers([l1, l2])
self.assertEqual(QgsProject.instance().count(), 2)
# remove bad layers
QgsProject.instance().removeMapLayer(None)
self.assertEqual(QgsProject.instance().count(), 2)
l3 = createLayer('test3')
QgsProject.instance().removeMapLayer(l3)
self.assertEqual(QgsProject.instance().count(), 2)
# remove valid layers
QgsProject.instance().removeMapLayer(l1)
self.assertEqual(QgsProject.instance().count(), 1)
# test that layer has been deleted
self.assertTrue(sip.isdeleted(l1))
# remove second layer
QgsProject.instance().removeMapLayer(l2)
self.assertEqual(QgsProject.instance().count(), 0)
self.assertTrue(sip.isdeleted(l2))
# try removing a layer not in the registry
l3 = createLayer('test3')
QgsProject.instance().removeMapLayer(l3)
self.assertFalse(sip.isdeleted(l3))
def test_removeAllMapLayers(self):
""" test removing all map layers from registry """
QgsProject.instance().removeAllMapLayers()
l1 = createLayer('test')
l2 = createLayer('test2')
QgsProject.instance().addMapLayers([l1, l2])
self.assertEqual(QgsProject.instance().count(), 2)
QgsProject.instance().removeAllMapLayers()
self.assertEqual(QgsProject.instance().count(), 0)
self.assertEqual(QgsProject.instance().mapLayersByName('test'), [])
self.assertEqual(QgsProject.instance().mapLayersByName('test2'), [])
def test_addRemoveLayersSignals(self):
""" test that signals are correctly emitted when removing map layers"""
QgsProject.instance().removeAllMapLayers()
layers_will_be_removed_spy = QSignalSpy(QgsProject.instance().layersWillBeRemoved)
layer_will_be_removed_spy_str = QSignalSpy(QgsProject.instance().layerWillBeRemoved[str])
layer_will_be_removed_spy_layer = QSignalSpy(QgsProject.instance().layerWillBeRemoved[QgsMapLayer])
layers_removed_spy = QSignalSpy(QgsProject.instance().layersRemoved)
layer_removed_spy = QSignalSpy(QgsProject.instance().layerRemoved)
remove_all_spy = QSignalSpy(QgsProject.instance().removeAll)
l1 = createLayer('l1')
l2 = createLayer('l2')
l3 = createLayer('l3')
l4 = createLayer('l4')
QgsProject.instance().addMapLayers([l1, l2, l3, l4])
# remove 1 layer
QgsProject.instance().removeMapLayer(l1)
# can't seem to actually test the data which was emitted, so the best we can do is test
# the signal count
self.assertEqual(len(layers_will_be_removed_spy), 1)
self.assertEqual(len(layer_will_be_removed_spy_str), 1)
self.assertEqual(len(layer_will_be_removed_spy_layer), 1)
self.assertEqual(len(layers_removed_spy), 1)
self.assertEqual(len(layer_removed_spy), 1)
self.assertEqual(len(remove_all_spy), 0)
self.assertEqual(QgsProject.instance().count(), 3)
# remove 2 layers at once
QgsProject.instance().removeMapLayers([l2.id(), l3.id()])
self.assertEqual(len(layers_will_be_removed_spy), 2)
self.assertEqual(len(layer_will_be_removed_spy_str), 3)
self.assertEqual(len(layer_will_be_removed_spy_layer), 3)
self.assertEqual(len(layers_removed_spy), 2)
self.assertEqual(len(layer_removed_spy), 3)
self.assertEqual(len(remove_all_spy), 0)
self.assertEqual(QgsProject.instance().count(), 1)
# remove all
QgsProject.instance().removeAllMapLayers()
self.assertEqual(len(layers_will_be_removed_spy), 3)
self.assertEqual(len(layer_will_be_removed_spy_str), 4)
self.assertEqual(len(layer_will_be_removed_spy_layer), 4)
self.assertEqual(len(layers_removed_spy), 3)
self.assertEqual(len(layer_removed_spy), 4)
self.assertEqual(len(remove_all_spy), 1)
# remove some layers which aren't in the registry
QgsProject.instance().removeMapLayers(['asdasd'])
self.assertEqual(len(layers_will_be_removed_spy), 3)
self.assertEqual(len(layer_will_be_removed_spy_str), 4)
self.assertEqual(len(layer_will_be_removed_spy_layer), 4)
self.assertEqual(len(layers_removed_spy), 3)
self.assertEqual(len(layer_removed_spy), 4)
self.assertEqual(len(remove_all_spy), 1)
l5 = createLayer('test5')
QgsProject.instance().removeMapLayer(l5)
self.assertEqual(len(layers_will_be_removed_spy), 3)
self.assertEqual(len(layer_will_be_removed_spy_str), 4)
self.assertEqual(len(layer_will_be_removed_spy_layer), 4)
self.assertEqual(len(layers_removed_spy), 3)
self.assertEqual(len(layer_removed_spy), 4)
self.assertEqual(len(remove_all_spy), 1)
def test_RemoveLayerShouldNotSegFault(self):
QgsProject.instance().removeAllMapLayers()
reg = QgsProject.instance()
# Should not segfault
reg.removeMapLayers(['not_exists'])
reg.removeMapLayer('not_exists2')
# also check that the removal of a nonexistent layer does not insert a null layer
for k, layer in list(reg.mapLayers().items()):
assert (layer is not None)
def testTakeLayer(self):
# test taking ownership of a layer from the project
l1 = createLayer('l1')
l2 = createLayer('l2')
p = QgsProject()
# add one layer to project
p.addMapLayer(l1)
self.assertEqual(p.mapLayers(), {l1.id(): l1})
self.assertEqual(l1.parent().parent(), p)
# try taking some layers which don't exist in project
self.assertFalse(p.takeMapLayer(None))
self.assertFalse(p.takeMapLayer(l2))
# but l2 should still exist..
self.assertTrue(l2.isValid())
# take layer from project
self.assertEqual(p.takeMapLayer(l1), l1)
self.assertFalse(p.mapLayers()) # no layers left
# but l1 should still exist
self.assertTrue(l1.isValid())
# layer should have no parent now
self.assertFalse(l1.parent())
# destroy project
p = None
self.assertTrue(l1.isValid())
def test_transactionsGroup(self):
# Undefined transaction group (wrong provider key).
QgsProject.instance().setAutoTransaction(True)
noTg = QgsProject.instance().transactionGroup("provider-key", "database-connection-string")
self.assertIsNone(noTg)
def test_zip_new_project(self):
tmpDir = QTemporaryDir()
tmpFile = "{}/project.qgz".format(tmpDir.path())
# zip with existing file
open(tmpFile, 'a').close()
project = QgsProject()
self.assertTrue(project.write(tmpFile))
# zip with non existing file
os.remove(tmpFile)
project = QgsProject()
self.assertTrue(project.write(tmpFile))
self.assertTrue(os.path.isfile(tmpFile))
def test_zip_invalid_path(self):
project = QgsProject()
self.assertFalse(project.write())
self.assertFalse(project.write(""))
self.assertFalse(project.write("/fake/test.zip"))
def test_zip_filename(self):
tmpDir = QTemporaryDir()
tmpFile = "{}/project.qgz".format(tmpDir.path())
project = QgsProject()
self.assertFalse(project.write())
project.setFileName(tmpFile)
self.assertTrue(project.write())
self.assertTrue(os.path.isfile(tmpFile))
def test_unzip_invalid_path(self):
project = QgsProject()
self.assertFalse(project.read())
self.assertFalse(project.read(""))
self.assertFalse(project.read("/fake/test.zip"))
def test_zip_unzip(self):
tmpDir = QTemporaryDir()
tmpFile = "{}/project.qgz".format(tmpDir.path())
project = QgsProject()
l0 = QgsVectorLayer(os.path.join(TEST_DATA_DIR, "points.shp"), "points", "ogr")
l1 = QgsVectorLayer(os.path.join(TEST_DATA_DIR, "lines.shp"), "lines", "ogr")
project.addMapLayers([l0, l1])
self.assertTrue(project.write(tmpFile))
project2 = QgsProject()
self.assertFalse(project2.isZipped())
self.assertTrue(project2.fileName() == "")
self.assertTrue(project2.read(tmpFile))
self.assertTrue(project2.isZipped())
self.assertTrue(project2.fileName() == tmpFile)
layers = project2.mapLayers()
self.assertEqual(len(layers.keys()), 2)
self.assertTrue(layers[l0.id()].isValid())
self.assertTrue(layers[l1.id()].isValid())
project2.clear()
self.assertFalse(project2.isZipped())
def testUpgradeOtfFrom2x(self):
"""
Test that upgrading a 2.x project correctly brings across project CRS and OTF transformation settings
"""
prj = QgsProject.instance()
prj.read(os.path.join(TEST_DATA_DIR, 'projects', 'test_memory_layer_proj.qgs'))
self.assertTrue(prj.crs().isValid())
self.assertEqual(prj.crs().authid(), 'EPSG:2056')
def testSnappingChangedSignal(self):
"""
Test the snappingConfigChanged signal
"""
project = QgsProject()
spy = QSignalSpy(project.snappingConfigChanged)
l0 = QgsVectorLayer(os.path.join(TEST_DATA_DIR, "points.shp"), "points", "ogr")
l1 = QgsVectorLayer(os.path.join(TEST_DATA_DIR, "lines.shp"), "lines", "ogr")
l2 = QgsVectorLayer(os.path.join(TEST_DATA_DIR, "polys.shp"), "polys", "ogr")
project.addMapLayers([l0, l1])
self.assertEqual(len(spy), 1)
project.addMapLayer(l2)
self.assertEqual(len(spy), 2)
self.assertEqual(len(project.snappingConfig().individualLayerSettings()), 3)
tmpDir = QTemporaryDir()
tmpFile = "{}/project_snap.qgs".format(tmpDir.path())
self.assertTrue(project.write(tmpFile))
# only ONE signal!
project.clear()
self.assertEqual(len(spy), 3)
self.assertFalse(project.snappingConfig().individualLayerSettings())
p2 = QgsProject()
spy2 = QSignalSpy(p2.snappingConfigChanged)
p2.read(tmpFile)
# only ONE signal!
self.assertEqual(len(spy2), 1)
self.assertEqual(len(p2.snappingConfig().individualLayerSettings()), 3)
p2.removeAllMapLayers()
self.assertEqual(len(spy2), 2)
self.assertFalse(p2.snappingConfig().individualLayerSettings())
def testRelativePaths(self):
"""
Test whether paths to layer sources are stored as relative to the project path
"""
tmpDir = QTemporaryDir()
tmpFile = "{}/project.qgs".format(tmpDir.path())
copyfile(os.path.join(TEST_DATA_DIR, "points.shp"), os.path.join(tmpDir.path(), "points.shp"))
copyfile(os.path.join(TEST_DATA_DIR, "points.dbf"), os.path.join(tmpDir.path(), "points.dbf"))
copyfile(os.path.join(TEST_DATA_DIR, "points.shx"), os.path.join(tmpDir.path(), "points.shx"))
copyfile(os.path.join(TEST_DATA_DIR, "lines.shp"), os.path.join(tmpDir.path(), "lines.shp"))
copyfile(os.path.join(TEST_DATA_DIR, "lines.dbf"), os.path.join(tmpDir.path(), "lines.dbf"))
copyfile(os.path.join(TEST_DATA_DIR, "lines.shx"), os.path.join(tmpDir.path(), "lines.shx"))
copyfile(os.path.join(TEST_DATA_DIR, "landsat_4326.tif"), os.path.join(tmpDir.path(), "landsat_4326.tif"))
project = QgsProject()
l0 = QgsVectorLayer(os.path.join(tmpDir.path(), "points.shp"), "points", "ogr")
l1 = QgsVectorLayer(os.path.join(tmpDir.path(), "lines.shp"), "lines", "ogr")
l2 = QgsRasterLayer(os.path.join(tmpDir.path(), "landsat_4326.tif"), "landsat", "gdal")
self.assertTrue(l0.isValid())
self.assertTrue(l1.isValid())
self.assertTrue(l2.isValid())
self.assertTrue(project.addMapLayers([l0, l1, l2]))
self.assertTrue(project.write(tmpFile))
del project
with open(tmpFile, 'r') as f:
content = ''.join(f.readlines())
self.assertTrue('source="./lines.shp"' in content)
self.assertTrue('source="./points.shp"' in content)
self.assertTrue('source="./landsat_4326.tif"' in content)
# Re-read the project and store absolute
project = QgsProject()
self.assertTrue(project.read(tmpFile))
store = project.layerStore()
self.assertEqual(set([l.name() for l in store.mapLayers().values()]), set(['lines', 'landsat', 'points']))
project.writeEntryBool('Paths', '/Absolute', True)
tmpFile2 = "{}/project2.qgs".format(tmpDir.path())
self.assertTrue(project.write(tmpFile2))
with open(tmpFile2, 'r') as f:
content = ''.join(f.readlines())
self.assertTrue('source="{}/lines.shp"'.format(tmpDir.path()) in content)
self.assertTrue('source="{}/points.shp"'.format(tmpDir.path()) in content)
self.assertTrue('source="{}/landsat_4326.tif"'.format(tmpDir.path()) in content)
del project
def testRelativePathsGpkg(self):
"""
Test whether paths to layer sources are stored as relative to the project path with GPKG storage
"""
def _check_datasource(_path):
# Verify datasource path stored in the project
ds = ogr.GetDriverByName('GPKG').Open(_path)
l = ds.GetLayer(1)
self.assertEqual(l.GetName(), 'qgis_projects')
self.assertEqual(l.GetFeatureCount(), 1)
f = l.GetFeature(1)
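# The stored blob is a hex-encoded zip; decode it and read the embedded
# .qgs document to inspect the recorded datasource paths.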
zip_content = BytesIO(codecs.decode(f.GetFieldAsBinary(2), 'hex'))
z = ZipFile(zip_content)
qgs = z.read(z.filelist[0])
self.assertEqual(re.findall(b'<datasource>(.*)?</datasource>', qgs)[1],
b'./relative_paths_gh30387.gpkg|layername=some_data')
with TemporaryDirectory() as d:
path = os.path.join(d, 'relative_paths_gh30387.gpkg')
copyfile(os.path.join(TEST_DATA_DIR, 'projects', 'relative_paths_gh30387.gpkg'), path)
project = QgsProject()
l = QgsVectorLayer(path + '|layername=some_data', 'mylayer', 'ogr')
self.assertTrue(l.isValid())
self.assertTrue(project.addMapLayers([l]))
self.assertEqual(project.count(), 1)
# Project URI
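# A geopackage:// URI names the database file and the project entry
# (written into the qgis_projects table checked by _check_datasource).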
uri = 'geopackage://{}?projectName=relative_project'.format(path)
project.setFileName(uri)
self.assertTrue(project.write())
# Verify
project = QgsProject()
self.assertTrue(project.read(uri))
self.assertEqual(project.writePath(path), './relative_paths_gh30387.gpkg')
_check_datasource(path)
for _, l in project.mapLayers().items():
self.assertTrue(l.isValid())
with TemporaryDirectory() as d2:
# Move it!
path2 = os.path.join(d2, 'relative_paths_gh30387.gpkg')
copyfile(path, path2)
# Delete old temporary dir
del d
# Verify moved
project = QgsProject()
uri2 = 'geopackage://{}?projectName=relative_project'.format(path2)
self.assertTrue(project.read(uri2))
_check_datasource(path2)
self.assertEqual(project.count(), 1)
for _, l in project.mapLayers().items():
self.assertTrue(l.isValid())
def testSymbolicLinkInProjectPath(self):
"""
Test whether paths to layer sources relative to the project are stored correctly
when the project's path contains a symbolic link.
In other words, test that the project's and layers' paths are correctly resolved.
"""
tmpDir = QTemporaryDir()
tmpFile = "{}/project.qgs".format(tmpDir.path())
copyfile(os.path.join(TEST_DATA_DIR, "points.shp"), os.path.join(tmpDir.path(), "points.shp"))
copyfile(os.path.join(TEST_DATA_DIR, "points.dbf"), os.path.join(tmpDir.path(), "points.dbf"))
copyfile(os.path.join(TEST_DATA_DIR, "points.shx"), os.path.join(tmpDir.path(), "points.shx"))
copyfile(os.path.join(TEST_DATA_DIR, "lines.shp"), os.path.join(tmpDir.path(), "lines.shp"))
copyfile(os.path.join(TEST_DATA_DIR, "lines.dbf"), os.path.join(tmpDir.path(), "lines.dbf"))
copyfile(os.path.join(TEST_DATA_DIR, "lines.shx"), os.path.join(tmpDir.path(), "lines.shx"))
copyfile(os.path.join(TEST_DATA_DIR, "landsat_4326.tif"), os.path.join(tmpDir.path(), "landsat_4326.tif"))
project = QgsProject()
l0 = QgsVectorLayer(os.path.join(tmpDir.path(), "points.shp"), "points", "ogr")
l1 = QgsVectorLayer(os.path.join(tmpDir.path(), "lines.shp"), "lines", "ogr")
l2 = QgsRasterLayer(os.path.join(tmpDir.path(), "landsat_4326.tif"), "landsat", "gdal")
self.assertTrue(l0.isValid())
self.assertTrue(l1.isValid())
self.assertTrue(l2.isValid())
self.assertTrue(project.addMapLayers([l0, l1, l2]))
self.assertTrue(project.write(tmpFile))
del project
# Create symbolic link to previous project
tmpDir2 = QTemporaryDir()
symlinkDir = os.path.join(tmpDir2.path(), "dir")
os.symlink(tmpDir.path(), symlinkDir)
tmpFile = "{}/project.qgs".format(symlinkDir)
# Open the project from the symlink and force a re-save.
project = QgsProject()
self.assertTrue(project.read(tmpFile))
self.assertTrue(project.write(tmpFile))
del project
with open(tmpFile, 'r') as f:
content = ''.join(f.readlines())
self.assertTrue('source="./lines.shp"' in content)
self.assertTrue('source="./points.shp"' in content)
self.assertTrue('source="./landsat_4326.tif"' in content)
def testHomePath(self):
p = QgsProject()
path_changed_spy = QSignalSpy(p.homePathChanged)
self.assertFalse(p.homePath())
self.assertFalse(p.presetHomePath())
# simulate save file
tmp_dir = QTemporaryDir()
tmp_file = "{}/project.qgs".format(tmp_dir.path())
with open(tmp_file, 'w') as f:
pass
p.setFileName(tmp_file)
# home path should be file path
self.assertEqual(p.homePath(), tmp_dir.path())
self.assertFalse(p.presetHomePath())
self.assertEqual(len(path_changed_spy), 1)
# manually override home path
p.setPresetHomePath('/tmp/my_path')
self.assertEqual(p.homePath(), '/tmp/my_path')
self.assertEqual(p.presetHomePath(), '/tmp/my_path')
self.assertEqual(len(path_changed_spy), 2)
# check project scope
scope = QgsExpressionContextUtils.projectScope(p)
self.assertEqual(scope.variable('project_home'), '/tmp/my_path')
# no extra signal if path is unchanged
p.setPresetHomePath('/tmp/my_path')
self.assertEqual(p.homePath(), '/tmp/my_path')
self.assertEqual(p.presetHomePath(), '/tmp/my_path')
self.assertEqual(len(path_changed_spy), 2)
# setting the file name should not affect the home path if it was manually set
tmp_file_2 = "{}/project/project2.qgs".format(tmp_dir.path())
os.mkdir(tmp_dir.path() + '/project')
with open(tmp_file_2, 'w') as f:
pass
p.setFileName(tmp_file_2)
self.assertEqual(p.homePath(), '/tmp/my_path')
self.assertEqual(p.presetHomePath(), '/tmp/my_path')
self.assertEqual(len(path_changed_spy), 2)
scope = QgsExpressionContextUtils.projectScope(p)
self.assertEqual(scope.variable('project_home'), '/tmp/my_path')
# clear manual path
p.setPresetHomePath('')
self.assertEqual(p.homePath(), tmp_dir.path() + '/project')
self.assertFalse(p.presetHomePath())
self.assertEqual(len(path_changed_spy), 3)
scope = QgsExpressionContextUtils.projectScope(p)
self.assertEqual(scope.variable('project_home'), tmp_dir.path() + '/project')
# relative path
p.setPresetHomePath('../home')
self.assertEqual(p.homePath(), tmp_dir.path() + '/home')
self.assertEqual(p.presetHomePath(), '../home')
self.assertEqual(len(path_changed_spy), 4)
scope = QgsExpressionContextUtils.projectScope(p)
self.assertEqual(scope.variable('project_home'), tmp_dir.path() + '/home')
# relative path, no filename
p.setFileName('')
self.assertEqual(p.homePath(), '../home')
self.assertEqual(p.presetHomePath(), '../home')
scope = QgsExpressionContextUtils.projectScope(p)
self.assertEqual(scope.variable('project_home'), '../home')
p = QgsProject()
path_changed_spy = QSignalSpy(p.homePathChanged)
p.setFileName('/tmp/not/existing/here/path.qgz')
self.assertFalse(p.presetHomePath())
self.assertEqual(p.homePath(), '/tmp/not/existing/here')
self.assertEqual(len(path_changed_spy), 1)
def testDirtyBlocker(self):
# first test manual QgsProjectDirtyBlocker construction
p = QgsProject()
dirty_spy = QSignalSpy(p.isDirtyChanged)
# ^ will do *whatever* it takes to discover the enemy's secret plans!
# simple checks
p.setDirty(True)
self.assertTrue(p.isDirty())
self.assertEqual(len(dirty_spy), 1)
self.assertEqual(dirty_spy[-1], [True])
p.setDirty(True) # already dirty
self.assertTrue(p.isDirty())
self.assertEqual(len(dirty_spy), 1)
p.setDirty(False)
self.assertFalse(p.isDirty())
self.assertEqual(len(dirty_spy), 2)
self.assertEqual(dirty_spy[-1], [False])
p.setDirty(True)
self.assertTrue(p.isDirty())
self.assertEqual(len(dirty_spy), 3)
self.assertEqual(dirty_spy[-1], [True])
# with a blocker
blocker = QgsProjectDirtyBlocker(p)
# blockers will allow cleaning projects
p.setDirty(False)
self.assertFalse(p.isDirty())
self.assertEqual(len(dirty_spy), 4)
self.assertEqual(dirty_spy[-1], [False])
# but not dirtying!
p.setDirty(True)
self.assertFalse(p.isDirty())
self.assertEqual(len(dirty_spy), 4)
self.assertEqual(dirty_spy[-1], [False])
# nested block
blocker2 = QgsProjectDirtyBlocker(p)
p.setDirty(True)
self.assertFalse(p.isDirty())
self.assertEqual(len(dirty_spy), 4)
self.assertEqual(dirty_spy[-1], [False])
del blocker2
p.setDirty(True)
self.assertFalse(p.isDirty())
self.assertEqual(len(dirty_spy), 4)
self.assertEqual(dirty_spy[-1], [False])
del blocker
p.setDirty(True)
self.assertTrue(p.isDirty())
self.assertEqual(len(dirty_spy), 5)
self.assertEqual(dirty_spy[-1], [True])
# using python context manager
with QgsProject.blockDirtying(p):
# cleaning allowed
p.setDirty(False)
self.assertFalse(p.isDirty())
self.assertEqual(len(dirty_spy), 6)
self.assertEqual(dirty_spy[-1], [False])
# but not dirtying!
p.setDirty(True)
self.assertFalse(p.isDirty())
self.assertEqual(len(dirty_spy), 6)
self.assertEqual(dirty_spy[-1], [False])
# unblocked
p.setDirty(True)
self.assertTrue(p.isDirty())
self.assertEqual(len(dirty_spy), 7)
self.assertEqual(dirty_spy[-1], [True])
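# A minimal pure-Python sketch of the blocker pattern exercised above;
# illustrative assumption only (the real QgsProjectDirtyBlocker lives in C++):
#
#   class DirtyBlocker:
#       def __init__(self, project):
#           self._project = project
#           project.blocks += 1     # hypothetical counter consulted by setDirty(True)
#       def __del__(self):
#           self._project.blocks -= 1
#
# While blocks > 0, setDirty(True) is ignored but setDirty(False) still works.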
def testCustomLayerOrderFrom2xProject(self):
prj = QgsProject.instance()
prj.read(os.path.join(TEST_DATA_DIR, 'layer_rendering_order_issue_qgis3.qgs'))
layer_x = prj.mapLayers()['x20180406151213536']
layer_y = prj.mapLayers()['y20180406151217017']
# check layer order
tree = prj.layerTreeRoot()
self.assertEqual(tree.children()[0].layer(), layer_x)
self.assertEqual(tree.children()[1].layer(), layer_y)
self.assertTrue(tree.hasCustomLayerOrder())
self.assertEqual(tree.customLayerOrder(), [layer_y, layer_x])
self.assertEqual(tree.layerOrder(), [layer_y, layer_x])
def testCustomLayerOrderFrom3xProject(self):
prj = QgsProject.instance()
prj.read(os.path.join(TEST_DATA_DIR, 'layer_rendering_order_qgis3_project.qgs'))
layer_x = prj.mapLayers()['x20180406151213536']
layer_y = prj.mapLayers()['y20180406151217017']
# check layer order
tree = prj.layerTreeRoot()
self.assertEqual(tree.children()[0].layer(), layer_x)
self.assertEqual(tree.children()[1].layer(), layer_y)
self.assertTrue(tree.hasCustomLayerOrder())
self.assertEqual(tree.customLayerOrder(), [layer_y, layer_x])
self.assertEqual(tree.layerOrder(), [layer_y, layer_x])
def testPalPropertiesReadWrite(self):
tmpDir = QTemporaryDir()
tmpFile = "{}/project.qgs".format(tmpDir.path())
s0 = QgsLabelingEngineSettings()
s0.setMaximumLineCandidatesPerCm(33)
p0 = QgsProject()
p0.setFileName(tmpFile)
p0.setLabelingEngineSettings(s0)
p0.write()
p1 = QgsProject()
p1.read(tmpFile)
s1 = p1.labelingEngineSettings()
self.assertEqual(s1.maximumLineCandidatesPerCm(), 33)
def testLayerChangeDirtiesProject(self):
"""
Test that making changes to certain layer properties results in dirty projects
"""
p = QgsProject()
l = QgsVectorLayer(os.path.join(TEST_DATA_DIR, "points.shp"), "points", "ogr")
self.assertTrue(l.isValid())
self.assertTrue(p.addMapLayers([l]))
p.setDirty(False)
l.setCrs(QgsCoordinateReferenceSystem('EPSG:3111'))
self.assertTrue(p.isDirty())
p.setDirty(False)
l.setName('test')
self.assertTrue(p.isDirty())
p.setDirty(False)
self.assertTrue(l.setSubsetString('class=\'a\''))
self.assertTrue(p.isDirty())
def testProjectTitleWithPeriod(self):
tmpDir = QTemporaryDir()
tmpFile = "{}/2.18.21.qgs".format(tmpDir.path())
tmpFile2 = "{}/qgis-3.2.0.qgs".format(tmpDir.path())
p0 = QgsProject()
p0.setFileName(tmpFile)
p1 = QgsProject()
p1.setFileName(tmpFile2)
self.assertEqual(p0.baseName(), '2.18.21')
self.assertEqual(p1.baseName(), 'qgis-3.2.0')
def testWriteEntry(self):
tmpDir = QTemporaryDir()
tmpFile = "{}/project.qgs".format(tmpDir.path())
# zip with existing file
project = QgsProject()
query = 'select * from "sample DH" where "sample DH"."Elev" > 130 and "sample DH"."Elev" < 140'
self.assertTrue(project.writeEntry('myscope', 'myentry', query))
self.assertTrue(project.write(tmpFile))
self.assertTrue(project.read(tmpFile))
q, ok = project.readEntry('myscope', 'myentry')
self.assertTrue(ok)
self.assertEqual(q, query)
def testDirtying(self):
project = QgsProject()
# writing a new entry should dirty the project
project.setDirty(False)
self.assertTrue(project.writeEntry('myscope', 'myentry', True))
self.assertTrue(project.isDirty())
# over-writing a pre-existing entry with the same value should _not_ dirty the project
project.setDirty(False)
self.assertTrue(project.writeEntry('myscope', 'myentry', True))
self.assertFalse(project.isDirty())
# over-writing a pre-existing entry with a different value should dirty the project
project.setDirty(False)
self.assertTrue(project.writeEntry('myscope', 'myentry', False))
self.assertTrue(project.isDirty())
# removing an existing entry should dirty the project
project.setDirty(False)
self.assertTrue(project.removeEntry('myscope', 'myentry'))
self.assertTrue(project.isDirty())
# removing a non-existing entry should _not_ dirty the project
project.setDirty(False)
self.assertTrue(project.removeEntry('myscope', 'myentry'))
self.assertFalse(project.isDirty())
# setting a project CRS with a new value should dirty the project
project.setCrs(QgsCoordinateReferenceSystem('EPSG:4326'))
project.setDirty(False)
project.setCrs(QgsCoordinateReferenceSystem('EPSG:3148'))
self.assertTrue(project.isDirty())
# re-setting the project CRS to the same value should not dirty the project
project.setDirty(False)
project.setCrs(QgsCoordinateReferenceSystem('EPSG:3148'))
self.assertFalse(project.isDirty())
def testBackgroundColor(self):
p = QgsProject()
s = QgsSettings()
red = int(s.value("qgis/default_canvas_color_red", 255))
green = int(s.value("qgis/default_canvas_color_green", 255))
blue = int(s.value("qgis/default_canvas_color_blue", 255))
# test default canvas background color
self.assertEqual(p.backgroundColor(), QColor(red, green, blue))
spy = QSignalSpy(p.backgroundColorChanged)
p.setBackgroundColor(QColor(0, 0, 0))
self.assertEqual(len(spy), 1)
# test customized canvas background color
self.assertEqual(p.backgroundColor(), QColor(0, 0, 0))
# test signal not emitted when color doesn't actually change
p.setBackgroundColor(QColor(0, 0, 0))
self.assertEqual(len(spy), 1)
def testSelectionColor(self):
p = QgsProject()
s = QgsSettings()
red = int(s.value("qgis/default_selection_color_red", 255))
green = int(s.value("qgis/default_selection_color_green", 255))
blue = int(s.value("qgis/default_selection_color_blue", 0))
alpha = int(s.value("qgis/default_selection_color_alpha", 255))
# test default feature selection color
self.assertEqual(p.selectionColor(), QColor(red, green, blue, alpha))
spy = QSignalSpy(p.selectionColorChanged)
p.setSelectionColor(QColor(0, 0, 0, 50))
self.assertEqual(len(spy), 1)
# test customized feature selection color
self.assertEqual(p.selectionColor(), QColor(0, 0, 0, 50))
# test signal not emitted when color doesn't actually change
p.setSelectionColor(QColor(0, 0, 0, 50))
self.assertEqual(len(spy), 1)
def testColorScheme(self):
p = QgsProject.instance()
spy = QSignalSpy(p.projectColorsChanged)
p.setProjectColors([[QColor(255, 0, 0), 'red'], [QColor(0, 255, 0), 'green']])
self.assertEqual(len(spy), 1)
scheme = [s for s in QgsApplication.colorSchemeRegistry().schemes() if isinstance(s, QgsProjectColorScheme)][0]
self.assertEqual([[c[0].name(), c[1]] for c in scheme.fetchColors()],
[['#ff0000', 'red'], ['#00ff00', 'green']])
# expect color changed signal when clearing project
p.clear()
self.assertEqual(len(spy), 2)
self.assertEqual([[c[0].name(), c[1]] for c in scheme.fetchColors()], [])
# no signal should be emitted on project destruction -- emitting one then can cause a crash
p = QgsProject()
spy = QSignalSpy(p.projectColorsChanged)
p.deleteLater()
del p
self.assertEqual(len(spy), 0)
def testTransformContextSignalIsEmitted(self):
"""Test that when a project transform context changes a transformContextChanged signal is emitted"""
p = QgsProject()
spy = QSignalSpy(p.transformContextChanged)
ctx = QgsCoordinateTransformContext()
ctx.addSourceDestinationDatumTransform(QgsCoordinateReferenceSystem(4326), QgsCoordinateReferenceSystem(3857),
1234, 1235)
ctx.addCoordinateOperation(QgsCoordinateReferenceSystem(4326), QgsCoordinateReferenceSystem(3857), 'x')
p.setTransformContext(ctx)
self.assertEqual(len(spy), 1)
def testGpkgDirtyingWhenRemovedFromStorage(self):
"""Test that when a GPKG stored project is removed from the storage it is marked dirty"""
with TemporaryDirectory() as d:
path = os.path.join(d, 'relative_paths_gh30387.gpkg')
copyfile(os.path.join(TEST_DATA_DIR, 'projects', 'relative_paths_gh30387.gpkg'), path)
project = QgsProject.instance()
# Project URI
uri = 'geopackage://{}?projectName=relative_project'.format(path)
project.setFileName(uri)
self.assertTrue(project.write())
# Verify
self.assertTrue(project.read(uri))
self.assertFalse(project.isDirty())
# Remove from storage
storage = QgsApplication.projectStorageRegistry().projectStorageFromUri(uri)
self.assertTrue(storage.removeProject(uri))
self.assertTrue(project.isDirty())
# Save it back
self.assertTrue(project.write())
self.assertFalse(project.isDirty())
# Reload
self.assertTrue(project.read(uri))
def testMapScales(self):
p = QgsProject()
self.assertFalse(p.mapScales())
self.assertFalse(p.useProjectScales())
spy = QSignalSpy(p.mapScalesChanged)
p.setMapScales([])
self.assertEqual(len(spy), 0)
p.setUseProjectScales(False)
self.assertEqual(len(spy), 0)
p.setMapScales([5000, 6000, 3000, 4000])
# scales must be sorted
self.assertEqual(p.mapScales(), [6000.0, 5000.0, 4000.0, 3000.0])
self.assertEqual(len(spy), 1)
p.setMapScales([5000, 6000, 3000, 4000])
self.assertEqual(len(spy), 1)
self.assertEqual(p.mapScales(), [6000.0, 5000.0, 4000.0, 3000.0])
p.setMapScales([5000, 6000, 3000, 4000, 1000])
self.assertEqual(len(spy), 2)
self.assertEqual(p.mapScales(), [6000.0, 5000.0, 4000.0, 3000.0, 1000.0])
p.setUseProjectScales(True)
self.assertEqual(len(spy), 3)
p.setUseProjectScales(True)
self.assertEqual(len(spy), 3)
p.setUseProjectScales(False)
self.assertEqual(len(spy), 4)
def testSetInstance(self):
"""Test singleton API"""
p = QgsProject()
self.assertNotEqual(p, QgsProject.instance())
QgsProject.setInstance(p)
self.assertEqual(p, QgsProject.instance())
if __name__ == '__main__':
unittest.main()
|
gpl-2.0
|
parksjin01/ctf
|
2013/Pico/Trivial.py
|
1
|
1130
|
#!/usr/bin/env python
# PicoCTF 2013 "Trivial": undo a Vigenere-style cipher in which each key byte
# rotates letters within a-z/A-Z and digits within 0-9.
import binascii
alphaL = "abcdefghijklmnopqrstuvwxyz"
alphaU = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
num = "0123456789"
keychars = num + alphaL + alphaU
ciphertext = "Bot kmws mikferuigmzf rmfrxrwqe abs perudsf! Nvm kda ut ab8bv_w4ue0_ab8v_DDU"
res = ''
key = [55, 0, 25, 54, 3, 12, 27, 14, 7, 20, 14, 34]
for i in range(len(ciphertext)):
    c = ciphertext[i]
    if c in alphaL:
        # rotate lowercase letters back within a-z
        rotate_amount = key[i % len(key)] % 26
        dec_char = ord(c) - rotate_amount
        if dec_char < ord('a'):
            dec_char += 26  # wrap back around past 'a'
    elif c in alphaU:
        # rotate uppercase letters back within A-Z
        rotate_amount = key[i % len(key)] % 26
        dec_char = ord(c) - rotate_amount
        if dec_char < ord('A'):
            dec_char += 26  # wrap back around past 'A'
    elif c in num:
        # rotate digits back within 0-9
        rotate_amount = key[i % len(key)] % 10
        dec_char = ord(c) - rotate_amount
        if dec_char < ord('0'):
            dec_char += 10  # wrap back around past '0'
    else:
        dec_char = ord(c)  # spaces and punctuation pass through unchanged
    res += chr(dec_char)
print res, binascii.hexlify(res)  # decrypted flag and its hex encoding
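# For reference, the same per-class rotation can be written as a single modular
# step; rotate_back below is a hypothetical helper, not part of the original
# solve script:
def rotate_back(c, amount, first, size):
    # shift c back by `amount` inside the window [first, first + size)
    return chr((ord(c) - ord(first) - amount) % size + ord(first))
# e.g. rotate_back('t', 25, 'a', 26) == 'u'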
|
mit
|
n3storm/seantis-questionnaire
|
questionnaire/qprocessors/simple.py
|
1
|
3543
|
from questionnaire import *
from django.utils.translation import ugettext as _
from django.utils.simplejson import dumps
@question_proc('choice-yesno','choice-yesnocomment','choice-yesnodontknow')
def question_yesno(request, question):
key = "question_%s" % question.number
key2 = "question_%s_comment" % question.number
val = request.POST.get(key, None)
cmt = request.POST.get(key2, '')
qtype = question.get_type()
cd = question.getcheckdict()
jstriggers = []
hascomment = (qtype == 'choice-yesnocomment')
hasdontknow = (qtype == 'choice-yesnodontknow' or 'dontknow' in cd)
if not val:
if cd.get('default', None):
val = cd['default']
checks = ''
if hascomment:
if cd.get('required-yes'):
jstriggers = ['%s_comment' % question.number]
checks = ' checks="dep_check(\'%s,yes\')"' % question.number
elif cd.get('required-no'):
checks = ' checks="dep_check(\'%s,no\')"' % question.number
elif cd.get('required-dontknow'):
checks = ' checks="dep_check(\'%s,dontknow\')"' % question.number
return {
'required' : True,
'checks' : checks,
'value' : val,
'qvalue' : '',
'hascomment' : hascomment,
'hasdontknow' : hasdontknow,
'comment' : cmt,
'jstriggers' : jstriggers,
'template' : 'questionnaire/choice-yesnocomment.html',
}
@question_proc('open', 'open-textfield')
def question_open(request, question):
key = "question_%s" % question.number
value = question.getcheckdict().get('default','')
if key in request.POST:
value = request.POST[key]
return {
'required' : question.getcheckdict().get('required', False),
'value' : value,
}
@answer_proc('open', 'open-textfield', 'choice-yesno', 'choice-yesnocomment', 'choice-yesnodontknow')
def process_simple(question, ansdict):
checkdict = question.getcheckdict()
ans = ansdict['ANSWER'] or ''
qtype = question.get_type()
if qtype.startswith('choice-yesno'):
if ans not in ('yes','no','dontknow'):
raise AnswerException(_(u'You must select an option'))
if qtype == 'choice-yesnocomment' \
and len(ansdict.get('comment','').strip()) == 0:
if checkdict.get('required', False):
raise AnswerException(_(u'Field cannot be blank'))
if checkdict.get('required-yes', False) and ans == 'yes':
raise AnswerException(_(u'Field cannot be blank'))
if checkdict.get('required-no', False) and ans == 'no':
raise AnswerException(_(u'Field cannot be blank'))
else:
if not ans.strip() and checkdict.get('required', False):
raise AnswerException(_(u'Field cannot be blank'))
if 'comment' in ansdict and len(ansdict['comment']) > 0:
return dumps([ans, [ansdict['comment']]])
if ans:
return dumps([ans])
return dumps([])
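# For illustration, a small sketch of the JSON shapes process_simple() returns
# (assumed example answer values; dumps is the import at the top of this module):
_EXAMPLE_SHAPES = [
    dumps(['yes', ['looks fine']]),  # answer plus nested optional comment
    dumps(['no']),                   # plain answer, no comment
    dumps([]),                       # empty answer
]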
add_type('open', 'Open Answer, single line [input]')
add_type('open-textfield', 'Open Answer, multi-line [textarea]')
add_type('choice-yesno', 'Yes/No Choice [radio]')
add_type('choice-yesnocomment', 'Yes/No Choice with optional comment [radio, input]')
add_type('choice-yesnodontknow', 'Yes/No/Don\'t know Choice [radio]')
@answer_proc('comment')
def process_comment(question, answer):
pass
add_type('comment', 'Comment Only')
|
bsd-3-clause
|
vn09/ns-3-dev-git
|
src/virtual-net-device/bindings/modulegen__gcc_LP64.py
|
28
|
221169
|
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
class ErrorHandler(pybindgen.settings.ErrorHandler):
def handle_error(self, wrapper, exception, traceback_):
warnings.warn("exception %r in wrapper %s" % (exception, wrapper))
return True
pybindgen.settings.error_handler = ErrorHandler()
import sys
def module_init():
root_module = Module('ns.virtual_net_device', cpp_namespace='::ns3')
return root_module
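# Sketch of how a modulegen script like this one is typically driven
# (hypothetical driver; ns-3's real one also calls register_functions and
# writes to a generated .cc file rather than stdout):
#
#   import sys
#   from pybindgen import FileCodeSink
#   root_module = module_init()
#   register_types(root_module)
#   register_methods(root_module)
#   root_module.generate(FileCodeSink(sys.stdout))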
def register_types(module):
root_module = module.get_root()
## address.h (module 'network'): ns3::Address [class]
module.add_class('Address', import_from_module='ns.network')
## address.h (module 'network'): ns3::Address::MaxSize_e [enumeration]
module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class]
module.add_class('AttributeConstructionList', import_from_module='ns.core')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct]
module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
## buffer.h (module 'network'): ns3::Buffer [class]
module.add_class('Buffer', import_from_module='ns.network')
## buffer.h (module 'network'): ns3::Buffer::Iterator [class]
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::Buffer'])
## packet.h (module 'network'): ns3::ByteTagIterator [class]
module.add_class('ByteTagIterator', import_from_module='ns.network')
## packet.h (module 'network'): ns3::ByteTagIterator::Item [class]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagIterator'])
## byte-tag-list.h (module 'network'): ns3::ByteTagList [class]
module.add_class('ByteTagList', import_from_module='ns.network')
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator [class]
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList'])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item [struct]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList::Iterator'])
## callback.h (module 'core'): ns3::CallbackBase [class]
module.add_class('CallbackBase', import_from_module='ns.core')
## hash.h (module 'core'): ns3::Hasher [class]
module.add_class('Hasher', import_from_module='ns.core')
## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
module.add_class('Ipv4Address', import_from_module='ns.network')
## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address'])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask [class]
module.add_class('Ipv4Mask', import_from_module='ns.network')
## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
module.add_class('Ipv6Address', import_from_module='ns.network')
## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address'])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix [class]
module.add_class('Ipv6Prefix', import_from_module='ns.network')
## mac48-address.h (module 'network'): ns3::Mac48Address [class]
module.add_class('Mac48Address', import_from_module='ns.network')
## mac48-address.h (module 'network'): ns3::Mac48Address [class]
root_module['ns3::Mac48Address'].implicitly_converts_to(root_module['ns3::Address'])
## object-base.h (module 'core'): ns3::ObjectBase [class]
module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
## object.h (module 'core'): ns3::ObjectDeleter [struct]
module.add_class('ObjectDeleter', import_from_module='ns.core')
## packet-metadata.h (module 'network'): ns3::PacketMetadata [class]
module.add_class('PacketMetadata', import_from_module='ns.network')
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [struct]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [enumeration]
module.add_enum('', ['PAYLOAD', 'HEADER', 'TRAILER'], outer_class=root_module['ns3::PacketMetadata::Item'], import_from_module='ns.network')
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator [class]
module.add_class('ItemIterator', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
## packet.h (module 'network'): ns3::PacketTagIterator [class]
module.add_class('PacketTagIterator', import_from_module='ns.network')
## packet.h (module 'network'): ns3::PacketTagIterator::Item [class]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagIterator'])
## packet-tag-list.h (module 'network'): ns3::PacketTagList [class]
module.add_class('PacketTagList', import_from_module='ns.network')
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData [struct]
module.add_class('TagData', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagList'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## tag.h (module 'network'): ns3::Tag [class]
module.add_class('Tag', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
## tag-buffer.h (module 'network'): ns3::TagBuffer [class]
module.add_class('TagBuffer', import_from_module='ns.network')
## type-id.h (module 'core'): ns3::TypeId [class]
module.add_class('TypeId', import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration]
module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::SupportLevel [enumeration]
module.add_enum('SupportLevel', ['SUPPORTED', 'DEPRECATED', 'OBSOLETE'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct]
module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct]
module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## empty.h (module 'core'): ns3::empty [class]
module.add_class('empty', import_from_module='ns.core')
## chunk.h (module 'network'): ns3::Chunk [class]
module.add_class('Chunk', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
## header.h (module 'network'): ns3::Header [class]
module.add_class('Header', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
## object.h (module 'core'): ns3::Object [class]
module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
## object.h (module 'core'): ns3::Object::AggregateIterator [class]
module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Hash::Implementation>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::NixVector', 'ns3::empty', 'ns3::DefaultDeleter<ns3::NixVector>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Packet', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Packet>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class]
module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
## trailer.h (module 'network'): ns3::Trailer [class]
module.add_class('Trailer', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
## attribute.h (module 'core'): ns3::AttributeAccessor [class]
module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
## attribute.h (module 'core'): ns3::AttributeChecker [class]
module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
## attribute.h (module 'core'): ns3::AttributeValue [class]
module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
## callback.h (module 'core'): ns3::CallbackChecker [class]
module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## callback.h (module 'core'): ns3::CallbackImplBase [class]
module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
## callback.h (module 'core'): ns3::CallbackValue [class]
module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## attribute.h (module 'core'): ns3::EmptyAttributeAccessor [class]
module.add_class('EmptyAttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::AttributeAccessor'])
## attribute.h (module 'core'): ns3::EmptyAttributeChecker [class]
module.add_class('EmptyAttributeChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## attribute.h (module 'core'): ns3::EmptyAttributeValue [class]
module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker [class]
module.add_class('Ipv4AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue [class]
module.add_class('Ipv4AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker [class]
module.add_class('Ipv4MaskChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue [class]
module.add_class('Ipv4MaskValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker [class]
module.add_class('Ipv6AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue [class]
module.add_class('Ipv6AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker [class]
module.add_class('Ipv6PrefixChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue [class]
module.add_class('Ipv6PrefixValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## mac48-address.h (module 'network'): ns3::Mac48AddressChecker [class]
module.add_class('Mac48AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## mac48-address.h (module 'network'): ns3::Mac48AddressValue [class]
module.add_class('Mac48AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## net-device.h (module 'network'): ns3::NetDevice [class]
module.add_class('NetDevice', import_from_module='ns.network', parent=root_module['ns3::Object'])
## net-device.h (module 'network'): ns3::NetDevice::PacketType [enumeration]
module.add_enum('PacketType', ['PACKET_HOST', 'NS3_PACKET_HOST', 'PACKET_BROADCAST', 'NS3_PACKET_BROADCAST', 'PACKET_MULTICAST', 'NS3_PACKET_MULTICAST', 'PACKET_OTHERHOST', 'NS3_PACKET_OTHERHOST'], outer_class=root_module['ns3::NetDevice'], import_from_module='ns.network')
## nix-vector.h (module 'network'): ns3::NixVector [class]
module.add_class('NixVector', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
## node.h (module 'network'): ns3::Node [class]
module.add_class('Node', import_from_module='ns.network', parent=root_module['ns3::Object'])
## packet.h (module 'network'): ns3::Packet [class]
module.add_class('Packet', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
## type-id.h (module 'core'): ns3::TypeIdChecker [class]
module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## type-id.h (module 'core'): ns3::TypeIdValue [class]
module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## virtual-net-device.h (module 'virtual-net-device'): ns3::VirtualNetDevice [class]
module.add_class('VirtualNetDevice', parent=root_module['ns3::NetDevice'])
## address.h (module 'network'): ns3::AddressChecker [class]
module.add_class('AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## address.h (module 'network'): ns3::AddressValue [class]
module.add_class('AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## Register a nested module for the namespace FatalImpl
nested_module = module.add_cpp_namespace('FatalImpl')
register_types_ns3_FatalImpl(nested_module)
## Register a nested module for the namespace Hash
nested_module = module.add_cpp_namespace('Hash')
register_types_ns3_Hash(nested_module)
def register_types_ns3_FatalImpl(module):
root_module = module.get_root()
def register_types_ns3_Hash(module):
root_module = module.get_root()
## hash-function.h (module 'core'): ns3::Hash::Implementation [class]
module.add_class('Implementation', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash32Function_ptr')
typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash32Function_ptr*')
typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash32Function_ptr&')
typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash64Function_ptr')
typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash64Function_ptr*')
typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash64Function_ptr&')
## Register a nested module for the namespace Function
nested_module = module.add_cpp_namespace('Function')
register_types_ns3_Hash_Function(nested_module)
def register_types_ns3_Hash_Function(module):
root_module = module.get_root()
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a [class]
module.add_class('Fnv1a', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32 [class]
module.add_class('Hash32', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64 [class]
module.add_class('Hash64', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3 [class]
module.add_class('Murmur3', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
def register_methods(root_module):
register_Ns3Address_methods(root_module, root_module['ns3::Address'])
register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
register_Ns3Buffer_methods(root_module, root_module['ns3::Buffer'])
register_Ns3BufferIterator_methods(root_module, root_module['ns3::Buffer::Iterator'])
register_Ns3ByteTagIterator_methods(root_module, root_module['ns3::ByteTagIterator'])
register_Ns3ByteTagIteratorItem_methods(root_module, root_module['ns3::ByteTagIterator::Item'])
register_Ns3ByteTagList_methods(root_module, root_module['ns3::ByteTagList'])
register_Ns3ByteTagListIterator_methods(root_module, root_module['ns3::ByteTagList::Iterator'])
register_Ns3ByteTagListIteratorItem_methods(root_module, root_module['ns3::ByteTagList::Iterator::Item'])
register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
register_Ns3Hasher_methods(root_module, root_module['ns3::Hasher'])
register_Ns3Ipv4Address_methods(root_module, root_module['ns3::Ipv4Address'])
register_Ns3Ipv4Mask_methods(root_module, root_module['ns3::Ipv4Mask'])
register_Ns3Ipv6Address_methods(root_module, root_module['ns3::Ipv6Address'])
register_Ns3Ipv6Prefix_methods(root_module, root_module['ns3::Ipv6Prefix'])
register_Ns3Mac48Address_methods(root_module, root_module['ns3::Mac48Address'])
register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
register_Ns3PacketMetadata_methods(root_module, root_module['ns3::PacketMetadata'])
register_Ns3PacketMetadataItem_methods(root_module, root_module['ns3::PacketMetadata::Item'])
register_Ns3PacketMetadataItemIterator_methods(root_module, root_module['ns3::PacketMetadata::ItemIterator'])
register_Ns3PacketTagIterator_methods(root_module, root_module['ns3::PacketTagIterator'])
register_Ns3PacketTagIteratorItem_methods(root_module, root_module['ns3::PacketTagIterator::Item'])
register_Ns3PacketTagList_methods(root_module, root_module['ns3::PacketTagList'])
register_Ns3PacketTagListTagData_methods(root_module, root_module['ns3::PacketTagList::TagData'])
register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
register_Ns3Tag_methods(root_module, root_module['ns3::Tag'])
register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer'])
register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
register_Ns3Chunk_methods(root_module, root_module['ns3::Chunk'])
register_Ns3Header_methods(root_module, root_module['ns3::Header'])
register_Ns3Object_methods(root_module, root_module['ns3::Object'])
register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
register_Ns3Trailer_methods(root_module, root_module['ns3::Trailer'])
register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
register_Ns3EmptyAttributeAccessor_methods(root_module, root_module['ns3::EmptyAttributeAccessor'])
register_Ns3EmptyAttributeChecker_methods(root_module, root_module['ns3::EmptyAttributeChecker'])
register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
register_Ns3Ipv4AddressChecker_methods(root_module, root_module['ns3::Ipv4AddressChecker'])
register_Ns3Ipv4AddressValue_methods(root_module, root_module['ns3::Ipv4AddressValue'])
register_Ns3Ipv4MaskChecker_methods(root_module, root_module['ns3::Ipv4MaskChecker'])
register_Ns3Ipv4MaskValue_methods(root_module, root_module['ns3::Ipv4MaskValue'])
register_Ns3Ipv6AddressChecker_methods(root_module, root_module['ns3::Ipv6AddressChecker'])
register_Ns3Ipv6AddressValue_methods(root_module, root_module['ns3::Ipv6AddressValue'])
register_Ns3Ipv6PrefixChecker_methods(root_module, root_module['ns3::Ipv6PrefixChecker'])
register_Ns3Ipv6PrefixValue_methods(root_module, root_module['ns3::Ipv6PrefixValue'])
register_Ns3Mac48AddressChecker_methods(root_module, root_module['ns3::Mac48AddressChecker'])
register_Ns3Mac48AddressValue_methods(root_module, root_module['ns3::Mac48AddressValue'])
register_Ns3NetDevice_methods(root_module, root_module['ns3::NetDevice'])
register_Ns3NixVector_methods(root_module, root_module['ns3::NixVector'])
register_Ns3Node_methods(root_module, root_module['ns3::Node'])
register_Ns3Packet_methods(root_module, root_module['ns3::Packet'])
register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
register_Ns3VirtualNetDevice_methods(root_module, root_module['ns3::VirtualNetDevice'])
register_Ns3AddressChecker_methods(root_module, root_module['ns3::AddressChecker'])
register_Ns3AddressValue_methods(root_module, root_module['ns3::AddressValue'])
register_Ns3HashImplementation_methods(root_module, root_module['ns3::Hash::Implementation'])
register_Ns3HashFunctionFnv1a_methods(root_module, root_module['ns3::Hash::Function::Fnv1a'])
register_Ns3HashFunctionHash32_methods(root_module, root_module['ns3::Hash::Function::Hash32'])
register_Ns3HashFunctionHash64_methods(root_module, root_module['ns3::Hash::Function::Hash64'])
register_Ns3HashFunctionMurmur3_methods(root_module, root_module['ns3::Hash::Function::Murmur3'])
return
def register_Ns3Address_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## address.h (module 'network'): ns3::Address::Address() [constructor]
cls.add_constructor([])
## address.h (module 'network'): ns3::Address::Address(uint8_t type, uint8_t const * buffer, uint8_t len) [constructor]
cls.add_constructor([param('uint8_t', 'type'), param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
## address.h (module 'network'): ns3::Address::Address(ns3::Address const & address) [copy constructor]
cls.add_constructor([param('ns3::Address const &', 'address')])
## address.h (module 'network'): bool ns3::Address::CheckCompatible(uint8_t type, uint8_t len) const [member function]
cls.add_method('CheckCompatible',
'bool',
[param('uint8_t', 'type'), param('uint8_t', 'len')],
is_const=True)
## address.h (module 'network'): uint32_t ns3::Address::CopyAllFrom(uint8_t const * buffer, uint8_t len) [member function]
cls.add_method('CopyAllFrom',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
## address.h (module 'network'): uint32_t ns3::Address::CopyAllTo(uint8_t * buffer, uint8_t len) const [member function]
cls.add_method('CopyAllTo',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint8_t', 'len')],
is_const=True)
## address.h (module 'network'): uint32_t ns3::Address::CopyFrom(uint8_t const * buffer, uint8_t len) [member function]
cls.add_method('CopyFrom',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
## address.h (module 'network'): uint32_t ns3::Address::CopyTo(uint8_t * buffer) const [member function]
cls.add_method('CopyTo',
'uint32_t',
[param('uint8_t *', 'buffer')],
is_const=True)
## address.h (module 'network'): void ns3::Address::Deserialize(ns3::TagBuffer buffer) [member function]
cls.add_method('Deserialize',
'void',
[param('ns3::TagBuffer', 'buffer')])
## address.h (module 'network'): uint8_t ns3::Address::GetLength() const [member function]
cls.add_method('GetLength',
'uint8_t',
[],
is_const=True)
## address.h (module 'network'): uint32_t ns3::Address::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## address.h (module 'network'): bool ns3::Address::IsInvalid() const [member function]
cls.add_method('IsInvalid',
'bool',
[],
is_const=True)
## address.h (module 'network'): bool ns3::Address::IsMatchingType(uint8_t type) const [member function]
cls.add_method('IsMatchingType',
'bool',
[param('uint8_t', 'type')],
is_const=True)
## address.h (module 'network'): static uint8_t ns3::Address::Register() [member function]
cls.add_method('Register',
'uint8_t',
[],
is_static=True)
## address.h (module 'network'): void ns3::Address::Serialize(ns3::TagBuffer buffer) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::TagBuffer', 'buffer')],
is_const=True)
return
def register_Ns3AttributeConstructionList_methods(root_module, cls):
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList(ns3::AttributeConstructionList const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList() [constructor]
cls.add_constructor([])
## attribute-construction-list.h (module 'core'): void ns3::AttributeConstructionList::Add(std::string name, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::Ptr<ns3::AttributeValue> value) [member function]
cls.add_method('Add',
'void',
[param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')])
## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::Begin() const [member function]
cls.add_method('Begin',
'std::_List_const_iterator< ns3::AttributeConstructionList::Item >',
[],
is_const=True)
## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::End() const [member function]
cls.add_method('End',
'std::_List_const_iterator< ns3::AttributeConstructionList::Item >',
[],
is_const=True)
## attribute-construction-list.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeConstructionList::Find(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('Find',
'ns3::Ptr< ns3::AttributeValue >',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True)
return
def register_Ns3AttributeConstructionListItem_methods(root_module, cls):
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item() [constructor]
cls.add_constructor([])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item(ns3::AttributeConstructionList::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::checker [variable]
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::value [variable]
cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False)
return
def register_Ns3Buffer_methods(root_module, cls):
## buffer.h (module 'network'): ns3::Buffer::Buffer() [constructor]
cls.add_constructor([])
## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize) [constructor]
cls.add_constructor([param('uint32_t', 'dataSize')])
## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize, bool initialize) [constructor]
cls.add_constructor([param('uint32_t', 'dataSize'), param('bool', 'initialize')])
## buffer.h (module 'network'): ns3::Buffer::Buffer(ns3::Buffer const & o) [copy constructor]
cls.add_constructor([param('ns3::Buffer const &', 'o')])
## buffer.h (module 'network'): void ns3::Buffer::AddAtEnd(uint32_t end) [member function]
cls.add_method('AddAtEnd',
'void',
[param('uint32_t', 'end')])
## buffer.h (module 'network'): void ns3::Buffer::AddAtEnd(ns3::Buffer const & o) [member function]
cls.add_method('AddAtEnd',
'void',
[param('ns3::Buffer const &', 'o')])
## buffer.h (module 'network'): void ns3::Buffer::AddAtStart(uint32_t start) [member function]
cls.add_method('AddAtStart',
'void',
[param('uint32_t', 'start')])
## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::Begin() const [member function]
cls.add_method('Begin',
'ns3::Buffer::Iterator',
[],
is_const=True)
## buffer.h (module 'network'): void ns3::Buffer::CopyData(std::ostream * os, uint32_t size) const [member function]
cls.add_method('CopyData',
'void',
[param('std::ostream *', 'os'), param('uint32_t', 'size')],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::CopyData(uint8_t * buffer, uint32_t size) const [member function]
cls.add_method('CopyData',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')],
is_const=True)
## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFragment(uint32_t start, uint32_t length) const [member function]
cls.add_method('CreateFragment',
'ns3::Buffer',
[param('uint32_t', 'start'), param('uint32_t', 'length')],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::End() const [member function]
cls.add_method('End',
'ns3::Buffer::Iterator',
[],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSize() const [member function]
cls.add_method('GetSize',
'uint32_t',
[],
is_const=True)
## buffer.h (module 'network'): uint8_t const * ns3::Buffer::PeekData() const [member function]
cls.add_method('PeekData',
'uint8_t const *',
[],
is_const=True)
## buffer.h (module 'network'): void ns3::Buffer::RemoveAtEnd(uint32_t end) [member function]
cls.add_method('RemoveAtEnd',
'void',
[param('uint32_t', 'end')])
## buffer.h (module 'network'): void ns3::Buffer::RemoveAtStart(uint32_t start) [member function]
cls.add_method('RemoveAtStart',
'void',
[param('uint32_t', 'start')])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
return
def register_Ns3BufferIterator_methods(root_module, cls):
## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator(ns3::Buffer::Iterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Buffer::Iterator const &', 'arg0')])
## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator() [constructor]
cls.add_constructor([])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size) [member function]
cls.add_method('CalculateIpChecksum',
'uint16_t',
[param('uint16_t', 'size')])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size, uint32_t initialChecksum) [member function]
cls.add_method('CalculateIpChecksum',
'uint16_t',
[param('uint16_t', 'size'), param('uint32_t', 'initialChecksum')])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetDistanceFrom(ns3::Buffer::Iterator const & o) const [member function]
cls.add_method('GetDistanceFrom',
'uint32_t',
[param('ns3::Buffer::Iterator const &', 'o')],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetRemainingSize() const [member function]
cls.add_method('GetRemainingSize',
'uint32_t',
[],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetSize() const [member function]
cls.add_method('GetSize',
'uint32_t',
[],
is_const=True)
## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsEnd() const [member function]
cls.add_method('IsEnd',
'bool',
[],
is_const=True)
## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsStart() const [member function]
cls.add_method('IsStart',
'bool',
[],
is_const=True)
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next() [member function]
cls.add_method('Next',
'void',
[])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next(uint32_t delta) [member function]
cls.add_method('Next',
'void',
[param('uint32_t', 'delta')])
## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::PeekU8() [member function]
cls.add_method('PeekU8',
'uint8_t',
[])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev() [member function]
cls.add_method('Prev',
'void',
[])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev(uint32_t delta) [member function]
cls.add_method('Prev',
'void',
[param('uint32_t', 'delta')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(uint8_t * buffer, uint32_t size) [member function]
cls.add_method('Read',
'void',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(ns3::Buffer::Iterator start, uint32_t size) [member function]
cls.add_method('Read',
'void',
[param('ns3::Buffer::Iterator', 'start'), param('uint32_t', 'size')])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadLsbtohU16() [member function]
cls.add_method('ReadLsbtohU16',
'uint16_t',
[])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadLsbtohU32() [member function]
cls.add_method('ReadLsbtohU32',
'uint32_t',
[])
## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadLsbtohU64() [member function]
cls.add_method('ReadLsbtohU64',
'uint64_t',
[])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadNtohU16() [member function]
cls.add_method('ReadNtohU16',
'uint16_t',
[])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadNtohU32() [member function]
cls.add_method('ReadNtohU32',
'uint32_t',
[])
## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadNtohU64() [member function]
cls.add_method('ReadNtohU64',
'uint64_t',
[])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadU16() [member function]
cls.add_method('ReadU16',
'uint16_t',
[])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadU32() [member function]
cls.add_method('ReadU32',
'uint32_t',
[])
## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadU64() [member function]
cls.add_method('ReadU64',
'uint64_t',
[])
## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::ReadU8() [member function]
cls.add_method('ReadU8',
'uint8_t',
[])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Write',
'void',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(ns3::Buffer::Iterator start, ns3::Buffer::Iterator end) [member function]
cls.add_method('Write',
'void',
[param('ns3::Buffer::Iterator', 'start'), param('ns3::Buffer::Iterator', 'end')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU16(uint16_t data) [member function]
cls.add_method('WriteHtolsbU16',
'void',
[param('uint16_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU32(uint32_t data) [member function]
cls.add_method('WriteHtolsbU32',
'void',
[param('uint32_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU64(uint64_t data) [member function]
cls.add_method('WriteHtolsbU64',
'void',
[param('uint64_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU16(uint16_t data) [member function]
cls.add_method('WriteHtonU16',
'void',
[param('uint16_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU32(uint32_t data) [member function]
cls.add_method('WriteHtonU32',
'void',
[param('uint32_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU64(uint64_t data) [member function]
cls.add_method('WriteHtonU64',
'void',
[param('uint64_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU16(uint16_t data) [member function]
cls.add_method('WriteU16',
'void',
[param('uint16_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU32(uint32_t data) [member function]
cls.add_method('WriteU32',
'void',
[param('uint32_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU64(uint64_t data) [member function]
cls.add_method('WriteU64',
'void',
[param('uint64_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data) [member function]
cls.add_method('WriteU8',
'void',
[param('uint8_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data, uint32_t len) [member function]
cls.add_method('WriteU8',
'void',
[param('uint8_t', 'data'), param('uint32_t', 'len')])
return
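# Illustrative use of the Buffer::Iterator bindings registered above (a
# sketch only, assuming the compiled ns.network extension is importable;
# the Buffer class itself is registered elsewhere in this module):
#   import ns.network
#   buf = ns.network.Buffer()
#   buf.AddAtStart(4)                      # reserve room for one 32-bit field
#   buf.Begin().WriteHtonU32(0x01020304)   # write in network byte order
#   value = buf.Begin().ReadNtohU32()      # reads back 0x01020304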
def register_Ns3ByteTagIterator_methods(root_module, cls):
## packet.h (module 'network'): ns3::ByteTagIterator::ByteTagIterator(ns3::ByteTagIterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ByteTagIterator const &', 'arg0')])
## packet.h (module 'network'): bool ns3::ByteTagIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## packet.h (module 'network'): ns3::ByteTagIterator::Item ns3::ByteTagIterator::Next() [member function]
cls.add_method('Next',
'ns3::ByteTagIterator::Item',
[])
return
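# Sketch of walking the byte tags of a packet with the iterator registered
# above (assumes ns.network is built; Packet and its GetByteTagIterator
# accessor are registered elsewhere in this module):
#   pkt = ns.network.Packet(64)
#   it = pkt.GetByteTagIterator()
#   while it.HasNext():
#       item = it.Next()
#       print(item.GetTypeId().GetName(), item.GetStart(), item.GetEnd())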
def register_Ns3ByteTagIteratorItem_methods(root_module, cls):
## packet.h (module 'network'): ns3::ByteTagIterator::Item::Item(ns3::ByteTagIterator::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ByteTagIterator::Item const &', 'arg0')])
## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetEnd() const [member function]
cls.add_method('GetEnd',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetStart() const [member function]
cls.add_method('GetStart',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): void ns3::ByteTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
cls.add_method('GetTag',
'void',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): ns3::TypeId ns3::ByteTagIterator::Item::GetTypeId() const [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_const=True)
return
def register_Ns3ByteTagList_methods(root_module, cls):
## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList() [constructor]
cls.add_constructor([])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList(ns3::ByteTagList const & o) [copy constructor]
cls.add_constructor([param('ns3::ByteTagList const &', 'o')])
## byte-tag-list.h (module 'network'): ns3::TagBuffer ns3::ByteTagList::Add(ns3::TypeId tid, uint32_t bufferSize, int32_t start, int32_t end) [member function]
cls.add_method('Add',
'ns3::TagBuffer',
[param('ns3::TypeId', 'tid'), param('uint32_t', 'bufferSize'), param('int32_t', 'start'), param('int32_t', 'end')])
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::Add(ns3::ByteTagList const & o) [member function]
cls.add_method('Add',
'void',
[param('ns3::ByteTagList const &', 'o')])
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtEnd(int32_t appendOffset) [member function]
cls.add_method('AddAtEnd',
'void',
[param('int32_t', 'appendOffset')])
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtStart(int32_t prependOffset) [member function]
cls.add_method('AddAtStart',
'void',
[param('int32_t', 'prependOffset')])
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::Adjust(int32_t adjustment) [member function]
cls.add_method('Adjust',
'void',
[param('int32_t', 'adjustment')])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator ns3::ByteTagList::Begin(int32_t offsetStart, int32_t offsetEnd) const [member function]
cls.add_method('Begin',
'ns3::ByteTagList::Iterator',
[param('int32_t', 'offsetStart'), param('int32_t', 'offsetEnd')],
is_const=True)
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::RemoveAll() [member function]
cls.add_method('RemoveAll',
'void',
[])
return
def register_Ns3ByteTagListIterator_methods(root_module, cls):
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Iterator(ns3::ByteTagList::Iterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ByteTagList::Iterator const &', 'arg0')])
## byte-tag-list.h (module 'network'): uint32_t ns3::ByteTagList::Iterator::GetOffsetStart() const [member function]
cls.add_method('GetOffsetStart',
'uint32_t',
[],
is_const=True)
## byte-tag-list.h (module 'network'): bool ns3::ByteTagList::Iterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item ns3::ByteTagList::Iterator::Next() [member function]
cls.add_method('Next',
'ns3::ByteTagList::Iterator::Item',
[])
return
def register_Ns3ByteTagListIteratorItem_methods(root_module, cls):
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::ByteTagList::Iterator::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ByteTagList::Iterator::Item const &', 'arg0')])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::TagBuffer buf) [constructor]
cls.add_constructor([param('ns3::TagBuffer', 'buf')])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::buf [variable]
cls.add_instance_attribute('buf', 'ns3::TagBuffer', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::end [variable]
cls.add_instance_attribute('end', 'int32_t', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::size [variable]
cls.add_instance_attribute('size', 'uint32_t', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::start [variable]
cls.add_instance_attribute('start', 'int32_t', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::tid [variable]
cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
return
def register_Ns3CallbackBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function]
cls.add_method('GetImpl',
'ns3::Ptr< ns3::CallbackImplBase >',
[],
is_const=True)
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')],
visibility='protected')
return
def register_Ns3Hasher_methods(root_module, cls):
## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Hasher const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hasher const &', 'arg0')])
## hash.h (module 'core'): ns3::Hasher::Hasher() [constructor]
cls.add_constructor([])
## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Ptr<ns3::Hash::Implementation> hp) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::Hash::Implementation >', 'hp')])
## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')])
## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(std::string const s) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('std::string const', 's')])
## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')])
## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(std::string const s) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('std::string const', 's')])
## hash.h (module 'core'): ns3::Hasher & ns3::Hasher::clear() [member function]
cls.add_method('clear',
'ns3::Hasher &',
[])
return
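# Example use of the Hasher bindings (illustrative; Hasher lives in the
# core module, so this assumes the compiled ns.core extension):
#   import ns.core
#   h = ns.core.Hasher()
#   h32 = h.GetHash32("network")   # 32-bit hash of the string
#   h.clear()                      # reset state before hashing new data
#   h64 = h.GetHash64("network")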
def register_Ns3Ipv4Address_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(ns3::Ipv4Address const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4Address const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(uint32_t address) [constructor]
cls.add_constructor([param('uint32_t', 'address')])
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(char const * address) [constructor]
cls.add_constructor([param('char const *', 'address')])
## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::CombineMask(ns3::Ipv4Mask const & mask) const [member function]
cls.add_method('CombineMask',
'ns3::Ipv4Address',
[param('ns3::Ipv4Mask const &', 'mask')],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::ConvertFrom(ns3::Address const & address) [member function]
cls.add_method('ConvertFrom',
'ns3::Ipv4Address',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::Deserialize(uint8_t const * buf) [member function]
cls.add_method('Deserialize',
'ns3::Ipv4Address',
[param('uint8_t const *', 'buf')],
is_static=True)
## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Address::Get() const [member function]
cls.add_method('Get',
'uint32_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetAny() [member function]
cls.add_method('GetAny',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetBroadcast() [member function]
cls.add_method('GetBroadcast',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::GetSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function]
cls.add_method('GetSubnetDirectedBroadcast',
'ns3::Ipv4Address',
[param('ns3::Ipv4Mask const &', 'mask')],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsAny() const [member function]
cls.add_method('IsAny',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsBroadcast() const [member function]
cls.add_method('IsBroadcast',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsEqual(ns3::Ipv4Address const & other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv4Address const &', 'other')],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsLocalMulticast() const [member function]
cls.add_method('IsLocalMulticast',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsLocalhost() const [member function]
cls.add_method('IsLocalhost',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): static bool ns3::Ipv4Address::IsMatchingType(ns3::Address const & address) [member function]
cls.add_method('IsMatchingType',
'bool',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function]
cls.add_method('IsSubnetDirectedBroadcast',
'bool',
[param('ns3::Ipv4Mask const &', 'mask')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Serialize(uint8_t * buf) const [member function]
cls.add_method('Serialize',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(uint32_t address) [member function]
cls.add_method('Set',
'void',
[param('uint32_t', 'address')])
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(char const * address) [member function]
cls.add_method('Set',
'void',
[param('char const *', 'address')])
return
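# Typical use of the Ipv4Address bindings registered above (illustrative
# only, assuming the compiled ns.network extension):
#   import ns.network
#   addr = ns.network.Ipv4Address("192.168.1.7")
#   mask = ns.network.Ipv4Mask("255.255.255.0")
#   net = addr.CombineMask(mask)                     # 192.168.1.0
#   bcast = addr.GetSubnetDirectedBroadcast(mask)    # 192.168.1.255
#   addr.IsMulticast()                               # False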
def register_Ns3Ipv4Mask_methods(root_module, cls):
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(ns3::Ipv4Mask const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4Mask const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(uint32_t mask) [constructor]
cls.add_constructor([param('uint32_t', 'mask')])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(char const * mask) [constructor]
cls.add_constructor([param('char const *', 'mask')])
## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::Get() const [member function]
cls.add_method('Get',
'uint32_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::GetInverse() const [member function]
cls.add_method('GetInverse',
'uint32_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv4Mask',
[],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetOnes() [member function]
cls.add_method('GetOnes',
'ns3::Ipv4Mask',
[],
is_static=True)
## ipv4-address.h (module 'network'): uint16_t ns3::Ipv4Mask::GetPrefixLength() const [member function]
cls.add_method('GetPrefixLength',
'uint16_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv4Mask',
[],
is_static=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsEqual(ns3::Ipv4Mask other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv4Mask', 'other')],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsMatch(ns3::Ipv4Address a, ns3::Ipv4Address b) const [member function]
cls.add_method('IsMatch',
'bool',
[param('ns3::Ipv4Address', 'a'), param('ns3::Ipv4Address', 'b')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Set(uint32_t mask) [member function]
cls.add_method('Set',
'void',
[param('uint32_t', 'mask')])
return
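# Companion sketch for the Ipv4Mask bindings (same ns.network assumption as
# the Ipv4Address example above):
#   mask = ns.network.Ipv4Mask("255.255.255.0")
#   mask.GetPrefixLength()                           # 24
#   mask.IsMatch(ns.network.Ipv4Address("10.0.0.1"),
#                ns.network.Ipv4Address("10.0.0.2")) # True: same /24 network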
def register_Ns3Ipv6Address_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(char const * address) [constructor]
cls.add_constructor([param('char const *', 'address')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(uint8_t * address) [constructor]
cls.add_constructor([param('uint8_t *', 'address')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const & addr) [copy constructor]
cls.add_constructor([param('ns3::Ipv6Address const &', 'addr')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const * addr) [constructor]
cls.add_constructor([param('ns3::Ipv6Address const *', 'addr')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6Address::CombinePrefix(ns3::Ipv6Prefix const & prefix) [member function]
cls.add_method('CombinePrefix',
'ns3::Ipv6Address',
[param('ns3::Ipv6Prefix const &', 'prefix')])
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::ConvertFrom(ns3::Address const & address) [member function]
cls.add_method('ConvertFrom',
'ns3::Ipv6Address',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::Deserialize(uint8_t const * buf) [member function]
cls.add_method('Deserialize',
'ns3::Ipv6Address',
[param('uint8_t const *', 'buf')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllHostsMulticast() [member function]
cls.add_method('GetAllHostsMulticast',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllNodesMulticast() [member function]
cls.add_method('GetAllNodesMulticast',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllRoutersMulticast() [member function]
cls.add_method('GetAllRoutersMulticast',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAny() [member function]
cls.add_method('GetAny',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::GetBytes(uint8_t * buf) const [member function]
cls.add_method('GetBytes',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv6-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv6Address::GetIpv4MappedAddress() const [member function]
cls.add_method('GetIpv4MappedAddress',
'ns3::Ipv4Address',
[],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetOnes() [member function]
cls.add_method('GetOnes',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllHostsMulticast() const [member function]
cls.add_method('IsAllHostsMulticast',
'bool',
[],
deprecated=True, is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllNodesMulticast() const [member function]
cls.add_method('IsAllNodesMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllRoutersMulticast() const [member function]
cls.add_method('IsAllRoutersMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAny() const [member function]
cls.add_method('IsAny',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsDocumentation() const [member function]
cls.add_method('IsDocumentation',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsEqual(ns3::Ipv6Address const & other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv6Address const &', 'other')],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsIpv4MappedAddress() const [member function]
cls.add_method('IsIpv4MappedAddress',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocal() const [member function]
cls.add_method('IsLinkLocal',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocalMulticast() const [member function]
cls.add_method('IsLinkLocalMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLocalhost() const [member function]
cls.add_method('IsLocalhost',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): static bool ns3::Ipv6Address::IsMatchingType(ns3::Address const & address) [member function]
cls.add_method('IsMatchingType',
'bool',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsSolicitedMulticast() const [member function]
cls.add_method('IsSolicitedMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac16Address addr, ns3::Ipv6Address prefix) [member function]
cls.add_method('MakeAutoconfiguredAddress',
'ns3::Ipv6Address',
[param('ns3::Mac16Address', 'addr'), param('ns3::Ipv6Address', 'prefix')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac48Address addr, ns3::Ipv6Address prefix) [member function]
cls.add_method('MakeAutoconfiguredAddress',
'ns3::Ipv6Address',
[param('ns3::Mac48Address', 'addr'), param('ns3::Ipv6Address', 'prefix')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac64Address addr, ns3::Ipv6Address prefix) [member function]
cls.add_method('MakeAutoconfiguredAddress',
'ns3::Ipv6Address',
[param('ns3::Mac64Address', 'addr'), param('ns3::Ipv6Address', 'prefix')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac16Address mac) [member function]
cls.add_method('MakeAutoconfiguredLinkLocalAddress',
'ns3::Ipv6Address',
[param('ns3::Mac16Address', 'mac')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac48Address mac) [member function]
cls.add_method('MakeAutoconfiguredLinkLocalAddress',
'ns3::Ipv6Address',
[param('ns3::Mac48Address', 'mac')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac64Address mac) [member function]
cls.add_method('MakeAutoconfiguredLinkLocalAddress',
'ns3::Ipv6Address',
[param('ns3::Mac64Address', 'mac')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeIpv4MappedAddress(ns3::Ipv4Address addr) [member function]
cls.add_method('MakeIpv4MappedAddress',
'ns3::Ipv6Address',
[param('ns3::Ipv4Address', 'addr')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeSolicitedAddress(ns3::Ipv6Address addr) [member function]
cls.add_method('MakeSolicitedAddress',
'ns3::Ipv6Address',
[param('ns3::Ipv6Address', 'addr')],
is_static=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Serialize(uint8_t * buf) const [member function]
cls.add_method('Serialize',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(char const * address) [member function]
cls.add_method('Set',
'void',
[param('char const *', 'address')])
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(uint8_t * address) [member function]
cls.add_method('Set',
'void',
[param('uint8_t *', 'address')])
return
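# Sketch of the autoconfiguration helpers registered above (assumes the
# compiled ns.network extension; Mac48Address is registered later in this
# module):
#   mac = ns.network.Mac48Address("00:00:00:00:00:01")
#   ll = ns.network.Ipv6Address.MakeAutoconfiguredLinkLocalAddress(mac)
#   ll.IsLinkLocal()                                 # True
#   v4mapped = ns.network.Ipv6Address.MakeIpv4MappedAddress(
#       ns.network.Ipv4Address("10.0.0.1"))
#   v4mapped.IsIpv4MappedAddress()                   # True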
def register_Ns3Ipv6Prefix_methods(root_module, cls):
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t * prefix) [constructor]
cls.add_constructor([param('uint8_t *', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(char const * prefix) [constructor]
cls.add_constructor([param('char const *', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t prefix) [constructor]
cls.add_constructor([param('uint8_t', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const & prefix) [copy constructor]
cls.add_constructor([param('ns3::Ipv6Prefix const &', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const * prefix) [constructor]
cls.add_constructor([param('ns3::Ipv6Prefix const *', 'prefix')])
## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::GetBytes(uint8_t * buf) const [member function]
cls.add_method('GetBytes',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv6Prefix',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetOnes() [member function]
cls.add_method('GetOnes',
'ns3::Ipv6Prefix',
[],
is_static=True)
## ipv6-address.h (module 'network'): uint8_t ns3::Ipv6Prefix::GetPrefixLength() const [member function]
cls.add_method('GetPrefixLength',
'uint8_t',
[],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv6Prefix',
[],
is_static=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsEqual(ns3::Ipv6Prefix const & other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv6Prefix const &', 'other')],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsMatch(ns3::Ipv6Address a, ns3::Ipv6Address b) const [member function]
cls.add_method('IsMatch',
'bool',
[param('ns3::Ipv6Address', 'a'), param('ns3::Ipv6Address', 'b')],
is_const=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
return
def register_Ns3Mac48Address_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address(ns3::Mac48Address const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Mac48Address const &', 'arg0')])
## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address() [constructor]
cls.add_constructor([])
## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address(char const * str) [constructor]
cls.add_constructor([param('char const *', 'str')])
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::Allocate() [member function]
cls.add_method('Allocate',
'ns3::Mac48Address',
[],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::ConvertFrom(ns3::Address const & address) [member function]
cls.add_method('ConvertFrom',
'ns3::Mac48Address',
[param('ns3::Address const &', 'address')],
is_static=True)
## mac48-address.h (module 'network'): void ns3::Mac48Address::CopyFrom(uint8_t const * buffer) [member function]
cls.add_method('CopyFrom',
'void',
[param('uint8_t const *', 'buffer')])
## mac48-address.h (module 'network'): void ns3::Mac48Address::CopyTo(uint8_t * buffer) const [member function]
cls.add_method('CopyTo',
'void',
[param('uint8_t *', 'buffer')],
is_const=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetBroadcast() [member function]
cls.add_method('GetBroadcast',
'ns3::Mac48Address',
[],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast(ns3::Ipv4Address address) [member function]
cls.add_method('GetMulticast',
'ns3::Mac48Address',
[param('ns3::Ipv4Address', 'address')],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast(ns3::Ipv6Address address) [member function]
cls.add_method('GetMulticast',
'ns3::Mac48Address',
[param('ns3::Ipv6Address', 'address')],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast6Prefix() [member function]
cls.add_method('GetMulticast6Prefix',
'ns3::Mac48Address',
[],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticastPrefix() [member function]
cls.add_method('GetMulticastPrefix',
'ns3::Mac48Address',
[],
is_static=True)
## mac48-address.h (module 'network'): bool ns3::Mac48Address::IsBroadcast() const [member function]
cls.add_method('IsBroadcast',
'bool',
[],
is_const=True)
## mac48-address.h (module 'network'): bool ns3::Mac48Address::IsGroup() const [member function]
cls.add_method('IsGroup',
'bool',
[],
is_const=True)
## mac48-address.h (module 'network'): static bool ns3::Mac48Address::IsMatchingType(ns3::Address const & address) [member function]
cls.add_method('IsMatchingType',
'bool',
[param('ns3::Address const &', 'address')],
is_static=True)
return
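# Illustrative use of the Mac48Address bindings (same ns.network
# assumption):
#   mac = ns.network.Mac48Address.Allocate()              # next in sequence
#   ns.network.Mac48Address.GetBroadcast().IsBroadcast()  # True
#   mcast = ns.network.Mac48Address.GetMulticast(
#       ns.network.Ipv4Address("224.0.0.251"))            # 01:00:5e:... mapping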
def register_Ns3ObjectBase_methods(root_module, cls):
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor]
cls.add_constructor([])
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function]
cls.add_method('GetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
is_const=True)
## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & value) const [member function]
cls.add_method('GetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
is_const=True)
## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function]
cls.add_method('ConstructSelf',
'void',
[param('ns3::AttributeConstructionList const &', 'attributes')],
visibility='protected')
## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function]
cls.add_method('NotifyConstructionCompleted',
'void',
[],
visibility='protected', is_virtual=True)
return
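# The attribute accessors registered above take out-parameters; a sketch of
# the calling convention (obj stands for any instance of an ns3::Object
# subclass and "MaxSize" for one of its attributes, both hypothetical;
# assumes the compiled ns.core extension for the value classes):
#   v = ns.core.StringValue()
#   obj.GetAttribute("MaxSize", v)        # fills v in place
#   obj.SetAttribute("MaxSize", ns.core.StringValue("100p"))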
def register_Ns3ObjectDeleter_methods(root_module, cls):
## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter() [constructor]
cls.add_constructor([])
## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter(ns3::ObjectDeleter const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')])
## object.h (module 'core'): static void ns3::ObjectDeleter::Delete(ns3::Object * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::Object *', 'object')],
is_static=True)
return
def register_Ns3PacketMetadata_methods(root_module, cls):
## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(uint64_t uid, uint32_t size) [constructor]
cls.add_constructor([param('uint64_t', 'uid'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(ns3::PacketMetadata const & o) [copy constructor]
cls.add_constructor([param('ns3::PacketMetadata const &', 'o')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddAtEnd(ns3::PacketMetadata const & o) [member function]
cls.add_method('AddAtEnd',
'void',
[param('ns3::PacketMetadata const &', 'o')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddHeader(ns3::Header const & header, uint32_t size) [member function]
cls.add_method('AddHeader',
'void',
[param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddPaddingAtEnd(uint32_t end) [member function]
cls.add_method('AddPaddingAtEnd',
'void',
[param('uint32_t', 'end')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
cls.add_method('AddTrailer',
'void',
[param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::PacketMetadata::BeginItem(ns3::Buffer buffer) const [member function]
cls.add_method('BeginItem',
'ns3::PacketMetadata::ItemIterator',
[param('ns3::Buffer', 'buffer')],
is_const=True)
## packet-metadata.h (module 'network'): ns3::PacketMetadata ns3::PacketMetadata::CreateFragment(uint32_t start, uint32_t end) const [member function]
cls.add_method('CreateFragment',
'ns3::PacketMetadata',
[param('uint32_t', 'start'), param('uint32_t', 'end')],
is_const=True)
## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::Enable() [member function]
cls.add_method('Enable',
'void',
[],
is_static=True)
## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::EnableChecking() [member function]
cls.add_method('EnableChecking',
'void',
[],
is_static=True)
## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## packet-metadata.h (module 'network'): uint64_t ns3::PacketMetadata::GetUid() const [member function]
cls.add_method('GetUid',
'uint64_t',
[],
is_const=True)
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtEnd(uint32_t end) [member function]
cls.add_method('RemoveAtEnd',
'void',
[param('uint32_t', 'end')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtStart(uint32_t start) [member function]
cls.add_method('RemoveAtStart',
'void',
[param('uint32_t', 'start')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveHeader(ns3::Header const & header, uint32_t size) [member function]
cls.add_method('RemoveHeader',
'void',
[param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
cls.add_method('RemoveTrailer',
'void',
[param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
return
def register_Ns3PacketMetadataItem_methods(root_module, cls):
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item() [constructor]
cls.add_constructor([])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item(ns3::PacketMetadata::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PacketMetadata::Item const &', 'arg0')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::current [variable]
cls.add_instance_attribute('current', 'ns3::Buffer::Iterator', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentSize [variable]
cls.add_instance_attribute('currentSize', 'uint32_t', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromEnd [variable]
cls.add_instance_attribute('currentTrimedFromEnd', 'uint32_t', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromStart [variable]
cls.add_instance_attribute('currentTrimedFromStart', 'uint32_t', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::isFragment [variable]
cls.add_instance_attribute('isFragment', 'bool', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::tid [variable]
cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
return
def register_Ns3PacketMetadataItemIterator_methods(root_module, cls):
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata::ItemIterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PacketMetadata::ItemIterator const &', 'arg0')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata const * metadata, ns3::Buffer buffer) [constructor]
cls.add_constructor([param('ns3::PacketMetadata const *', 'metadata'), param('ns3::Buffer', 'buffer')])
## packet-metadata.h (module 'network'): bool ns3::PacketMetadata::ItemIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item ns3::PacketMetadata::ItemIterator::Next() [member function]
cls.add_method('Next',
'ns3::PacketMetadata::Item',
[])
return
def register_Ns3PacketTagIterator_methods(root_module, cls):
## packet.h (module 'network'): ns3::PacketTagIterator::PacketTagIterator(ns3::PacketTagIterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PacketTagIterator const &', 'arg0')])
## packet.h (module 'network'): bool ns3::PacketTagIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## packet.h (module 'network'): ns3::PacketTagIterator::Item ns3::PacketTagIterator::Next() [member function]
cls.add_method('Next',
'ns3::PacketTagIterator::Item',
[])
return
def register_Ns3PacketTagIteratorItem_methods(root_module, cls):
## packet.h (module 'network'): ns3::PacketTagIterator::Item::Item(ns3::PacketTagIterator::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PacketTagIterator::Item const &', 'arg0')])
## packet.h (module 'network'): void ns3::PacketTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
cls.add_method('GetTag',
'void',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): ns3::TypeId ns3::PacketTagIterator::Item::GetTypeId() const [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_const=True)
return
def register_Ns3PacketTagList_methods(root_module, cls):
## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList() [constructor]
cls.add_constructor([])
## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList(ns3::PacketTagList const & o) [copy constructor]
cls.add_constructor([param('ns3::PacketTagList const &', 'o')])
## packet-tag-list.h (module 'network'): void ns3::PacketTagList::Add(ns3::Tag const & tag) const [member function]
cls.add_method('Add',
'void',
[param('ns3::Tag const &', 'tag')],
is_const=True)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData const * ns3::PacketTagList::Head() const [member function]
cls.add_method('Head',
'ns3::PacketTagList::TagData const *',
[],
is_const=True)
## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Peek(ns3::Tag & tag) const [member function]
cls.add_method('Peek',
'bool',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Remove(ns3::Tag & tag) [member function]
cls.add_method('Remove',
'bool',
[param('ns3::Tag &', 'tag')])
## packet-tag-list.h (module 'network'): void ns3::PacketTagList::RemoveAll() [member function]
cls.add_method('RemoveAll',
'void',
[])
## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Replace(ns3::Tag & tag) [member function]
cls.add_method('Replace',
'bool',
[param('ns3::Tag &', 'tag')])
return
def register_Ns3PacketTagListTagData_methods(root_module, cls):
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData() [constructor]
cls.add_constructor([])
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData(ns3::PacketTagList::TagData const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PacketTagList::TagData const &', 'arg0')])
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::count [variable]
cls.add_instance_attribute('count', 'uint32_t', is_const=False)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::data [variable]
cls.add_instance_attribute('data', 'uint8_t [ 1 ]', is_const=False)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::next [variable]
cls.add_instance_attribute('next', 'ns3::PacketTagList::TagData *', is_const=False)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::size [variable]
cls.add_instance_attribute('size', 'uint32_t', is_const=False)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::tid [variable]
cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
return
def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount(ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3Tag_methods(root_module, cls):
## tag.h (module 'network'): ns3::Tag::Tag() [constructor]
cls.add_constructor([])
## tag.h (module 'network'): ns3::Tag::Tag(ns3::Tag const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Tag const &', 'arg0')])
## tag.h (module 'network'): void ns3::Tag::Deserialize(ns3::TagBuffer i) [member function]
cls.add_method('Deserialize',
'void',
[param('ns3::TagBuffer', 'i')],
is_pure_virtual=True, is_virtual=True)
## tag.h (module 'network'): uint32_t ns3::Tag::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## tag.h (module 'network'): static ns3::TypeId ns3::Tag::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## tag.h (module 'network'): void ns3::Tag::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## tag.h (module 'network'): void ns3::Tag::Serialize(ns3::TagBuffer i) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::TagBuffer', 'i')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
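# Tag is abstract (note the pure-virtual methods above); a Python-side
# subclass sketch, assuming the generated helper class supports overriding
# these virtuals (MyTag and its single field are hypothetical):
#   class MyTag(ns.network.Tag):
#       def GetSerializedSize(self):
#           return 4
#       def Serialize(self, buf):         # buf is a TagBuffer, see below
#           buf.WriteU32(self.value)
#       def Deserialize(self, buf):
#           self.value = buf.ReadU32()
#       def Print(self, os):
#           pass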
def register_Ns3TagBuffer_methods(root_module, cls):
## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(ns3::TagBuffer const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TagBuffer const &', 'arg0')])
## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(uint8_t * start, uint8_t * end) [constructor]
cls.add_constructor([param('uint8_t *', 'start'), param('uint8_t *', 'end')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::CopyFrom(ns3::TagBuffer o) [member function]
cls.add_method('CopyFrom',
'void',
[param('ns3::TagBuffer', 'o')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::Read(uint8_t * buffer, uint32_t size) [member function]
cls.add_method('Read',
'void',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
## tag-buffer.h (module 'network'): double ns3::TagBuffer::ReadDouble() [member function]
cls.add_method('ReadDouble',
'double',
[])
## tag-buffer.h (module 'network'): uint16_t ns3::TagBuffer::ReadU16() [member function]
cls.add_method('ReadU16',
'uint16_t',
[])
## tag-buffer.h (module 'network'): uint32_t ns3::TagBuffer::ReadU32() [member function]
cls.add_method('ReadU32',
'uint32_t',
[])
## tag-buffer.h (module 'network'): uint64_t ns3::TagBuffer::ReadU64() [member function]
cls.add_method('ReadU64',
'uint64_t',
[])
## tag-buffer.h (module 'network'): uint8_t ns3::TagBuffer::ReadU8() [member function]
cls.add_method('ReadU8',
'uint8_t',
[])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::TrimAtEnd(uint32_t trim) [member function]
cls.add_method('TrimAtEnd',
'void',
[param('uint32_t', 'trim')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::Write(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Write',
'void',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteDouble(double v) [member function]
cls.add_method('WriteDouble',
'void',
[param('double', 'v')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU16(uint16_t data) [member function]
cls.add_method('WriteU16',
'void',
[param('uint16_t', 'data')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU32(uint32_t data) [member function]
cls.add_method('WriteU32',
'void',
[param('uint32_t', 'data')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU64(uint64_t v) [member function]
cls.add_method('WriteU64',
'void',
[param('uint64_t', 'v')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU8(uint8_t v) [member function]
cls.add_method('WriteU8',
'void',
[param('uint8_t', 'v')])
return
def register_Ns3TypeId_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor]
cls.add_constructor([param('char const *', 'name')])
## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [copy constructor]
cls.add_constructor([param('ns3::TypeId const &', 'o')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SUPPORTED, std::string const & supportMsg="") [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SUPPORTED, std::string const & supportMsg="") [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor) [member function]
cls.add_method('AddTraceSource',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')],
deprecated=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor, std::string callback, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SUPPORTED, std::string const & supportMsg="") [member function]
cls.add_method('AddTraceSource',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor'), param('std::string', 'callback'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(uint32_t i) const [member function]
cls.add_method('GetAttribute',
'ns3::TypeId::AttributeInformation',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(uint32_t i) const [member function]
cls.add_method('GetAttributeFullName',
'std::string',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetAttributeN() const [member function]
cls.add_method('GetAttributeN',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase*,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::TypeId::GetConstructor() const [member function]
cls.add_method('GetConstructor',
'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function]
cls.add_method('GetGroupName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetHash() const [member function]
cls.add_method('GetHash',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function]
cls.add_method('GetName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function]
cls.add_method('GetParent',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint32_t i) [member function]
cls.add_method('GetRegistered',
'ns3::TypeId',
[param('uint32_t', 'i')],
is_static=True)
## type-id.h (module 'core'): static uint32_t ns3::TypeId::GetRegisteredN() [member function]
cls.add_method('GetRegisteredN',
'uint32_t',
[],
is_static=True)
## type-id.h (module 'core'): std::size_t ns3::TypeId::GetSize() const [member function]
cls.add_method('GetSize',
'std::size_t',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(uint32_t i) const [member function]
cls.add_method('GetTraceSource',
'ns3::TypeId::TraceSourceInformation',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetTraceSourceN() const [member function]
cls.add_method('GetTraceSourceN',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function]
cls.add_method('GetUid',
'uint16_t',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function]
cls.add_method('HasConstructor',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function]
cls.add_method('HasParent',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function]
cls.add_method('HideFromDocumentation',
'ns3::TypeId',
[])
## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function]
cls.add_method('IsChildOf',
'bool',
[param('ns3::TypeId', 'other')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function]
cls.add_method('LookupAttributeByName',
'bool',
[param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByHash(uint32_t hash) [member function]
cls.add_method('LookupByHash',
'ns3::TypeId',
[param('uint32_t', 'hash')],
is_static=True)
## type-id.h (module 'core'): static bool ns3::TypeId::LookupByHashFailSafe(uint32_t hash, ns3::TypeId * tid) [member function]
cls.add_method('LookupByHashFailSafe',
'bool',
[param('uint32_t', 'hash'), param('ns3::TypeId *', 'tid')],
is_static=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function]
cls.add_method('LookupByName',
'ns3::TypeId',
[param('std::string', 'name')],
is_static=True)
## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function]
cls.add_method('LookupTraceSourceByName',
'ns3::Ptr< ns3::TraceSourceAccessor const >',
[param('std::string', 'name')],
is_const=True)
## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name, ns3::TypeId::TraceSourceInformation * info) const [member function]
cls.add_method('LookupTraceSourceByName',
'ns3::Ptr< ns3::TraceSourceAccessor const >',
[param('std::string', 'name'), param('ns3::TypeId::TraceSourceInformation *', 'info')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function]
cls.add_method('MustHideFromDocumentation',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(uint32_t i, ns3::Ptr<ns3::AttributeValue const> initialValue) [member function]
cls.add_method('SetAttributeInitialValue',
'bool',
[param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function]
cls.add_method('SetGroupName',
'ns3::TypeId',
[param('std::string', 'groupName')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function]
cls.add_method('SetParent',
'ns3::TypeId',
[param('ns3::TypeId', 'tid')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetSize(std::size_t size) [member function]
cls.add_method('SetSize',
'ns3::TypeId',
[param('std::size_t', 'size')])
## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t uid) [member function]
cls.add_method('SetUid',
'void',
[param('uint16_t', 'uid')])
return
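
# The calls above follow the standard pybindgen shape: each
# cls.add_method(name, return_type, [param(...), ...]) mirrors one C++
# member function, with is_const/is_static/is_virtual matching the C++
# qualifiers. As an illustration only (assuming the classic ns.core bindings
# built from these definitions, not something defined in this file):
#
#   import ns.core
#   tid = ns.core.TypeId.LookupByName("ns3::Object")  # wraps LookupByName
#   print(tid.GetName(), tid.GetUid())
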
def register_Ns3TypeIdAttributeInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable]
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::flags [variable]
cls.add_instance_attribute('flags', 'uint32_t', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable]
cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue [variable]
cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::supportLevel [variable]
cls.add_instance_attribute('supportLevel', 'ns3::TypeId::SupportLevel', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::supportMsg [variable]
cls.add_instance_attribute('supportMsg', 'std::string', is_const=False)
return
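
# add_instance_attribute exposes each public data member of
# TypeId::AttributeInformation directly as a Python attribute; is_const=False
# keeps the fields writable from Python. TraceSourceInformation below is
# registered the same way.
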
def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::callback [variable]
cls.add_instance_attribute('callback', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::supportLevel [variable]
cls.add_instance_attribute('supportLevel', 'ns3::TypeId::SupportLevel', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::supportMsg [variable]
cls.add_instance_attribute('supportMsg', 'std::string', is_const=False)
return
def register_Ns3Empty_methods(root_module, cls):
## empty.h (module 'core'): ns3::empty::empty() [constructor]
cls.add_constructor([])
## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [copy constructor]
cls.add_constructor([param('ns3::empty const &', 'arg0')])
return
def register_Ns3Chunk_methods(root_module, cls):
## chunk.h (module 'network'): ns3::Chunk::Chunk() [constructor]
cls.add_constructor([])
## chunk.h (module 'network'): ns3::Chunk::Chunk(ns3::Chunk const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Chunk const &', 'arg0')])
## chunk.h (module 'network'): uint32_t ns3::Chunk::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_pure_virtual=True, is_virtual=True)
## chunk.h (module 'network'): static ns3::TypeId ns3::Chunk::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## chunk.h (module 'network'): void ns3::Chunk::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
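
# Methods flagged is_pure_virtual=True (Deserialize and Print above) are
# abstract in C++, so the generated wrapper refuses to instantiate ns3::Chunk
# directly; concrete behaviour comes from subclasses such as Header and
# Trailer, registered next.
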
def register_Ns3Header_methods(root_module, cls):
cls.add_output_stream_operator()
## header.h (module 'network'): ns3::Header::Header() [constructor]
cls.add_constructor([])
## header.h (module 'network'): ns3::Header::Header(ns3::Header const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Header const &', 'arg0')])
## header.h (module 'network'): uint32_t ns3::Header::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_pure_virtual=True, is_virtual=True)
## header.h (module 'network'): uint32_t ns3::Header::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## header.h (module 'network'): static ns3::TypeId ns3::Header::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## header.h (module 'network'): void ns3::Header::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## header.h (module 'network'): void ns3::Header::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
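
# Sketch only, assuming pybindgen's usual helper-class trampolines for
# virtual methods (not spelled out in this file): a Python subclass of the
# generated Header wrapper would override the pure virtuals registered above,
# e.g.
#
#   class MyHeader(ns.network.Header):     # hypothetical user subclass
#       def GetSerializedSize(self):
#           return 4                       # fixed-size example header
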
def register_Ns3Object_methods(root_module, cls):
## object.h (module 'core'): ns3::Object::Object() [constructor]
cls.add_constructor([])
## object.h (module 'core'): void ns3::Object::AggregateObject(ns3::Ptr<ns3::Object> other) [member function]
cls.add_method('AggregateObject',
'void',
[param('ns3::Ptr< ns3::Object >', 'other')])
## object.h (module 'core'): void ns3::Object::Dispose() [member function]
cls.add_method('Dispose',
'void',
[])
## object.h (module 'core'): ns3::Object::AggregateIterator ns3::Object::GetAggregateIterator() const [member function]
cls.add_method('GetAggregateIterator',
'ns3::Object::AggregateIterator',
[],
is_const=True)
## object.h (module 'core'): ns3::TypeId ns3::Object::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## object.h (module 'core'): static ns3::TypeId ns3::Object::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## object.h (module 'core'): void ns3::Object::Initialize() [member function]
cls.add_method('Initialize',
'void',
[])
## object.h (module 'core'): bool ns3::Object::IsInitialized() const [member function]
cls.add_method('IsInitialized',
'bool',
[],
is_const=True)
## object.h (module 'core'): ns3::Object::Object(ns3::Object const & o) [copy constructor]
cls.add_constructor([param('ns3::Object const &', 'o')],
visibility='protected')
## object.h (module 'core'): void ns3::Object::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
## object.h (module 'core'): void ns3::Object::DoInitialize() [member function]
cls.add_method('DoInitialize',
'void',
[],
visibility='protected', is_virtual=True)
## object.h (module 'core'): void ns3::Object::NotifyNewAggregate() [member function]
cls.add_method('NotifyNewAggregate',
'void',
[],
visibility='protected', is_virtual=True)
return
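
# visibility='protected' (the copy constructor and the DoDispose /
# DoInitialize / NotifyNewAggregate hooks above) keeps a member out of the
# public Python API while still letting Python subclasses override it through
# the generated helper class, mirroring the C++ access level.
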
def register_Ns3ObjectAggregateIterator_methods(root_module, cls):
## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator(ns3::Object::AggregateIterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')])
## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator() [constructor]
cls.add_constructor([])
## object.h (module 'core'): bool ns3::Object::AggregateIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## object.h (module 'core'): ns3::Ptr<ns3::Object const> ns3::Object::AggregateIterator::Next() [member function]
cls.add_method('Next',
'ns3::Ptr< ns3::Object const >',
[])
return
def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
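
# C++ templates have no direct Python equivalent, so each instantiation of
# ns3::SimpleRefCount<T, ns3::empty, ns3::DefaultDeleter<T>> used by the API
# gets its own register_Ns3SimpleRefCount__*_methods function; the functions
# that follow all repeat this constructor/copy-constructor/Cleanup pattern.
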
def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter< ns3::Hash::Implementation > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount(ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter< ns3::NixVector > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter< ns3::Packet > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3TraceSourceAccessor_methods(root_module, cls):
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor(ns3::TraceSourceAccessor const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')])
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor() [constructor]
cls.add_constructor([])
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Connect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('ConnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Disconnect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('DisconnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
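
# transfer_ownership=False on the ObjectBase* parameters above tells
# pybindgen that the C++ callee does not take ownership of the pointer, so
# the wrapper leaves the Python object's reference count alone.
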
def register_Ns3Trailer_methods(root_module, cls):
cls.add_output_stream_operator()
## trailer.h (module 'network'): ns3::Trailer::Trailer() [constructor]
cls.add_constructor([])
## trailer.h (module 'network'): ns3::Trailer::Trailer(ns3::Trailer const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Trailer const &', 'arg0')])
## trailer.h (module 'network'): uint32_t ns3::Trailer::Deserialize(ns3::Buffer::Iterator end) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'end')],
is_pure_virtual=True, is_virtual=True)
## trailer.h (module 'network'): uint32_t ns3::Trailer::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trailer.h (module 'network'): static ns3::TypeId ns3::Trailer::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## trailer.h (module 'network'): void ns3::Trailer::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trailer.h (module 'network'): void ns3::Trailer::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeAccessor_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
cls.add_method('Get',
'bool',
[param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasGetter() const [member function]
cls.add_method('HasGetter',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasSetter() const [member function]
cls.add_method('HasSetter',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
cls.add_method('Set',
'bool',
[param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeChecker_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
cls.add_method('Check',
'bool',
[param('ns3::AttributeValue const &', 'value')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
cls.add_method('Copy',
'bool',
[param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::CreateValidValue(ns3::AttributeValue const & value) const [member function]
cls.add_method('CreateValidValue',
'ns3::Ptr< ns3::AttributeValue >',
[param('ns3::AttributeValue const &', 'value')],
is_const=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function]
cls.add_method('GetUnderlyingTypeInformation',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetValueTypeName() const [member function]
cls.add_method('GetValueTypeName',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const [member function]
cls.add_method('HasUnderlyingTypeInformation',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_pure_virtual=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
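
# Illustration only (checker construction assumed, not defined in this file):
# the SerializeToString/DeserializeFromString pair registered above is how
# the attribute system round-trips values through strings, roughly
#
#   s = value.SerializeToString(checker)
#   ok = value.DeserializeFromString(s, checker)
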
def register_Ns3CallbackChecker_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')])
return
def register_Ns3CallbackImplBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
## callback.h (module 'core'): std::string ns3::CallbackImplBase::GetTypeid() const [member function]
cls.add_method('GetTypeid',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## callback.h (module 'core'): bool ns3::CallbackImplBase::IsEqual(ns3::Ptr<ns3::CallbackImplBase const> other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## callback.h (module 'core'): static std::string ns3::CallbackImplBase::Demangle(std::string const & mangled) [member function]
cls.add_method('Demangle',
'std::string',
[param('std::string const &', 'mangled')],
is_static=True, visibility='protected')
return
def register_Ns3CallbackValue_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'base')])
## callback.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::CallbackValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## callback.h (module 'core'): bool ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## callback.h (module 'core'): std::string ns3::CallbackValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## callback.h (module 'core'): void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function]
cls.add_method('Set',
'void',
[param('ns3::CallbackBase', 'base')])
return
def register_Ns3EmptyAttributeAccessor_methods(root_module, cls):
## attribute.h (module 'core'): ns3::EmptyAttributeAccessor::EmptyAttributeAccessor(ns3::EmptyAttributeAccessor const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EmptyAttributeAccessor const &', 'arg0')])
## attribute.h (module 'core'): ns3::EmptyAttributeAccessor::EmptyAttributeAccessor() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
cls.add_method('Get',
'bool',
[param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::HasGetter() const [member function]
cls.add_method('HasGetter',
'bool',
[],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::HasSetter() const [member function]
cls.add_method('HasSetter',
'bool',
[],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
cls.add_method('Set',
'bool',
[param('ns3::ObjectBase *', 'object'), param('ns3::AttributeValue const &', 'value')],
is_const=True, is_virtual=True)
return
def register_Ns3EmptyAttributeChecker_methods(root_module, cls):
## attribute.h (module 'core'): ns3::EmptyAttributeChecker::EmptyAttributeChecker(ns3::EmptyAttributeChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EmptyAttributeChecker const &', 'arg0')])
## attribute.h (module 'core'): ns3::EmptyAttributeChecker::EmptyAttributeChecker() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::EmptyAttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
cls.add_method('Check',
'bool',
[param('ns3::AttributeValue const &', 'value')],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
cls.add_method('Copy',
'bool',
[param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeChecker::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::EmptyAttributeChecker::GetUnderlyingTypeInformation() const [member function]
cls.add_method('GetUnderlyingTypeInformation',
'std::string',
[],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::EmptyAttributeChecker::GetValueTypeName() const [member function]
cls.add_method('GetValueTypeName',
'std::string',
[],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeChecker::HasUnderlyingTypeInformation() const [member function]
cls.add_method('HasUnderlyingTypeInformation',
'bool',
[],
is_const=True, is_virtual=True)
return
def register_Ns3EmptyAttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, visibility='private', is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
visibility='private', is_virtual=True)
## attribute.h (module 'core'): std::string ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, visibility='private', is_virtual=True)
return
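
# visibility='private' on EmptyAttributeValue's Copy / DeserializeFromString /
# SerializeToString mirrors attribute.h, where these overrides are declared
# private; they are registered so the generated helper class can keep virtual
# dispatch consistent, not to expose them as public Python methods.
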
def register_Ns3Ipv4AddressChecker_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker(ns3::Ipv4AddressChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4AddressChecker const &', 'arg0')])
return
def register_Ns3Ipv4AddressValue_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4AddressValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4AddressValue const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4Address const & value) [constructor]
cls.add_constructor([param('ns3::Ipv4Address const &', 'value')])
## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4AddressValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4AddressValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv4Address',
[],
is_const=True)
## ipv4-address.h (module 'network'): std::string ns3::Ipv4AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4AddressValue::Set(ns3::Ipv4Address const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv4Address const &', 'value')])
return
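
# The Checker/Value pair above (Ipv4AddressChecker plus Ipv4AddressValue with
# Get/Set/Copy and string serialization) is the standard shape of an ns-3
# attribute type; the Ipv4Mask, Ipv6Address, Ipv6Prefix and Mac48Address
# registrations that follow apply the same pattern to other value types.
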
def register_Ns3Ipv4MaskChecker_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker(ns3::Ipv4MaskChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4MaskChecker const &', 'arg0')])
return
def register_Ns3Ipv4MaskValue_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4MaskValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4MaskValue const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4Mask const & value) [constructor]
cls.add_constructor([param('ns3::Ipv4Mask const &', 'value')])
## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4MaskValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4MaskValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv4-address.h (module 'network'): ns3::Ipv4Mask ns3::Ipv4MaskValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv4Mask',
[],
is_const=True)
## ipv4-address.h (module 'network'): std::string ns3::Ipv4MaskValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4MaskValue::Set(ns3::Ipv4Mask const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv4Mask const &', 'value')])
return
def register_Ns3Ipv6AddressChecker_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker(ns3::Ipv6AddressChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv6AddressChecker const &', 'arg0')])
return
def register_Ns3Ipv6AddressValue_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6AddressValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv6AddressValue const &', 'arg0')])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6Address const & value) [constructor]
cls.add_constructor([param('ns3::Ipv6Address const &', 'value')])
## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6AddressValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6AddressValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv6Address',
[],
is_const=True)
## ipv6-address.h (module 'network'): std::string ns3::Ipv6AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6AddressValue::Set(ns3::Ipv6Address const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv6Address const &', 'value')])
return
def register_Ns3Ipv6PrefixChecker_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker(ns3::Ipv6PrefixChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv6PrefixChecker const &', 'arg0')])
return
def register_Ns3Ipv6PrefixValue_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6PrefixValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv6PrefixValue const &', 'arg0')])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6Prefix const & value) [constructor]
cls.add_constructor([param('ns3::Ipv6Prefix const &', 'value')])
## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6PrefixValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6PrefixValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix ns3::Ipv6PrefixValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv6Prefix',
[],
is_const=True)
## ipv6-address.h (module 'network'): std::string ns3::Ipv6PrefixValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6PrefixValue::Set(ns3::Ipv6Prefix const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv6Prefix const &', 'value')])
return
def register_Ns3Mac48AddressChecker_methods(root_module, cls):
## mac48-address.h (module 'network'): ns3::Mac48AddressChecker::Mac48AddressChecker() [constructor]
cls.add_constructor([])
## mac48-address.h (module 'network'): ns3::Mac48AddressChecker::Mac48AddressChecker(ns3::Mac48AddressChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Mac48AddressChecker const &', 'arg0')])
return
def register_Ns3Mac48AddressValue_methods(root_module, cls):
## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue() [constructor]
cls.add_constructor([])
## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue(ns3::Mac48AddressValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Mac48AddressValue const &', 'arg0')])
## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue(ns3::Mac48Address const & value) [constructor]
cls.add_constructor([param('ns3::Mac48Address const &', 'value')])
## mac48-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Mac48AddressValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## mac48-address.h (module 'network'): bool ns3::Mac48AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## mac48-address.h (module 'network'): ns3::Mac48Address ns3::Mac48AddressValue::Get() const [member function]
cls.add_method('Get',
'ns3::Mac48Address',
[],
is_const=True)
## mac48-address.h (module 'network'): std::string ns3::Mac48AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## mac48-address.h (module 'network'): void ns3::Mac48AddressValue::Set(ns3::Mac48Address const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Mac48Address const &', 'value')])
return
def register_Ns3NetDevice_methods(root_module, cls):
## net-device.h (module 'network'): ns3::NetDevice::NetDevice() [constructor]
cls.add_constructor([])
## net-device.h (module 'network'): ns3::NetDevice::NetDevice(ns3::NetDevice const & arg0) [copy constructor]
cls.add_constructor([param('ns3::NetDevice const &', 'arg0')])
## net-device.h (module 'network'): void ns3::NetDevice::AddLinkChangeCallback(ns3::Callback<void,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> callback) [member function]
cls.add_method('AddLinkChangeCallback',
'void',
[param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetAddress() const [member function]
cls.add_method('GetAddress',
'ns3::Address',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetBroadcast() const [member function]
cls.add_method('GetBroadcast',
'ns3::Address',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Ptr<ns3::Channel> ns3::NetDevice::GetChannel() const [member function]
cls.add_method('GetChannel',
'ns3::Ptr< ns3::Channel >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): uint32_t ns3::NetDevice::GetIfIndex() const [member function]
cls.add_method('GetIfIndex',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): uint16_t ns3::NetDevice::GetMtu() const [member function]
cls.add_method('GetMtu',
'uint16_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function]
cls.add_method('GetMulticast',
'ns3::Address',
[param('ns3::Ipv4Address', 'multicastGroup')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function]
cls.add_method('GetMulticast',
'ns3::Address',
[param('ns3::Ipv6Address', 'addr')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NetDevice::GetNode() const [member function]
cls.add_method('GetNode',
'ns3::Ptr< ns3::Node >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): static ns3::TypeId ns3::NetDevice::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsBridge() const [member function]
cls.add_method('IsBridge',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsBroadcast() const [member function]
cls.add_method('IsBroadcast',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsLinkUp() const [member function]
cls.add_method('IsLinkUp',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsPointToPoint() const [member function]
cls.add_method('IsPointToPoint',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::NeedsArp() const [member function]
cls.add_method('NeedsArp',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('Send',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::SendFrom(ns3::Ptr<ns3::Packet> packet, ns3::Address const & source, ns3::Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('SendFrom',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetAddress(ns3::Address address) [member function]
cls.add_method('SetAddress',
'void',
[param('ns3::Address', 'address')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetIfIndex(uint32_t const index) [member function]
cls.add_method('SetIfIndex',
'void',
[param('uint32_t const', 'index')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::SetMtu(uint16_t const mtu) [member function]
cls.add_method('SetMtu',
'bool',
[param('uint16_t const', 'mtu')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetNode(ns3::Ptr<ns3::Node> node) [member function]
cls.add_method('SetNode',
'void',
[param('ns3::Ptr< ns3::Node >', 'node')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetPromiscReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
cls.add_method('SetPromiscReceiveCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
cls.add_method('SetReceiveCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::SupportsSendFrom() const [member function]
cls.add_method('SupportsSendFrom',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
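
# ns3::Callback is a fixed-arity template, so unused argument slots are
# padded with ns3::empty; the nine-parameter spellings in the callback
# signatures above are those padded forms, not nine real arguments. The
# generated type handlers typically let a plain Python callable be passed
# where such a parameter is expected (an assumption about the generated
# bindings, not something this file guarantees).
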
def register_Ns3NixVector_methods(root_module, cls):
cls.add_output_stream_operator()
## nix-vector.h (module 'network'): ns3::NixVector::NixVector() [constructor]
cls.add_constructor([])
## nix-vector.h (module 'network'): ns3::NixVector::NixVector(ns3::NixVector const & o) [copy constructor]
cls.add_constructor([param('ns3::NixVector const &', 'o')])
## nix-vector.h (module 'network'): void ns3::NixVector::AddNeighborIndex(uint32_t newBits, uint32_t numberOfBits) [member function]
cls.add_method('AddNeighborIndex',
'void',
[param('uint32_t', 'newBits'), param('uint32_t', 'numberOfBits')])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::BitCount(uint32_t numberOfNeighbors) const [member function]
cls.add_method('BitCount',
'uint32_t',
[param('uint32_t', 'numberOfNeighbors')],
is_const=True)
## nix-vector.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::NixVector::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::NixVector >',
[],
is_const=True)
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Deserialize(uint32_t const * buffer, uint32_t size) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('uint32_t const *', 'buffer'), param('uint32_t', 'size')])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::ExtractNeighborIndex(uint32_t numberOfBits) [member function]
cls.add_method('ExtractNeighborIndex',
'uint32_t',
[param('uint32_t', 'numberOfBits')])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetRemainingBits() [member function]
cls.add_method('GetRemainingBits',
'uint32_t',
[])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Serialize(uint32_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint32_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
return
def register_Ns3Node_methods(root_module, cls):
## node.h (module 'network'): ns3::Node::Node(ns3::Node const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Node const &', 'arg0')])
## node.h (module 'network'): ns3::Node::Node() [constructor]
cls.add_constructor([])
## node.h (module 'network'): ns3::Node::Node(uint32_t systemId) [constructor]
cls.add_constructor([param('uint32_t', 'systemId')])
## node.h (module 'network'): uint32_t ns3::Node::AddApplication(ns3::Ptr<ns3::Application> application) [member function]
cls.add_method('AddApplication',
'uint32_t',
[param('ns3::Ptr< ns3::Application >', 'application')])
## node.h (module 'network'): uint32_t ns3::Node::AddDevice(ns3::Ptr<ns3::NetDevice> device) [member function]
cls.add_method('AddDevice',
'uint32_t',
[param('ns3::Ptr< ns3::NetDevice >', 'device')])
## node.h (module 'network'): static bool ns3::Node::ChecksumEnabled() [member function]
cls.add_method('ChecksumEnabled',
'bool',
[],
is_static=True)
## node.h (module 'network'): ns3::Ptr<ns3::Application> ns3::Node::GetApplication(uint32_t index) const [member function]
cls.add_method('GetApplication',
'ns3::Ptr< ns3::Application >',
[param('uint32_t', 'index')],
is_const=True)
## node.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::Node::GetDevice(uint32_t index) const [member function]
cls.add_method('GetDevice',
'ns3::Ptr< ns3::NetDevice >',
[param('uint32_t', 'index')],
is_const=True)
## node.h (module 'network'): uint32_t ns3::Node::GetId() const [member function]
cls.add_method('GetId',
'uint32_t',
[],
is_const=True)
## node.h (module 'network'): ns3::Time ns3::Node::GetLocalTime() const [member function]
cls.add_method('GetLocalTime',
'ns3::Time',
[],
is_const=True)
## node.h (module 'network'): uint32_t ns3::Node::GetNApplications() const [member function]
cls.add_method('GetNApplications',
'uint32_t',
[],
is_const=True)
## node.h (module 'network'): uint32_t ns3::Node::GetNDevices() const [member function]
cls.add_method('GetNDevices',
'uint32_t',
[],
is_const=True)
## node.h (module 'network'): uint32_t ns3::Node::GetSystemId() const [member function]
cls.add_method('GetSystemId',
'uint32_t',
[],
is_const=True)
## node.h (module 'network'): static ns3::TypeId ns3::Node::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## node.h (module 'network'): void ns3::Node::RegisterDeviceAdditionListener(ns3::Callback<void,ns3::Ptr<ns3::NetDevice>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function]
cls.add_method('RegisterDeviceAdditionListener',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')])
## node.h (module 'network'): void ns3::Node::RegisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler, uint16_t protocolType, ns3::Ptr<ns3::NetDevice> device, bool promiscuous=false) [member function]
cls.add_method('RegisterProtocolHandler',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler'), param('uint16_t', 'protocolType'), param('ns3::Ptr< ns3::NetDevice >', 'device'), param('bool', 'promiscuous', default_value='false')])
## node.h (module 'network'): void ns3::Node::UnregisterDeviceAdditionListener(ns3::Callback<void,ns3::Ptr<ns3::NetDevice>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function]
cls.add_method('UnregisterDeviceAdditionListener',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')])
## node.h (module 'network'): void ns3::Node::UnregisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler) [member function]
cls.add_method('UnregisterProtocolHandler',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler')])
## node.h (module 'network'): void ns3::Node::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
## node.h (module 'network'): void ns3::Node::DoInitialize() [member function]
cls.add_method('DoInitialize',
'void',
[],
visibility='protected', is_virtual=True)
return
def register_Ns3Packet_methods(root_module, cls):
cls.add_output_stream_operator()
## packet.h (module 'network'): ns3::Packet::Packet() [constructor]
cls.add_constructor([])
## packet.h (module 'network'): ns3::Packet::Packet(ns3::Packet const & o) [copy constructor]
cls.add_constructor([param('ns3::Packet const &', 'o')])
## packet.h (module 'network'): ns3::Packet::Packet(uint32_t size) [constructor]
cls.add_constructor([param('uint32_t', 'size')])
## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size, bool magic) [constructor]
cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size'), param('bool', 'magic')])
## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size) [constructor]
cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## packet.h (module 'network'): void ns3::Packet::AddAtEnd(ns3::Ptr<const ns3::Packet> packet) [member function]
cls.add_method('AddAtEnd',
'void',
[param('ns3::Ptr< ns3::Packet const >', 'packet')])
## packet.h (module 'network'): void ns3::Packet::AddByteTag(ns3::Tag const & tag) const [member function]
cls.add_method('AddByteTag',
'void',
[param('ns3::Tag const &', 'tag')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::AddHeader(ns3::Header const & header) [member function]
cls.add_method('AddHeader',
'void',
[param('ns3::Header const &', 'header')])
## packet.h (module 'network'): void ns3::Packet::AddPacketTag(ns3::Tag const & tag) const [member function]
cls.add_method('AddPacketTag',
'void',
[param('ns3::Tag const &', 'tag')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::AddPaddingAtEnd(uint32_t size) [member function]
cls.add_method('AddPaddingAtEnd',
'void',
[param('uint32_t', 'size')])
## packet.h (module 'network'): void ns3::Packet::AddTrailer(ns3::Trailer const & trailer) [member function]
cls.add_method('AddTrailer',
'void',
[param('ns3::Trailer const &', 'trailer')])
## packet.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::Packet::BeginItem() const [member function]
cls.add_method('BeginItem',
'ns3::PacketMetadata::ItemIterator',
[],
is_const=True)
## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::Packet >',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::CopyData(uint8_t * buffer, uint32_t size) const [member function]
cls.add_method('CopyData',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::CopyData(std::ostream * os, uint32_t size) const [member function]
cls.add_method('CopyData',
'void',
[param('std::ostream *', 'os'), param('uint32_t', 'size')],
is_const=True)
## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::CreateFragment(uint32_t start, uint32_t length) const [member function]
cls.add_method('CreateFragment',
'ns3::Ptr< ns3::Packet >',
[param('uint32_t', 'start'), param('uint32_t', 'length')],
is_const=True)
## packet.h (module 'network'): static void ns3::Packet::EnableChecking() [member function]
cls.add_method('EnableChecking',
'void',
[],
is_static=True)
## packet.h (module 'network'): static void ns3::Packet::EnablePrinting() [member function]
cls.add_method('EnablePrinting',
'void',
[],
is_static=True)
## packet.h (module 'network'): bool ns3::Packet::FindFirstMatchingByteTag(ns3::Tag & tag) const [member function]
cls.add_method('FindFirstMatchingByteTag',
'bool',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): ns3::ByteTagIterator ns3::Packet::GetByteTagIterator() const [member function]
cls.add_method('GetByteTagIterator',
'ns3::ByteTagIterator',
[],
is_const=True)
## packet.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::Packet::GetNixVector() const [member function]
cls.add_method('GetNixVector',
'ns3::Ptr< ns3::NixVector >',
[],
is_const=True)
## packet.h (module 'network'): ns3::PacketTagIterator ns3::Packet::GetPacketTagIterator() const [member function]
cls.add_method('GetPacketTagIterator',
'ns3::PacketTagIterator',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::GetSize() const [member function]
cls.add_method('GetSize',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): uint64_t ns3::Packet::GetUid() const [member function]
cls.add_method('GetUid',
'uint64_t',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::PeekHeader(ns3::Header & header) const [member function]
cls.add_method('PeekHeader',
'uint32_t',
[param('ns3::Header &', 'header')],
is_const=True)
## packet.h (module 'network'): bool ns3::Packet::PeekPacketTag(ns3::Tag & tag) const [member function]
cls.add_method('PeekPacketTag',
'bool',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::PeekTrailer(ns3::Trailer & trailer) [member function]
cls.add_method('PeekTrailer',
'uint32_t',
[param('ns3::Trailer &', 'trailer')])
## packet.h (module 'network'): void ns3::Packet::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::PrintByteTags(std::ostream & os) const [member function]
cls.add_method('PrintByteTags',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::PrintPacketTags(std::ostream & os) const [member function]
cls.add_method('PrintPacketTags',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::RemoveAllByteTags() [member function]
cls.add_method('RemoveAllByteTags',
'void',
[])
## packet.h (module 'network'): void ns3::Packet::RemoveAllPacketTags() [member function]
cls.add_method('RemoveAllPacketTags',
'void',
[])
## packet.h (module 'network'): void ns3::Packet::RemoveAtEnd(uint32_t size) [member function]
cls.add_method('RemoveAtEnd',
'void',
[param('uint32_t', 'size')])
## packet.h (module 'network'): void ns3::Packet::RemoveAtStart(uint32_t size) [member function]
cls.add_method('RemoveAtStart',
'void',
[param('uint32_t', 'size')])
## packet.h (module 'network'): uint32_t ns3::Packet::RemoveHeader(ns3::Header & header) [member function]
cls.add_method('RemoveHeader',
'uint32_t',
[param('ns3::Header &', 'header')])
## packet.h (module 'network'): bool ns3::Packet::RemovePacketTag(ns3::Tag & tag) [member function]
cls.add_method('RemovePacketTag',
'bool',
[param('ns3::Tag &', 'tag')])
## packet.h (module 'network'): uint32_t ns3::Packet::RemoveTrailer(ns3::Trailer & trailer) [member function]
cls.add_method('RemoveTrailer',
'uint32_t',
[param('ns3::Trailer &', 'trailer')])
## packet.h (module 'network'): bool ns3::Packet::ReplacePacketTag(ns3::Tag & tag) [member function]
cls.add_method('ReplacePacketTag',
'bool',
[param('ns3::Tag &', 'tag')])
## packet.h (module 'network'): uint32_t ns3::Packet::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::SetNixVector(ns3::Ptr<ns3::NixVector> nixVector) [member function]
cls.add_method('SetNixVector',
'void',
[param('ns3::Ptr< ns3::NixVector >', 'nixVector')])
## packet.h (module 'network'): std::string ns3::Packet::ToString() const [member function]
cls.add_method('ToString',
'std::string',
[],
is_const=True)
return
def register_Ns3TypeIdChecker_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')])
return
def register_Ns3TypeIdValue_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor]
cls.add_constructor([param('ns3::TypeId const &', 'value')])
## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function]
cls.add_method('Get',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::TypeId const &', 'value')])
return
def register_Ns3VirtualNetDevice_methods(root_module, cls):
## virtual-net-device.h (module 'virtual-net-device'): ns3::VirtualNetDevice::VirtualNetDevice(ns3::VirtualNetDevice const & arg0) [copy constructor]
cls.add_constructor([param('ns3::VirtualNetDevice const &', 'arg0')])
## virtual-net-device.h (module 'virtual-net-device'): ns3::VirtualNetDevice::VirtualNetDevice() [constructor]
cls.add_constructor([])
## virtual-net-device.h (module 'virtual-net-device'): void ns3::VirtualNetDevice::AddLinkChangeCallback(ns3::Callback<void,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> callback) [member function]
cls.add_method('AddLinkChangeCallback',
'void',
[param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')],
is_virtual=True)
## virtual-net-device.h (module 'virtual-net-device'): ns3::Address ns3::VirtualNetDevice::GetAddress() const [member function]
cls.add_method('GetAddress',
'ns3::Address',
[],
is_const=True, is_virtual=True)
## virtual-net-device.h (module 'virtual-net-device'): ns3::Address ns3::VirtualNetDevice::GetBroadcast() const [member function]
cls.add_method('GetBroadcast',
'ns3::Address',
[],
is_const=True, is_virtual=True)
## virtual-net-device.h (module 'virtual-net-device'): ns3::Ptr<ns3::Channel> ns3::VirtualNetDevice::GetChannel() const [member function]
cls.add_method('GetChannel',
'ns3::Ptr< ns3::Channel >',
[],
is_const=True, is_virtual=True)
## virtual-net-device.h (module 'virtual-net-device'): uint32_t ns3::VirtualNetDevice::GetIfIndex() const [member function]
cls.add_method('GetIfIndex',
'uint32_t',
[],
is_const=True, is_virtual=True)
## virtual-net-device.h (module 'virtual-net-device'): uint16_t ns3::VirtualNetDevice::GetMtu() const [member function]
cls.add_method('GetMtu',
'uint16_t',
[],
is_const=True, is_virtual=True)
## virtual-net-device.h (module 'virtual-net-device'): ns3::Address ns3::VirtualNetDevice::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function]
cls.add_method('GetMulticast',
'ns3::Address',
[param('ns3::Ipv4Address', 'multicastGroup')],
is_const=True, is_virtual=True)
## virtual-net-device.h (module 'virtual-net-device'): ns3::Address ns3::VirtualNetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function]
cls.add_method('GetMulticast',
'ns3::Address',
[param('ns3::Ipv6Address', 'addr')],
is_const=True, is_virtual=True)
## virtual-net-device.h (module 'virtual-net-device'): ns3::Ptr<ns3::Node> ns3::VirtualNetDevice::GetNode() const [member function]
cls.add_method('GetNode',
'ns3::Ptr< ns3::Node >',
[],
is_const=True, is_virtual=True)
## virtual-net-device.h (module 'virtual-net-device'): static ns3::TypeId ns3::VirtualNetDevice::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## virtual-net-device.h (module 'virtual-net-device'): bool ns3::VirtualNetDevice::IsBridge() const [member function]
cls.add_method('IsBridge',
'bool',
[],
is_const=True, is_virtual=True)
## virtual-net-device.h (module 'virtual-net-device'): bool ns3::VirtualNetDevice::IsBroadcast() const [member function]
cls.add_method('IsBroadcast',
'bool',
[],
is_const=True, is_virtual=True)
## virtual-net-device.h (module 'virtual-net-device'): bool ns3::VirtualNetDevice::IsLinkUp() const [member function]
cls.add_method('IsLinkUp',
'bool',
[],
is_const=True, is_virtual=True)
## virtual-net-device.h (module 'virtual-net-device'): bool ns3::VirtualNetDevice::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_const=True, is_virtual=True)
## virtual-net-device.h (module 'virtual-net-device'): bool ns3::VirtualNetDevice::IsPointToPoint() const [member function]
cls.add_method('IsPointToPoint',
'bool',
[],
is_const=True, is_virtual=True)
## virtual-net-device.h (module 'virtual-net-device'): bool ns3::VirtualNetDevice::NeedsArp() const [member function]
cls.add_method('NeedsArp',
'bool',
[],
is_const=True, is_virtual=True)
## virtual-net-device.h (module 'virtual-net-device'): bool ns3::VirtualNetDevice::Receive(ns3::Ptr<ns3::Packet> packet, uint16_t protocol, ns3::Address const & source, ns3::Address const & destination, ns3::NetDevice::PacketType packetType) [member function]
cls.add_method('Receive',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('uint16_t', 'protocol'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'destination'), param('ns3::NetDevice::PacketType', 'packetType')])
## virtual-net-device.h (module 'virtual-net-device'): bool ns3::VirtualNetDevice::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('Send',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_virtual=True)
## virtual-net-device.h (module 'virtual-net-device'): bool ns3::VirtualNetDevice::SendFrom(ns3::Ptr<ns3::Packet> packet, ns3::Address const & source, ns3::Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('SendFrom',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_virtual=True)
## virtual-net-device.h (module 'virtual-net-device'): void ns3::VirtualNetDevice::SetAddress(ns3::Address address) [member function]
cls.add_method('SetAddress',
'void',
[param('ns3::Address', 'address')],
is_virtual=True)
## virtual-net-device.h (module 'virtual-net-device'): void ns3::VirtualNetDevice::SetIfIndex(uint32_t const index) [member function]
cls.add_method('SetIfIndex',
'void',
[param('uint32_t const', 'index')],
is_virtual=True)
## virtual-net-device.h (module 'virtual-net-device'): void ns3::VirtualNetDevice::SetIsPointToPoint(bool isPointToPoint) [member function]
cls.add_method('SetIsPointToPoint',
'void',
[param('bool', 'isPointToPoint')])
## virtual-net-device.h (module 'virtual-net-device'): bool ns3::VirtualNetDevice::SetMtu(uint16_t const mtu) [member function]
cls.add_method('SetMtu',
'bool',
[param('uint16_t const', 'mtu')],
is_virtual=True)
## virtual-net-device.h (module 'virtual-net-device'): void ns3::VirtualNetDevice::SetNeedsArp(bool needsArp) [member function]
cls.add_method('SetNeedsArp',
'void',
[param('bool', 'needsArp')])
## virtual-net-device.h (module 'virtual-net-device'): void ns3::VirtualNetDevice::SetNode(ns3::Ptr<ns3::Node> node) [member function]
cls.add_method('SetNode',
'void',
[param('ns3::Ptr< ns3::Node >', 'node')],
is_virtual=True)
## virtual-net-device.h (module 'virtual-net-device'): void ns3::VirtualNetDevice::SetPromiscReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
cls.add_method('SetPromiscReceiveCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_virtual=True)
## virtual-net-device.h (module 'virtual-net-device'): void ns3::VirtualNetDevice::SetReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
cls.add_method('SetReceiveCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_virtual=True)
## virtual-net-device.h (module 'virtual-net-device'): void ns3::VirtualNetDevice::SetSendCallback(ns3::Callback<bool, ns3::Ptr<ns3::Packet>, ns3::Address const&, ns3::Address const&, unsigned short, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> transmitCb) [member function]
cls.add_method('SetSendCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::Packet >, ns3::Address const &, ns3::Address const &, unsigned short, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'transmitCb')])
## virtual-net-device.h (module 'virtual-net-device'): void ns3::VirtualNetDevice::SetSupportsSendFrom(bool supportsSendFrom) [member function]
cls.add_method('SetSupportsSendFrom',
'void',
[param('bool', 'supportsSendFrom')])
## virtual-net-device.h (module 'virtual-net-device'): bool ns3::VirtualNetDevice::SupportsSendFrom() const [member function]
cls.add_method('SupportsSendFrom',
'bool',
[],
is_const=True, is_virtual=True)
## virtual-net-device.h (module 'virtual-net-device'): void ns3::VirtualNetDevice::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
return
def register_Ns3AddressChecker_methods(root_module, cls):
## address.h (module 'network'): ns3::AddressChecker::AddressChecker() [constructor]
cls.add_constructor([])
## address.h (module 'network'): ns3::AddressChecker::AddressChecker(ns3::AddressChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AddressChecker const &', 'arg0')])
return
def register_Ns3AddressValue_methods(root_module, cls):
## address.h (module 'network'): ns3::AddressValue::AddressValue() [constructor]
cls.add_constructor([])
## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::AddressValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AddressValue const &', 'arg0')])
## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::Address const & value) [constructor]
cls.add_constructor([param('ns3::Address const &', 'value')])
## address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::AddressValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## address.h (module 'network'): bool ns3::AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## address.h (module 'network'): ns3::Address ns3::AddressValue::Get() const [member function]
cls.add_method('Get',
'ns3::Address',
[],
is_const=True)
## address.h (module 'network'): std::string ns3::AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## address.h (module 'network'): void ns3::AddressValue::Set(ns3::Address const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Address const &', 'value')])
return
def register_Ns3HashImplementation_methods(root_module, cls):
## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation(ns3::Hash::Implementation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Implementation const &', 'arg0')])
## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation() [constructor]
cls.add_constructor([])
## hash-function.h (module 'core'): uint32_t ns3::Hash::Implementation::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_pure_virtual=True, is_virtual=True)
## hash-function.h (module 'core'): uint64_t ns3::Hash::Implementation::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): void ns3::Hash::Implementation::clear() [member function]
cls.add_method('clear',
'void',
[],
is_pure_virtual=True, is_virtual=True)
return
def register_Ns3HashFunctionFnv1a_methods(root_module, cls):
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a(ns3::Hash::Function::Fnv1a const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Function::Fnv1a const &', 'arg0')])
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a() [constructor]
cls.add_constructor([])
## hash-fnv.h (module 'core'): uint32_t ns3::Hash::Function::Fnv1a::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-fnv.h (module 'core'): uint64_t ns3::Hash::Function::Fnv1a::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-fnv.h (module 'core'): void ns3::Hash::Function::Fnv1a::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3HashFunctionHash32_methods(root_module, cls):
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Function::Hash32 const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Function::Hash32 const &', 'arg0')])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Hash32Function_ptr hp) [constructor]
cls.add_constructor([param('ns3::Hash::Hash32Function_ptr', 'hp')])
## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash32::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): void ns3::Hash::Function::Hash32::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3HashFunctionHash64_methods(root_module, cls):
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Function::Hash64 const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Function::Hash64 const &', 'arg0')])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Hash64Function_ptr hp) [constructor]
cls.add_constructor([param('ns3::Hash::Hash64Function_ptr', 'hp')])
## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash64::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): uint64_t ns3::Hash::Function::Hash64::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): void ns3::Hash::Function::Hash64::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3HashFunctionMurmur3_methods(root_module, cls):
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3(ns3::Hash::Function::Murmur3 const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Function::Murmur3 const &', 'arg0')])
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3() [constructor]
cls.add_constructor([])
## hash-murmur3.h (module 'core'): uint32_t ns3::Hash::Function::Murmur3::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-murmur3.h (module 'core'): uint64_t ns3::Hash::Function::Murmur3::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-murmur3.h (module 'core'): void ns3::Hash::Function::Murmur3::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_functions(root_module):
module = root_module
register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module)
register_functions_ns3_Hash(module.get_submodule('Hash'), root_module)
return
def register_functions_ns3_FatalImpl(module, root_module):
return
def register_functions_ns3_Hash(module, root_module):
register_functions_ns3_Hash_Function(module.get_submodule('Function'), root_module)
return
def register_functions_ns3_Hash_Function(module, root_module):
return
def main():
out = FileCodeSink(sys.stdout)
root_module = module_init()
register_types(root_module)
register_methods(root_module)
register_functions(root_module)
root_module.generate(out)
if __name__ == '__main__':
main()
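# --- Editorial sketch (not part of the generated bindings above) ---
# The register_* functions in this file are machine-generated, but they all
# follow the same hand-writable pybindgen pattern: declare a module, add a
# class, then mirror each C++ method with add_method.  A minimal standalone
# rendition, assuming pybindgen is importable; the Counter/counter.h names
# are hypothetical, not ns-3 types.
def _editorial_pybindgen_sketch():
    import sys
    from pybindgen import Module, FileCodeSink, param
    mod = Module('example')
    mod.add_include('"counter.h"')  # hypothetical C++ header
    cls = mod.add_class('Counter')
    cls.add_constructor([])
    cls.add_method('Increment', 'void', [param('uint32_t', 'step')])
    cls.add_method('GetValue', 'uint32_t', [], is_const=True)
    mod.generate(FileCodeSink(sys.stdout))  # write the generated C wrapper to stdout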
|
gpl-2.0
|
habedi/Emacs-theme-creator
|
venv/lib/python2.7/site-packages/distribute-0.6.24-py2.7.egg/setuptools/tests/test_resources.py
|
62
|
24677
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# NOTE: the shebang and encoding lines are for ScriptHeaderTests; do not remove
from unittest import TestCase, makeSuite
from pkg_resources import *
from setuptools.command.easy_install import get_script_header, is_sh
import os, pkg_resources, sys, StringIO, tempfile, shutil
try: frozenset
except NameError:
from sets import ImmutableSet as frozenset
_MAX_LENGTH = 80  # truncation threshold for safe_repr, as in Python 2.7's unittest.util
def safe_repr(obj, short=False):
    """Copied from Python 2.7's unittest.util."""
try:
result = repr(obj)
except Exception:
result = object.__repr__(obj)
if not short or len(result) < _MAX_LENGTH:
return result
return result[:_MAX_LENGTH] + ' [truncated]...'
class Metadata(EmptyProvider):
"""Mock object to return metadata as if from an on-disk distribution"""
def __init__(self,*pairs):
self.metadata = dict(pairs)
def has_metadata(self,name):
return name in self.metadata
def get_metadata(self,name):
return self.metadata[name]
def get_metadata_lines(self,name):
return yield_lines(self.get_metadata(name))
class DistroTests(TestCase):
def testCollection(self):
# empty path should produce no distributions
ad = Environment([], platform=None, python=None)
self.assertEqual(list(ad), [])
self.assertEqual(ad['FooPkg'],[])
ad.add(Distribution.from_filename("FooPkg-1.3_1.egg"))
ad.add(Distribution.from_filename("FooPkg-1.4-py2.4-win32.egg"))
ad.add(Distribution.from_filename("FooPkg-1.2-py2.4.egg"))
# Name is in there now
self.assert_(ad['FooPkg'])
# But only 1 package
self.assertEqual(list(ad), ['foopkg'])
# Distributions sort by version
self.assertEqual(
[dist.version for dist in ad['FooPkg']], ['1.4','1.3-1','1.2']
)
# Removing a distribution leaves sequence alone
ad.remove(ad['FooPkg'][1])
self.assertEqual(
[dist.version for dist in ad['FooPkg']], ['1.4','1.2']
)
# And inserting adds them in order
ad.add(Distribution.from_filename("FooPkg-1.9.egg"))
self.assertEqual(
[dist.version for dist in ad['FooPkg']], ['1.9','1.4','1.2']
)
ws = WorkingSet([])
foo12 = Distribution.from_filename("FooPkg-1.2-py2.4.egg")
foo14 = Distribution.from_filename("FooPkg-1.4-py2.4-win32.egg")
req, = parse_requirements("FooPkg>=1.3")
# Nominal case: no distros on path, should yield all applicable
self.assertEqual(ad.best_match(req,ws).version, '1.9')
# If a matching distro is already installed, should return only that
ws.add(foo14); self.assertEqual(ad.best_match(req,ws).version, '1.4')
# If the first matching distro is unsuitable, it's a version conflict
ws = WorkingSet([]); ws.add(foo12); ws.add(foo14)
self.assertRaises(VersionConflict, ad.best_match, req, ws)
# If more than one match on the path, the first one takes precedence
        ws = WorkingSet([]); ws.add(foo14); ws.add(foo12); ws.add(foo14)
self.assertEqual(ad.best_match(req,ws).version, '1.4')
def checkFooPkg(self,d):
self.assertEqual(d.project_name, "FooPkg")
self.assertEqual(d.key, "foopkg")
self.assertEqual(d.version, "1.3-1")
self.assertEqual(d.py_version, "2.4")
self.assertEqual(d.platform, "win32")
self.assertEqual(d.parsed_version, parse_version("1.3-1"))
def testDistroBasics(self):
d = Distribution(
"/some/path",
project_name="FooPkg",version="1.3-1",py_version="2.4",platform="win32"
)
self.checkFooPkg(d)
d = Distribution("/some/path")
self.assertEqual(d.py_version, sys.version[:3])
self.assertEqual(d.platform, None)
def testDistroParse(self):
d = Distribution.from_filename("FooPkg-1.3_1-py2.4-win32.egg")
self.checkFooPkg(d)
d = Distribution.from_filename("FooPkg-1.3_1-py2.4-win32.egg-info")
self.checkFooPkg(d)
def testDistroMetadata(self):
d = Distribution(
"/some/path", project_name="FooPkg", py_version="2.4", platform="win32",
metadata = Metadata(
('PKG-INFO',"Metadata-Version: 1.0\nVersion: 1.3-1\n")
)
)
self.checkFooPkg(d)
def distRequires(self, txt):
return Distribution("/foo", metadata=Metadata(('depends.txt', txt)))
def checkRequires(self, dist, txt, extras=()):
self.assertEqual(
list(dist.requires(extras)),
list(parse_requirements(txt))
)
def testDistroDependsSimple(self):
for v in "Twisted>=1.5", "Twisted>=1.5\nZConfig>=2.0":
self.checkRequires(self.distRequires(v), v)
def testResolve(self):
ad = Environment([]); ws = WorkingSet([])
# Resolving no requirements -> nothing to install
self.assertEqual( list(ws.resolve([],ad)), [] )
# Request something not in the collection -> DistributionNotFound
self.assertRaises(
DistributionNotFound, ws.resolve, parse_requirements("Foo"), ad
)
Foo = Distribution.from_filename(
"/foo_dir/Foo-1.2.egg",
metadata=Metadata(('depends.txt', "[bar]\nBaz>=2.0"))
)
ad.add(Foo); ad.add(Distribution.from_filename("Foo-0.9.egg"))
# Request thing(s) that are available -> list to activate
for i in range(3):
targets = list(ws.resolve(parse_requirements("Foo"), ad))
self.assertEqual(targets, [Foo])
map(ws.add,targets)
self.assertRaises(VersionConflict, ws.resolve,
parse_requirements("Foo==0.9"), ad)
ws = WorkingSet([]) # reset
# Request an extra that causes an unresolved dependency for "Baz"
self.assertRaises(
DistributionNotFound, ws.resolve,parse_requirements("Foo[bar]"), ad
)
Baz = Distribution.from_filename(
"/foo_dir/Baz-2.1.egg", metadata=Metadata(('depends.txt', "Foo"))
)
ad.add(Baz)
# Activation list now includes resolved dependency
self.assertEqual(
list(ws.resolve(parse_requirements("Foo[bar]"), ad)), [Foo,Baz]
)
# Requests for conflicting versions produce VersionConflict
self.assertRaises( VersionConflict,
ws.resolve, parse_requirements("Foo==1.2\nFoo!=1.2"), ad
)
def testDistroDependsOptions(self):
d = self.distRequires("""
Twisted>=1.5
[docgen]
ZConfig>=2.0
docutils>=0.3
[fastcgi]
fcgiapp>=0.1""")
self.checkRequires(d,"Twisted>=1.5")
self.checkRequires(
d,"Twisted>=1.5 ZConfig>=2.0 docutils>=0.3".split(), ["docgen"]
)
self.checkRequires(
d,"Twisted>=1.5 fcgiapp>=0.1".split(), ["fastcgi"]
)
self.checkRequires(
d,"Twisted>=1.5 ZConfig>=2.0 docutils>=0.3 fcgiapp>=0.1".split(),
["docgen","fastcgi"]
)
self.checkRequires(
d,"Twisted>=1.5 fcgiapp>=0.1 ZConfig>=2.0 docutils>=0.3".split(),
["fastcgi", "docgen"]
)
self.assertRaises(UnknownExtra, d.requires, ["foo"])
def testSetuptoolsDistributeCombination(self):
# Ensure that installing a 0.7-series setuptools fails. PJE says that
# it will not co-exist.
ws = WorkingSet([])
d = Distribution(
"/some/path",
project_name="setuptools",
version="0.7a1")
self.assertRaises(ValueError, ws.add, d)
# A 0.6-series is no problem
d2 = Distribution(
"/some/path",
project_name="setuptools",
version="0.6c9")
ws.add(d2)
        # a nonexistent version also needs to work
ws = WorkingSet([])
d3 = Distribution(
"/some/path",
project_name="setuptools")
ws.add(d3)
class EntryPointTests(TestCase):
def assertfields(self, ep):
self.assertEqual(ep.name,"foo")
self.assertEqual(ep.module_name,"setuptools.tests.test_resources")
self.assertEqual(ep.attrs, ("EntryPointTests",))
self.assertEqual(ep.extras, ("x",))
self.assert_(ep.load() is EntryPointTests)
self.assertEqual(
str(ep),
"foo = setuptools.tests.test_resources:EntryPointTests [x]"
)
def setUp(self):
self.dist = Distribution.from_filename(
"FooPkg-1.2-py2.4.egg", metadata=Metadata(('requires.txt','[x]')))
def testBasics(self):
ep = EntryPoint(
"foo", "setuptools.tests.test_resources", ["EntryPointTests"],
["x"], self.dist
)
self.assertfields(ep)
def testParse(self):
s = "foo = setuptools.tests.test_resources:EntryPointTests [x]"
ep = EntryPoint.parse(s, self.dist)
self.assertfields(ep)
ep = EntryPoint.parse("bar baz= spammity[PING]")
self.assertEqual(ep.name,"bar baz")
self.assertEqual(ep.module_name,"spammity")
self.assertEqual(ep.attrs, ())
self.assertEqual(ep.extras, ("ping",))
ep = EntryPoint.parse(" fizzly = wocka:foo")
self.assertEqual(ep.name,"fizzly")
self.assertEqual(ep.module_name,"wocka")
self.assertEqual(ep.attrs, ("foo",))
self.assertEqual(ep.extras, ())
def testRejects(self):
for ep in [
"foo", "x=1=2", "x=a:b:c", "q=x/na", "fez=pish:tush-z", "x=f[a]>2",
]:
try: EntryPoint.parse(ep)
except ValueError: pass
else: raise AssertionError("Should've been bad", ep)
def checkSubMap(self, m):
self.assertEqual(len(m), len(self.submap_expect))
for key, ep in self.submap_expect.iteritems():
self.assertEqual(repr(m.get(key)), repr(ep))
submap_expect = dict(
feature1=EntryPoint('feature1', 'somemodule', ['somefunction']),
feature2=EntryPoint('feature2', 'another.module', ['SomeClass'], ['extra1','extra2']),
feature3=EntryPoint('feature3', 'this.module', extras=['something'])
)
submap_str = """
# define features for blah blah
feature1 = somemodule:somefunction
feature2 = another.module:SomeClass [extra1,extra2]
feature3 = this.module [something]
"""
def testParseList(self):
self.checkSubMap(EntryPoint.parse_group("xyz", self.submap_str))
self.assertRaises(ValueError, EntryPoint.parse_group, "x a", "foo=bar")
self.assertRaises(ValueError, EntryPoint.parse_group, "x",
["foo=baz", "foo=bar"])
def testParseMap(self):
m = EntryPoint.parse_map({'xyz':self.submap_str})
self.checkSubMap(m['xyz'])
self.assertEqual(m.keys(),['xyz'])
m = EntryPoint.parse_map("[xyz]\n"+self.submap_str)
self.checkSubMap(m['xyz'])
self.assertEqual(m.keys(),['xyz'])
self.assertRaises(ValueError, EntryPoint.parse_map, ["[xyz]", "[xyz]"])
self.assertRaises(ValueError, EntryPoint.parse_map, self.submap_str)
class RequirementsTests(TestCase):
def testBasics(self):
r = Requirement.parse("Twisted>=1.2")
self.assertEqual(str(r),"Twisted>=1.2")
self.assertEqual(repr(r),"Requirement.parse('Twisted>=1.2')")
self.assertEqual(r, Requirement("Twisted", [('>=','1.2')], ()))
self.assertEqual(r, Requirement("twisTed", [('>=','1.2')], ()))
self.assertNotEqual(r, Requirement("Twisted", [('>=','2.0')], ()))
self.assertNotEqual(r, Requirement("Zope", [('>=','1.2')], ()))
self.assertNotEqual(r, Requirement("Zope", [('>=','3.0')], ()))
self.assertNotEqual(r, Requirement.parse("Twisted[extras]>=1.2"))
def testOrdering(self):
r1 = Requirement("Twisted", [('==','1.2c1'),('>=','1.2')], ())
r2 = Requirement("Twisted", [('>=','1.2'),('==','1.2c1')], ())
self.assertEqual(r1,r2)
self.assertEqual(str(r1),str(r2))
self.assertEqual(str(r2),"Twisted==1.2c1,>=1.2")
def testBasicContains(self):
r = Requirement("Twisted", [('>=','1.2')], ())
foo_dist = Distribution.from_filename("FooPkg-1.3_1.egg")
twist11 = Distribution.from_filename("Twisted-1.1.egg")
twist12 = Distribution.from_filename("Twisted-1.2.egg")
self.assert_(parse_version('1.2') in r)
self.assert_(parse_version('1.1') not in r)
self.assert_('1.2' in r)
self.assert_('1.1' not in r)
self.assert_(foo_dist not in r)
self.assert_(twist11 not in r)
self.assert_(twist12 in r)
def testAdvancedContains(self):
r, = parse_requirements("Foo>=1.2,<=1.3,==1.9,>2.0,!=2.5,<3.0,==4.5")
for v in ('1.2','1.2.2','1.3','1.9','2.0.1','2.3','2.6','3.0c1','4.5'):
self.assert_(v in r, (v,r))
for v in ('1.2c1','1.3.1','1.5','1.9.1','2.0','2.5','3.0','4.0'):
self.assert_(v not in r, (v,r))
def testOptionsAndHashing(self):
r1 = Requirement.parse("Twisted[foo,bar]>=1.2")
r2 = Requirement.parse("Twisted[bar,FOO]>=1.2")
r3 = Requirement.parse("Twisted[BAR,FOO]>=1.2.0")
self.assertEqual(r1,r2)
self.assertEqual(r1,r3)
self.assertEqual(r1.extras, ("foo","bar"))
self.assertEqual(r2.extras, ("bar","foo")) # extras are normalized
self.assertEqual(hash(r1), hash(r2))
self.assertEqual(
hash(r1), hash(("twisted", ((">=",parse_version("1.2")),),
frozenset(["foo","bar"])))
)
def testVersionEquality(self):
r1 = Requirement.parse("foo==0.3a2")
r2 = Requirement.parse("foo!=0.3a4")
d = Distribution.from_filename
self.assert_(d("foo-0.3a4.egg") not in r1)
self.assert_(d("foo-0.3a1.egg") not in r1)
self.assert_(d("foo-0.3a4.egg") not in r2)
self.assert_(d("foo-0.3a2.egg") in r1)
self.assert_(d("foo-0.3a2.egg") in r2)
self.assert_(d("foo-0.3a3.egg") in r2)
self.assert_(d("foo-0.3a5.egg") in r2)
def testDistributeSetuptoolsOverride(self):
# Plain setuptools or distribute mean we return distribute.
self.assertEqual(
Requirement.parse('setuptools').project_name, 'distribute')
self.assertEqual(
Requirement.parse('distribute').project_name, 'distribute')
# setuptools lower than 0.7 means distribute
self.assertEqual(
Requirement.parse('setuptools==0.6c9').project_name, 'distribute')
self.assertEqual(
Requirement.parse('setuptools==0.6c10').project_name, 'distribute')
self.assertEqual(
Requirement.parse('setuptools>=0.6').project_name, 'distribute')
self.assertEqual(
Requirement.parse('setuptools < 0.7').project_name, 'distribute')
# setuptools 0.7 and higher means setuptools.
self.assertEqual(
Requirement.parse('setuptools == 0.7').project_name, 'setuptools')
self.assertEqual(
Requirement.parse('setuptools == 0.7a1').project_name, 'setuptools')
self.assertEqual(
Requirement.parse('setuptools >= 0.7').project_name, 'setuptools')
class ParseTests(TestCase):
def testEmptyParse(self):
self.assertEqual(list(parse_requirements('')), [])
def testYielding(self):
for inp,out in [
([], []), ('x',['x']), ([[]],[]), (' x\n y', ['x','y']),
(['x\n\n','y'], ['x','y']),
]:
self.assertEqual(list(pkg_resources.yield_lines(inp)),out)
def testSplitting(self):
self.assertEqual(
list(
pkg_resources.split_sections("""
x
[Y]
z
a
[b ]
# foo
c
[ d]
[q]
v
"""
)
),
[(None,["x"]), ("Y",["z","a"]), ("b",["c"]), ("d",[]), ("q",["v"])]
)
self.assertRaises(ValueError,list,pkg_resources.split_sections("[foo"))
def testSafeName(self):
self.assertEqual(safe_name("adns-python"), "adns-python")
self.assertEqual(safe_name("WSGI Utils"), "WSGI-Utils")
self.assertEqual(safe_name("WSGI Utils"), "WSGI-Utils")
self.assertEqual(safe_name("Money$$$Maker"), "Money-Maker")
self.assertNotEqual(safe_name("peak.web"), "peak-web")
def testSafeVersion(self):
self.assertEqual(safe_version("1.2-1"), "1.2-1")
self.assertEqual(safe_version("1.2 alpha"), "1.2.alpha")
self.assertEqual(safe_version("2.3.4 20050521"), "2.3.4.20050521")
self.assertEqual(safe_version("Money$$$Maker"), "Money-Maker")
self.assertEqual(safe_version("peak.web"), "peak.web")
def testSimpleRequirements(self):
self.assertEqual(
list(parse_requirements('Twis-Ted>=1.2-1')),
[Requirement('Twis-Ted',[('>=','1.2-1')], ())]
)
self.assertEqual(
list(parse_requirements('Twisted >=1.2, \ # more\n<2.0')),
[Requirement('Twisted',[('>=','1.2'),('<','2.0')], ())]
)
self.assertEqual(
Requirement.parse("FooBar==1.99a3"),
Requirement("FooBar", [('==','1.99a3')], ())
)
self.assertRaises(ValueError,Requirement.parse,">=2.3")
self.assertRaises(ValueError,Requirement.parse,"x\\")
self.assertRaises(ValueError,Requirement.parse,"x==2 q")
self.assertRaises(ValueError,Requirement.parse,"X==1\nY==2")
self.assertRaises(ValueError,Requirement.parse,"#")
def testVersionEquality(self):
def c(s1,s2):
p1, p2 = parse_version(s1),parse_version(s2)
self.assertEqual(p1,p2, (s1,s2,p1,p2))
c('0.4', '0.4.0')
c('0.4.0.0', '0.4.0')
c('0.4.0-0', '0.4-0')
c('0pl1', '0.0pl1')
c('0pre1', '0.0c1')
c('0.0.0preview1', '0c1')
c('0.0c1', '0rc1')
c('1.2a1', '1.2.a.1'); c('1.2...a', '1.2a')
def testVersionOrdering(self):
def c(s1,s2):
p1, p2 = parse_version(s1),parse_version(s2)
self.assert_(p1<p2, (s1,s2,p1,p2))
c('2.1','2.1.1')
c('2.1.0','2.10')
c('2a1','2b0')
c('2b1','2c0')
c('2a1','2.1')
c('2.3a1', '2.3')
c('2.1-1', '2.1-2')
c('2.1-1', '2.1.1')
c('2.1', '2.1.1-1')
c('2.1', '2.1pl4')
c('2.1a0-20040501', '2.1')
c('1.1', '02.1')
c('A56','B27')
c('3.2', '3.2.pl0')
c('3.2-1', '3.2pl1')
c('3.2pl1', '3.2pl1-1')
c('0.4', '4.0')
c('0.0.4', '0.4.0')
c('0pl1', '0.4pl1')
c('2.1dev','2.1a0')
c('2.1.0rc1','2.1.0')
c('2.1.0','2.1.0-rc0')
c('2.1.0','2.1.0-a')
c('2.1.0','2.1.0-alpha')
c('2.1.0','2.1.0-foo')
c('1.0','1.0-1')
c('1.0-1','1.0.1')
c('1.0a','1.0b')
c('1.0dev','1.0rc1')
c('1.0pre','1.0')
c('1.0pre','1.0')
c('1.0a','1.0-a')
c('1.0rc1','1.0-rc1')
torture ="""
0.80.1-3 0.80.1-2 0.80.1-1 0.79.9999+0.80.0pre4-1
0.79.9999+0.80.0pre2-3 0.79.9999+0.80.0pre2-2
0.77.2-1 0.77.1-1 0.77.0-1
""".split()
for p,v1 in enumerate(torture):
for v2 in torture[p+1:]:
c(v2,v1)
class ScriptHeaderTests(TestCase):
non_ascii_exe = '/Users/José/bin/python'
def test_get_script_header(self):
if not sys.platform.startswith('java') or not is_sh(sys.executable):
# This test is for non-Jython platforms
self.assertEqual(get_script_header('#!/usr/local/bin/python'),
'#!%s\n' % os.path.normpath(sys.executable))
self.assertEqual(get_script_header('#!/usr/bin/python -x'),
'#!%s -x\n' % os.path.normpath(sys.executable))
self.assertEqual(get_script_header('#!/usr/bin/python',
executable=self.non_ascii_exe),
'#!%s -x\n' % self.non_ascii_exe)
def test_get_script_header_jython_workaround(self):
# This test doesn't work with Python 3 in some locales
if (sys.version_info >= (3,) and os.environ.get("LC_CTYPE")
in (None, "C", "POSIX")):
return
platform = sys.platform
sys.platform = 'java1.5.0_13'
stdout = sys.stdout
try:
# A mock sys.executable that uses a shebang line (this file)
exe = os.path.normpath(os.path.splitext(__file__)[0] + '.py')
self.assertEqual(
get_script_header('#!/usr/local/bin/python', executable=exe),
'#!/usr/bin/env %s\n' % exe)
# Ensure we generate what is basically a broken shebang line
# when there's options, with a warning emitted
sys.stdout = sys.stderr = StringIO.StringIO()
self.assertEqual(get_script_header('#!/usr/bin/python -x',
executable=exe),
'#!%s -x\n' % exe)
self.assert_('Unable to adapt shebang line' in sys.stdout.getvalue())
sys.stdout = sys.stderr = StringIO.StringIO()
self.assertEqual(get_script_header('#!/usr/bin/python',
executable=self.non_ascii_exe),
'#!%s -x\n' % self.non_ascii_exe)
self.assert_('Unable to adapt shebang line' in sys.stdout.getvalue())
finally:
sys.platform = platform
sys.stdout = stdout
class NamespaceTests(TestCase):
def setUp(self):
self._ns_pkgs = pkg_resources._namespace_packages.copy()
self._tmpdir = tempfile.mkdtemp(prefix="tests-distribute-")
os.makedirs(os.path.join(self._tmpdir, "site-pkgs"))
self._prev_sys_path = sys.path[:]
sys.path.append(os.path.join(self._tmpdir, "site-pkgs"))
def tearDown(self):
shutil.rmtree(self._tmpdir)
pkg_resources._namespace_packages = self._ns_pkgs.copy()
sys.path = self._prev_sys_path[:]
def _assertIn(self, member, container):
""" assertIn and assertTrue does not exist in Python2.3"""
if member not in container:
standardMsg = '%s not found in %s' % (safe_repr(member),
safe_repr(container))
            self.fail(standardMsg)
def test_two_levels_deep(self):
"""
Test nested namespace packages
Create namespace packages in the following tree :
site-packages-1/pkg1/pkg2
site-packages-2/pkg1/pkg2
Check both are in the _namespace_packages dict and that their __path__
is correct
"""
sys.path.append(os.path.join(self._tmpdir, "site-pkgs2"))
os.makedirs(os.path.join(self._tmpdir, "site-pkgs", "pkg1", "pkg2"))
os.makedirs(os.path.join(self._tmpdir, "site-pkgs2", "pkg1", "pkg2"))
ns_str = "__import__('pkg_resources').declare_namespace(__name__)\n"
for site in ["site-pkgs", "site-pkgs2"]:
pkg1_init = open(os.path.join(self._tmpdir, site,
"pkg1", "__init__.py"), "w")
pkg1_init.write(ns_str)
pkg1_init.close()
pkg2_init = open(os.path.join(self._tmpdir, site,
"pkg1", "pkg2", "__init__.py"), "w")
pkg2_init.write(ns_str)
pkg2_init.close()
import pkg1
self._assertIn("pkg1", pkg_resources._namespace_packages.keys())
try:
import pkg1.pkg2
except ImportError, e:
self.fail("Distribute tried to import the parent namespace package")
# check the _namespace_packages dict
self._assertIn("pkg1.pkg2", pkg_resources._namespace_packages.keys())
self.assertEqual(pkg_resources._namespace_packages["pkg1"], ["pkg1.pkg2"])
# check the __path__ attribute contains both paths
self.assertEqual(pkg1.pkg2.__path__, [
os.path.join(self._tmpdir, "site-pkgs", "pkg1", "pkg2"),
os.path.join(self._tmpdir, "site-pkgs2", "pkg1", "pkg2") ])
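# --- Editorial sketch (not part of the original test module) ---
# A compact illustration of the pkg_resources behaviour the tests above
# exercise: requirement parsing, version containment, and version ordering.
# It uses only names already imported at the top of this file.
def _editorial_pkg_resources_sketch():
    req = Requirement.parse("Twisted>=1.2")
    assert "1.3" in req                      # plain version strings are accepted
    assert parse_version("1.1") not in req   # parsed versions work too
    assert parse_version("2.3a1") < parse_version("2.3")  # pre-releases sort first
    return req.project_name, req.specs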
|
gpl-3.0
|
anixe/anixe-gitosis
|
gitosis/init.py
|
19
|
4219
|
"""
Initialize a user account for use with gitosis.
"""
import errno
import logging
import os
import sys
from pkg_resources import resource_filename
from cStringIO import StringIO
from ConfigParser import RawConfigParser
from gitosis import repository
from gitosis import run_hook
from gitosis import ssh
from gitosis import util
from gitosis import app
log = logging.getLogger('gitosis.init')
def read_ssh_pubkey(fp=None):
if fp is None:
fp = sys.stdin
line = fp.readline()
return line
class InsecureSSHKeyUsername(Exception):
"""Username contains not allowed characters"""
def __str__(self):
return '%s: %s' % (self.__doc__, ': '.join(self.args))
def ssh_extract_user(pubkey):
_, user = pubkey.rsplit(None, 1)
if ssh.isSafeUsername(user):
return user
else:
raise InsecureSSHKeyUsername(repr(user))
def initial_commit(git_dir, cfg, pubkey, user):
repository.fast_import(
git_dir=git_dir,
commit_msg='Automatic creation of gitosis repository.',
committer='Gitosis Admin <%s>' % user,
files=[
('keydir/%s.pub' % user, pubkey),
('gitosis.conf', cfg),
],
)
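# Atomically repoint ~/.gitosis.conf at the repository's config file: the
# symlink is created under a temporary name and then rename()d over the
# destination, so readers never observe a missing or half-made link.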
def symlink_config(git_dir):
dst = os.path.expanduser('~/.gitosis.conf')
tmp = '%s.%d.tmp' % (dst, os.getpid())
try:
os.unlink(tmp)
except OSError, e:
if e.errno == errno.ENOENT:
pass
else:
raise
os.symlink(
os.path.join(git_dir, 'gitosis.conf'),
tmp,
)
os.rename(tmp, dst)
def init_admin_repository(
git_dir,
pubkey,
user,
):
repository.init(
path=git_dir,
template=resource_filename('gitosis.templates', 'admin')
)
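    # git init is idempotent, so this second, template-less call is safe to
    # run over the repository that was just created from the admin template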
repository.init(
path=git_dir,
)
if not repository.has_initial_commit(git_dir):
log.info('Making initial commit...')
# ConfigParser does not guarantee order, so jump through hoops
# to make sure [gitosis] is first
cfg_file = StringIO()
print >>cfg_file, '[gitosis]'
print >>cfg_file
cfg = RawConfigParser()
cfg.add_section('group gitosis-admin')
cfg.set('group gitosis-admin', 'members', user)
cfg.set('group gitosis-admin', 'writable', 'gitosis-admin')
cfg.write(cfg_file)
initial_commit(
git_dir=git_dir,
cfg=cfg_file.getvalue(),
pubkey=pubkey,
user=user,
)
class Main(app.App):
def create_parser(self):
parser = super(Main, self).create_parser()
parser.set_usage('%prog [OPTS]')
parser.set_description(
'Initialize a user account for use with gitosis')
return parser
def read_config(self, *a, **kw):
# ignore errors that result from non-existent config file
try:
super(Main, self).read_config(*a, **kw)
except app.ConfigFileDoesNotExistError:
pass
def handle_args(self, parser, cfg, options, args):
super(Main, self).handle_args(parser, cfg, options, args)
os.umask(0022)
log.info('Reading SSH public key...')
pubkey = read_ssh_pubkey()
user = ssh_extract_user(pubkey)
if user is None:
log.error('Cannot parse user from SSH public key.')
sys.exit(1)
log.info('Admin user is %r', user)
log.info('Creating generated files directory...')
generated = util.getGeneratedFilesDir(config=cfg)
util.mkdir(generated)
log.info('Creating repository structure...')
repositories = util.getRepositoryDir(cfg)
util.mkdir(repositories)
admin_repository = os.path.join(repositories, 'gitosis-admin.git')
init_admin_repository(
git_dir=admin_repository,
pubkey=pubkey,
user=user,
)
log.info('Running post-update hook...')
util.mkdir(os.path.expanduser('~/.ssh'), 0700)
run_hook.post_update(cfg=cfg, git_dir=admin_repository)
log.info('Symlinking ~/.gitosis.conf to repository...')
symlink_config(git_dir=admin_repository)
log.info('Done.')
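# --- Editorial sketch (not part of the original module) ---
# How ssh_extract_user behaves on a typical OpenSSH public-key line: the
# trailing comment field becomes the gitosis username.  The key material is
# a dummy placeholder and 'jdoe@example.com' a hypothetical user; gitosis
# accepts user@host-style names, subject to ssh.isSafeUsername.
def _editorial_extract_user_sketch():
    pubkey = 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDummy jdoe@example.com\n'
    return ssh_extract_user(pubkey)  # -> 'jdoe@example.com'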
|
gpl-2.0
|
jfmartinez64/test
|
couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/wat.py
|
35
|
5369
|
# coding: utf-8
from __future__ import unicode_literals
import re
import hashlib
from .common import InfoExtractor
from ..utils import (
ExtractorError,
unified_strdate,
)
class WatIE(InfoExtractor):
_VALID_URL = r'http://www\.wat\.tv/video/(?P<display_id>.*)-(?P<short_id>.*?)_.*?\.html'
IE_NAME = 'wat.tv'
_TESTS = [
{
'url': 'http://www.wat.tv/video/soupe-figues-l-orange-aux-epices-6z1uz_2hvf7_.html',
'md5': 'ce70e9223945ed26a8056d413ca55dc9',
'info_dict': {
'id': '11713067',
'display_id': 'soupe-figues-l-orange-aux-epices',
'ext': 'mp4',
'title': 'Soupe de figues à l\'orange et aux épices',
'description': 'Retrouvez l\'émission "Petits plats en équilibre", diffusée le 18 août 2014.',
'upload_date': '20140819',
'duration': 120,
},
},
{
'url': 'http://www.wat.tv/video/gregory-lemarchal-voix-ange-6z1v7_6ygkj_.html',
'md5': 'fbc84e4378165278e743956d9c1bf16b',
'info_dict': {
'id': '11713075',
'display_id': 'gregory-lemarchal-voix-ange',
'ext': 'mp4',
'title': 'Grégory Lemarchal, une voix d\'ange depuis 10 ans (1/3)',
'description': 'md5:b7a849cf16a2b733d9cd10c52906dee3',
'upload_date': '20140816',
'duration': 2910,
},
'skip': "Ce contenu n'est pas disponible pour l'instant.",
},
]
def download_video_info(self, real_id):
        # 'contentv4' is used on the website, but it also returns the
        # related videos, which we don't need here
info = self._download_json('http://www.wat.tv/interface/contentv3/' + real_id, real_id)
return info['media']
def _real_extract(self, url):
def real_id_for_chapter(chapter):
return chapter['tc_start'].split('-')[0]
mobj = re.match(self._VALID_URL, url)
short_id = mobj.group('short_id')
display_id = mobj.group('display_id')
webpage = self._download_webpage(url, display_id or short_id)
real_id = self._search_regex(r'xtpage = ".*-(.*?)";', webpage, 'real id')
video_info = self.download_video_info(real_id)
error_desc = video_info.get('error_desc')
if error_desc:
raise ExtractorError(
'%s returned error: %s' % (self.IE_NAME, error_desc), expected=True)
geo_list = video_info.get('geoList')
country = geo_list[0] if geo_list else ''
chapters = video_info['chapters']
first_chapter = chapters[0]
files = video_info['files']
first_file = files[0]
if real_id_for_chapter(first_chapter) != real_id:
self.to_screen('Multipart video detected')
chapter_urls = []
for chapter in chapters:
chapter_id = real_id_for_chapter(chapter)
                # Yes: when this chapter is processed by WatIE, it will
                # download the info again
chapter_info = self.download_video_info(chapter_id)
chapter_urls.append(chapter_info['url'])
entries = [self.url_result(chapter_url) for chapter_url in chapter_urls]
return self.playlist_result(entries, real_id, video_info['title'])
upload_date = None
if 'date_diffusion' in first_chapter:
upload_date = unified_strdate(first_chapter['date_diffusion'])
        # Otherwise we can continue and extract just one part; the real id
        # is used to build the video URLs below
formats = [{
'url': 'http://wat.tv/get/android5/%s.mp4' % real_id,
'format_id': 'Mobile',
}]
fmts = [('SD', 'web')]
if first_file.get('hasHD'):
fmts.append(('HD', 'webhd'))
def compute_token(param):
timestamp = '%08x' % int(self._download_webpage(
'http://www.wat.tv/servertime', real_id,
'Downloading server time').split('|')[0])
magic = '9b673b13fa4682ed14c3cfa5af5310274b514c4133e9b3a81e6e3aba009l2564'
return '%s/%s' % (hashlib.md5((magic + param + timestamp).encode('ascii')).hexdigest(), timestamp)
for fmt in fmts:
webid = '/%s/%s' % (fmt[1], real_id)
video_url = self._download_webpage(
'http://www.wat.tv/get%s?token=%s&getURL=1&country=%s' % (webid, compute_token(webid), country),
real_id,
                'Downloading %s video URL' % fmt[0],
'Failed to download %s video URL' % fmt[0],
False)
if not video_url:
continue
formats.append({
'url': video_url,
'ext': 'mp4',
'format_id': fmt[0],
})
return {
'id': real_id,
'display_id': display_id,
'title': first_chapter['title'],
'thumbnail': first_chapter['preview'],
'description': first_chapter['description'],
'view_count': video_info['views'],
'upload_date': upload_date,
'duration': first_file['duration'],
'formats': formats,
}
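# --- Editorial sketch (not part of the original extractor) ---
# compute_token() above builds '<md5hex>/<timestamp>', where the digest
# covers magic + param + a server timestamp fetched live from wat.tv.  The
# same scheme as a pure function, with the timestamp passed in so it can be
# exercised offline ('timestamp_hex' is an illustrative stand-in):
def _editorial_token_sketch(param, timestamp_hex):
    magic = '9b673b13fa4682ed14c3cfa5af5310274b514c4133e9b3a81e6e3aba009l2564'
    digest = hashlib.md5((magic + param + timestamp_hex).encode('ascii')).hexdigest()
    return '%s/%s' % (digest, timestamp_hex)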
|
gpl-3.0
|
FrankBian/kuma
|
kuma/users/backends.py
|
5
|
1131
|
import hashlib
from django.contrib.auth.hashers import BasePasswordHasher, mask_hash
from django.utils.crypto import constant_time_compare
from django.utils.datastructures import SortedDict
from tower import ugettext as _
class Sha256Hasher(BasePasswordHasher):
"""
SHA-256 password hasher.
"""
algorithm = 'sha256'
digest = hashlib.sha256
def encode(self, password, salt):
assert password
assert salt and '$' not in salt
hash = self.digest(salt + password).hexdigest()
return "%s$%s$%s" % (self.algorithm, salt, hash)
def verify(self, password, encoded):
algorithm, salt, hash = encoded.split('$', 2)
assert algorithm == self.algorithm
encoded_2 = self.encode(password, salt)
return constant_time_compare(encoded, encoded_2)
def safe_summary(self, encoded):
algorithm, salt, hash = encoded.split('$', 2)
assert algorithm == self.algorithm
return SortedDict([
(_('algorithm'), algorithm),
(_('salt'), mask_hash(salt, show=2)),
(_('hash'), mask_hash(hash)),
])
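# --- Editorial sketch (not part of the original module) ---
# Round-trip of the hasher above (Python 2 string semantics): encode()
# stores 'sha256$<salt>$<hexdigest>' and verify() re-encodes with the
# stored salt, comparing in constant time.  The password and salt values
# below are arbitrary examples.
def _editorial_hasher_sketch():
    hasher = Sha256Hasher()
    encoded = hasher.encode('s3cret', 'abcd1234')  # 'sha256$abcd1234$<hex>'
    assert hasher.verify('s3cret', encoded)
    return encoded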
|
mpl-2.0
|
saquiba2/numpy2
|
numpy/polynomial/polynomial.py
|
126
|
48268
|
"""
Objects for dealing with polynomials.
This module provides a number of objects (mostly functions) useful for
dealing with polynomials, including a `Polynomial` class that
encapsulates the usual arithmetic operations. (General information
on how this module represents and works with polynomial objects is in
the docstring for its "parent" sub-package, `numpy.polynomial`).
Constants
---------
- `polydomain` -- Polynomial default domain, [-1,1].
- `polyzero` -- (Coefficients of the) "zero polynomial."
- `polyone` -- (Coefficients of the) constant polynomial 1.
- `polyx` -- (Coefficients of the) identity map polynomial, ``f(x) = x``.
Arithmetic
----------
- `polyadd` -- add two polynomials.
- `polysub` -- subtract one polynomial from another.
- `polymul` -- multiply two polynomials.
- `polydiv` -- divide one polynomial by another.
- `polypow` -- raise a polynomial to a positive integer power.
- `polyval` -- evaluate a polynomial at given points.
- `polyval2d` -- evaluate a 2D polynomial at given points.
- `polyval3d` -- evaluate a 3D polynomial at given points.
- `polygrid2d` -- evaluate a 2D polynomial on a Cartesian product.
- `polygrid3d` -- evaluate a 3D polynomial on a Cartesian product.
Calculus
--------
- `polyder` -- differentiate a polynomial.
- `polyint` -- integrate a polynomial.
Misc Functions
--------------
- `polyfromroots` -- create a polynomial with specified roots.
- `polyroots` -- find the roots of a polynomial.
- `polyvander` -- Vandermonde-like matrix for powers.
- `polyvander2d` -- Vandermonde-like matrix for 2D power series.
- `polyvander3d` -- Vandermonde-like matrix for 3D power series.
- `polycompanion` -- companion matrix in power series form.
- `polyfit` -- least-squares fit returning a polynomial.
- `polytrim` -- trim leading coefficients from a polynomial.
- `polyline` -- polynomial representing given straight line.
Classes
-------
- `Polynomial` -- polynomial class.
See Also
--------
`numpy.polynomial`
"""
from __future__ import division, absolute_import, print_function
__all__ = [
'polyzero', 'polyone', 'polyx', 'polydomain', 'polyline', 'polyadd',
'polysub', 'polymulx', 'polymul', 'polydiv', 'polypow', 'polyval',
'polyder', 'polyint', 'polyfromroots', 'polyvander', 'polyfit',
'polytrim', 'polyroots', 'Polynomial', 'polyval2d', 'polyval3d',
'polygrid2d', 'polygrid3d', 'polyvander2d', 'polyvander3d']
import warnings
import numpy as np
import numpy.linalg as la
from . import polyutils as pu
from ._polybase import ABCPolyBase
polytrim = pu.trimcoef
#
# These constant arrays are of integer type so as to be compatible
# with the widest range of other types, such as Decimal.
#
# Polynomial default domain.
polydomain = np.array([-1, 1])
# Polynomial coefficients representing zero.
polyzero = np.array([0])
# Polynomial coefficients representing one.
polyone = np.array([1])
# Polynomial coefficients representing the identity x.
polyx = np.array([0, 1])
#
# Polynomial series functions
#
def polyline(off, scl):
"""
Returns an array representing a linear polynomial.
Parameters
----------
off, scl : scalars
The "y-intercept" and "slope" of the line, respectively.
Returns
-------
y : ndarray
This module's representation of the linear polynomial ``off +
scl*x``.
See Also
--------
chebline
Examples
--------
>>> from numpy.polynomial import polynomial as P
>>> P.polyline(1,-1)
array([ 1, -1])
>>> P.polyval(1, P.polyline(1,-1)) # should be 0
0.0
"""
if scl != 0:
return np.array([off, scl])
else:
return np.array([off])
def polyfromroots(roots):
"""
Generate a monic polynomial with given roots.
Return the coefficients of the polynomial
.. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),
where the `r_n` are the roots specified in `roots`. If a zero has
multiplicity n, then it must appear in `roots` n times. For instance,
if 2 is a root of multiplicity three and 3 is a root of multiplicity 2,
then `roots` looks something like [2, 2, 2, 3, 3]. The roots can appear
in any order.
If the returned coefficients are `c`, then
.. math:: p(x) = c_0 + c_1 * x + ... + x^n
The coefficient of the last term is 1 for monic polynomials in this
form.
Parameters
----------
roots : array_like
Sequence containing the roots.
Returns
-------
out : ndarray
1-D array of the polynomial's coefficients. If all the roots are
real, then `out` is also real, otherwise it is complex (see
Examples below).
See Also
--------
chebfromroots, legfromroots, lagfromroots, hermfromroots
hermefromroots
Notes
-----
The coefficients are determined by multiplying together linear factors
of the form `(x - r_i)`, i.e.
.. math:: p(x) = (x - r_0) (x - r_1) ... (x - r_n)
where ``n == len(roots) - 1``; note that this implies that `1` is always
returned for :math:`a_n`.
Examples
--------
>>> from numpy.polynomial import polynomial as P
>>> P.polyfromroots((-1,0,1)) # x(x - 1)(x + 1) = x^3 - x
array([ 0., -1., 0., 1.])
>>> j = complex(0,1)
>>> P.polyfromroots((-j,j)) # complex returned, though values are real
array([ 1.+0.j, 0.+0.j, 1.+0.j])
"""
if len(roots) == 0:
return np.ones(1)
else:
[roots] = pu.as_series([roots], trim=False)
roots.sort()
p = [polyline(-r, 1) for r in roots]
n = len(p)
while n > 1:
m, r = divmod(n, 2)
tmp = [polymul(p[i], p[i+m]) for i in range(m)]
if r:
tmp[0] = polymul(tmp[0], p[-1])
p = tmp
n = m
return p[0]
def polyadd(c1, c2):
"""
Add one polynomial to another.
Returns the sum of two polynomials `c1` + `c2`. The arguments are
sequences of coefficients from lowest order term to highest, i.e.,
[1,2,3] represents the polynomial ``1 + 2*x + 3*x**2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of polynomial coefficients ordered from low to high.
Returns
-------
out : ndarray
The coefficient array representing their sum.
See Also
--------
polysub, polymul, polydiv, polypow
Examples
--------
>>> from numpy.polynomial import polynomial as P
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> sum = P.polyadd(c1,c2); sum
array([ 4., 4., 4.])
>>> P.polyval(2, sum) # 4 + 4(2) + 4(2**2)
28.0
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c1[:c2.size] += c2
ret = c1
else:
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def polysub(c1, c2):
"""
Subtract one polynomial from another.
Returns the difference of two polynomials `c1` - `c2`. The arguments
are sequences of coefficients from lowest order term to highest, i.e.,
[1,2,3] represents the polynomial ``1 + 2*x + 3*x**2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of polynomial coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of coefficients representing their difference.
See Also
--------
polyadd, polymul, polydiv, polypow
Examples
--------
>>> from numpy.polynomial import polynomial as P
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> P.polysub(c1,c2)
array([-2., 0., 2.])
>>> P.polysub(c2,c1) # -P.polysub(c1,c2)
array([ 2., 0., -2.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c1[:c2.size] -= c2
ret = c1
else:
c2 = -c2
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def polymulx(c):
"""Multiply a polynomial by x.
Multiply the polynomial `c` by x, where x is the independent
variable.
Parameters
----------
c : array_like
1-D array of polynomial coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the result of the multiplication.
Notes
-----
.. versionadded:: 1.5.0
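Examples
--------
>>> from numpy.polynomial import polynomial as P
>>> P.polymulx((1, 2, 3)) # x*(1 + 2x + 3x**2)
array([ 0., 1., 2., 3.])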
"""
# c is a trimmed copy
[c] = pu.as_series([c])
# The zero series needs special treatment
if len(c) == 1 and c[0] == 0:
return c
prd = np.empty(len(c) + 1, dtype=c.dtype)
prd[0] = c[0]*0
prd[1:] = c
return prd
def polymul(c1, c2):
"""
Multiply one polynomial by another.
Returns the product of two polynomials `c1` * `c2`. The arguments are
sequences of coefficients, from lowest order term to highest, e.g.,
[1,2,3] represents the polynomial ``1 + 2*x + 3*x**2.``
Parameters
----------
c1, c2 : array_like
1-D arrays of coefficients representing a polynomial, relative to the
"standard" basis, and ordered from lowest order term to highest.
Returns
-------
out : ndarray
Of the coefficients of their product.
See Also
--------
polyadd, polysub, polydiv, polypow
Examples
--------
>>> from numpy.polynomial import polynomial as P
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> P.polymul(c1,c2)
array([ 3., 8., 14., 8., 3.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
ret = np.convolve(c1, c2)
return pu.trimseq(ret)
def polydiv(c1, c2):
"""
Divide one polynomial by another.
Returns the quotient-with-remainder of two polynomials `c1` / `c2`.
The arguments are sequences of coefficients, from lowest order term
to highest, e.g., [1,2,3] represents ``1 + 2*x + 3*x**2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of polynomial coefficients ordered from low to high.
Returns
-------
[quo, rem] : ndarrays
Of coefficient series representing the quotient and remainder.
See Also
--------
polyadd, polysub, polymul, polypow
Examples
--------
>>> from numpy.polynomial import polynomial as P
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> P.polydiv(c1,c2)
(array([ 3.]), array([-8., -4.]))
>>> P.polydiv(c2,c1)
(array([ 0.33333333]), array([ 2.66666667, 1.33333333]))
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if c2[-1] == 0:
raise ZeroDivisionError()
len1 = len(c1)
len2 = len(c2)
if len2 == 1:
return c1/c2[-1], c1[:1]*0
elif len1 < len2:
return c1[:1]*0, c1
else:
dlen = len1 - len2
scl = c2[-1]
c2 = c2[:-1]/scl
i = dlen
j = len1 - 1
while i >= 0:
c1[i:j] -= c2*c1[j]
i -= 1
j -= 1
return c1[j+1:]/scl, pu.trimseq(c1[:j+1])
def polypow(c, pow, maxpower=None):
"""Raise a polynomial to a power.
Returns the polynomial `c` raised to the power `pow`. The argument
`c` is a sequence of coefficients ordered from low to high. i.e.,
[1,2,3] is the series ``1 + 2*x + 3*x**2.``
Parameters
----------
c : array_like
1-D array of series coefficients ordered from low to
high degree.
pow : integer
Power to which the series will be raised.
maxpower : integer, optional
Maximum power allowed. This is mainly to limit growth of the series
to unmanageable size. Default is 16.
Returns
-------
coef : ndarray
Power series of `c` raised to the power `pow`.
See Also
--------
polyadd, polysub, polymul, polydiv
Examples
--------
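>>> from numpy.polynomial import polynomial as P
>>> P.polypow((1, 2, 3), 2) # (1 + 2x + 3x**2)**2
array([ 1., 4., 10., 12., 9.])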
"""
# c is a trimmed copy
[c] = pu.as_series([c])
power = int(pow)
if power != pow or power < 0:
raise ValueError("Power must be a non-negative integer.")
elif maxpower is not None and power > maxpower:
raise ValueError("Power is too large")
elif power == 0:
return np.array([1], dtype=c.dtype)
elif power == 1:
return c
else:
# This can be made more efficient by using powers of two
# in the usual way.
prd = c
for i in range(2, power + 1):
prd = np.convolve(prd, c)
return prd
def polyder(c, m=1, scl=1, axis=0):
"""
Differentiate a polynomial.
Returns the polynomial coefficients `c` differentiated `m` times along
`axis`. At each iteration the result is multiplied by `scl` (the
scaling factor is for use in a linear change of variable). The
argument `c` is an array of coefficients from low to high degree along
each axis, e.g., [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2``
while [[1,2],[1,2]] represents ``1 + 1*x + 2*y + 2*x*y`` if axis=0 is
``x`` and axis=1 is ``y``.
Parameters
----------
c : array_like
Array of polynomial coefficients. If c is multidimensional the
different axis correspond to different variables with the degree
in each axis given by the corresponding index.
m : int, optional
Number of derivatives taken, must be non-negative. (Default: 1)
scl : scalar, optional
Each differentiation is multiplied by `scl`. The end result is
multiplication by ``scl**m``. This is for use in a linear change
of variable. (Default: 1)
axis : int, optional
Axis over which the derivative is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
der : ndarray
Polynomial coefficients of the derivative.
See Also
--------
polyint
Examples
--------
>>> from numpy.polynomial import polynomial as P
>>> c = (1,2,3,4) # 1 + 2x + 3x**2 + 4x**3
>>> P.polyder(c) # (d/dx)(c) = 2 + 6x + 12x**2
array([ 2., 6., 12.])
>>> P.polyder(c,3) # (d**3/dx**3)(c) = 24
array([ 24.])
>>> P.polyder(c,scl=-1) # (d/d(-x))(c) = -2 - 6x - 12x**2
array([ -2., -6., -12.])
>>> P.polyder(c,2,-1) # (d**2/d(-x)**2)(c) = 6 + 24x
array([ 6., 24.])
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
# astype fails with NA
c = c + 0.0
cdt = c.dtype
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of derivation must be integer")
if cnt < 0:
raise ValueError("The order of derivation must be non-negative")
if iaxis != axis:
raise ValueError("The axis must be integer")
if not -c.ndim <= iaxis < c.ndim:
raise ValueError("The axis is out of range")
if iaxis < 0:
iaxis += c.ndim
if cnt == 0:
return c
c = np.rollaxis(c, iaxis)
n = len(c)
if cnt >= n:
c = c[:1]*0
else:
for i in range(cnt):
n = n - 1
c *= scl
der = np.empty((n,) + c.shape[1:], dtype=cdt)
for j in range(n, 0, -1):
der[j - 1] = j*c[j]
c = der
c = np.rollaxis(c, 0, iaxis + 1)
return c
def polyint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
"""
Integrate a polynomial.
Returns the polynomial coefficients `c` integrated `m` times from
`lbnd` along `axis`. At each iteration the resulting series is
**multiplied** by `scl` and an integration constant, `k`, is added.
The scaling factor is for use in a linear change of variable. ("Buyer
beware": note that, depending on what one is doing, one may want `scl`
to be the reciprocal of what one might expect; for more information,
see the Notes section below.) The argument `c` is an array of
coefficients, from low to high degree along each axis, e.g., [1,2,3]
represents the polynomial ``1 + 2*x + 3*x**2`` while [[1,2],[1,2]]
represents ``1 + 1*x + 2*y + 2*x*y`` if axis=0 is ``x`` and axis=1 is
``y``.
Parameters
----------
c : array_like
1-D array of polynomial coefficients, ordered from low to high.
m : int, optional
Order of integration, must be non-negative. (Default: 1)
k : {[], list, scalar}, optional
Integration constant(s). The value of the first integral at zero
is the first value in the list, the value of the second integral
at zero is the second value, etc. If ``k == []`` (the default),
all constants are set to zero. If ``m == 1``, a single scalar can
be given instead of a list.
lbnd : scalar, optional
The lower bound of the integral. (Default: 0)
scl : scalar, optional
Following each integration the result is *multiplied* by `scl`
before the integration constant is added. (Default: 1)
axis : int, optional
Axis over which the integral is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
S : ndarray
Coefficient array of the integral.
Raises
------
ValueError
If ``m < 0`` or ``len(k) > m``.
See Also
--------
polyder
Notes
-----
Note that the result of each integration is *multiplied* by `scl`. Why
is this important to note? Say one is making a linear change of
variable :math:`u = ax + b` in an integral relative to `x`. Then
:math:`dx = du/a`, so one will need to set `scl` equal to
:math:`1/a` - perhaps not what one would have first thought.
Examples
--------
>>> from numpy.polynomial import polynomial as P
>>> c = (1,2,3)
>>> P.polyint(c) # should return array([0, 1, 1, 1])
array([ 0., 1., 1., 1.])
>>> P.polyint(c,3) # should return array([0, 0, 0, 1/6, 1/12, 1/20])
array([ 0. , 0. , 0. , 0.16666667, 0.08333333,
0.05 ])
>>> P.polyint(c,k=3) # should return array([3, 1, 1, 1])
array([ 3., 1., 1., 1.])
>>> P.polyint(c,lbnd=-2) # should return array([6, 1, 1, 1])
array([ 6., 1., 1., 1.])
>>> P.polyint(c,scl=-2) # should return array([0, -2, -2, -2])
array([ 0., -2., -2., -2.])
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
# astype doesn't preserve mask attribute.
c = c + 0.0
cdt = c.dtype
if not np.iterable(k):
k = [k]
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of integration must be integer")
if cnt < 0:
raise ValueError("The order of integration must be non-negative")
if len(k) > cnt:
raise ValueError("Too many integration constants")
if iaxis != axis:
raise ValueError("The axis must be integer")
if not -c.ndim <= iaxis < c.ndim:
raise ValueError("The axis is out of range")
if iaxis < 0:
iaxis += c.ndim
if cnt == 0:
return c
k = list(k) + [0]*(cnt - len(k))
c = np.rollaxis(c, iaxis)
for i in range(cnt):
n = len(c)
c *= scl
if n == 1 and np.all(c[0] == 0):
c[0] += k[i]
else:
tmp = np.empty((n + 1,) + c.shape[1:], dtype=cdt)
tmp[0] = c[0]*0
tmp[1] = c[0]
for j in range(1, n):
tmp[j + 1] = c[j]/(j + 1)
tmp[0] += k[i] - polyval(lbnd, tmp)
c = tmp
c = np.rollaxis(c, 0, iaxis + 1)
return c
def polyval(x, c, tensor=True):
"""
Evaluate a polynomial at points x.
If `c` is of length `n + 1`, this function returns the value
.. math:: p(x) = c_0 + c_1 * x + ... + c_n * x^n
The parameter `x` is converted to an array only if it is a tuple or a
list, otherwise it is treated as a scalar. In either case, either `x`
or its elements must support multiplication and addition both with
themselves and with the elements of `c`.
If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If
`c` is multidimensional, then the shape of the result depends on the
value of `tensor`. If `tensor` is true the shape will be c.shape[1:] +
x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that
scalars have shape ().
Trailing zeros in the coefficients will be used in the evaluation, so
they should be avoided if efficiency is a concern.
Parameters
----------
x : array_like, compatible object
If `x` is a list or tuple, it is converted to an ndarray, otherwise
it is left unchanged and treated as a scalar. In either case, `x`
or its elements must support addition and multiplication with
themselves and with the elements of `c`.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
degree n are contained in c[n]. If `c` is multidimensional the
remaining indices enumerate multiple polynomials. In the two
dimensional case the coefficients may be thought of as stored in
the columns of `c`.
tensor : boolean, optional
If True, the shape of the coefficient array is extended with ones
on the right, one for each dimension of `x`. Scalars have dimension 0
for this action. The result is that every column of coefficients in
`c` is evaluated for every element of `x`. If False, `x` is broadcast
over the columns of `c` for the evaluation. This keyword is useful
when `c` is multidimensional. The default value is True.
.. versionadded:: 1.7.0
Returns
-------
values : ndarray, compatible object
The shape of the returned array is described above.
See Also
--------
polyval2d, polygrid2d, polyval3d, polygrid3d
Notes
-----
The evaluation uses Horner's method.
Examples
--------
>>> from numpy.polynomial.polynomial import polyval
>>> polyval(1, [1,2,3])
6.0
>>> a = np.arange(4).reshape(2,2)
>>> a
array([[0, 1],
[2, 3]])
>>> polyval(a, [1,2,3])
array([[ 1., 6.],
[ 17., 34.]])
>>> coef = np.arange(4).reshape(2,2) # multidimensional coefficients
>>> coef
array([[0, 1],
[2, 3]])
>>> polyval([1,2], coef, tensor=True)
array([[ 2., 4.],
[ 4., 7.]])
>>> polyval([1,2], coef, tensor=False)
array([ 2., 7.])
"""
c = np.array(c, ndmin=1, copy=0)
if c.dtype.char in '?bBhHiIlLqQpP':
# astype fails with NA
c = c + 0.0
if isinstance(x, (tuple, list)):
x = np.asarray(x)
if isinstance(x, np.ndarray) and tensor:
c = c.reshape(c.shape + (1,)*x.ndim)
c0 = c[-1] + x*0
for i in range(2, len(c) + 1):
c0 = c[-i] + c0*x
return c0
def polyval2d(x, y, c):
"""
Evaluate a 2-D polynomial at points (x, y).
This function returns the value
.. math:: p(x,y) = \\sum_{i,j} c_{i,j} * x^i * y^j
The parameters `x` and `y` are converted to arrays only if they are
tuples or lists, otherwise they are treated as scalars and they
must have the same shape after conversion. In either case, either `x`
and `y` or their elements must support multiplication and addition both
with themselves and with the elements of `c`.
If `c` has fewer than two dimensions, ones are implicitly appended to
its shape to make it 2-D. The shape of the result will be c.shape[2:] +
x.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points `(x, y)`,
where `x` and `y` must have the same shape. If `x` or `y` is a list
or tuple, it is first converted to an ndarray, otherwise it is left
unchanged and, if it isn't an ndarray, it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term
of multi-degree i,j is contained in `c[i,j]`. If `c` has
dimension greater than two the remaining indices enumerate multiple
sets of coefficients.
Returns
-------
values : ndarray, compatible object
The values of the two dimensional polynomial at points formed with
pairs of corresponding values from `x` and `y`.
See Also
--------
polyval, polygrid2d, polyval3d, polygrid3d
Notes
-----
.. versionadded:: 1.7.0
"""
try:
x, y = np.array((x, y), copy=0)
except:
raise ValueError('x, y are incompatible')
c = polyval(x, c)
c = polyval(y, c, tensor=False)
return c
def polygrid2d(x, y, c):
"""
Evaluate a 2-D polynomial on the Cartesian product of x and y.
This function returns the values:
.. math:: p(a,b) = \\sum_{i,j} c_{i,j} * a^i * b^j
where the points `(a, b)` consist of all pairs formed by taking
`a` from `x` and `b` from `y`. The resulting points form a grid with
`x` in the first dimension and `y` in the second.
The parameters `x` and `y` are converted to arrays only if they are
tuples or lists, otherwise they are treated as scalars. In either
case, either `x` and `y` or their elements must support multiplication
and addition both with themselves and with the elements of `c`.
If `c` has fewer than two dimensions, ones are implicitly appended to
its shape to make it 2-D. The shape of the result will be c.shape[2:] +
x.shape + y.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points in the
Cartesian product of `x` and `y`. If `x` or `y` is a list or
tuple, it is first converted to an ndarray, otherwise it is left
unchanged and, if it isn't an ndarray, it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
degree i,j are contained in ``c[i,j]``. If `c` has dimension
greater than two the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the two dimensional polynomial at points in the Cartesian
product of `x` and `y`.
See Also
--------
polyval, polyval2d, polyval3d, polygrid3d
Notes
-----
.. versionadded:: 1.7.0
"""
c = polyval(x, c)
c = polyval(y, c)
return c
def polyval3d(x, y, z, c):
"""
Evaluate a 3-D polynomial at points (x, y, z).
This function returns the values:
.. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * x^i * y^j * z^k
The parameters `x`, `y`, and `z` are converted to arrays only if
they are tuples or lists, otherwise they are treated as scalars and
they must have the same shape after conversion. In either case, either
`x`, `y`, and `z` or their elements must support multiplication and
addition both with themselves and with the elements of `c`.
If `c` has fewer than 3 dimensions, ones are implicitly appended to its
shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape.
Parameters
----------
x, y, z : array_like, compatible object
The three dimensional series is evaluated at the points
`(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If
any of `x`, `y`, or `z` is a list or tuple, it is first converted
to an ndarray, otherwise it is left unchanged and if it isn't an
ndarray it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term of
multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
greater than 3 the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the multidimensional polynomial on points formed with
triples of corresponding values from `x`, `y`, and `z`.
See Also
--------
polyval, polyval2d, polygrid2d, polygrid3d
Notes
-----
.. versionadded:: 1.7.0
"""
try:
x, y, z = np.array((x, y, z), copy=0)
except:
raise ValueError('x, y, z are incompatible')
c = polyval(x, c)
c = polyval(y, c, tensor=False)
c = polyval(z, c, tensor=False)
return c
def polygrid3d(x, y, z, c):
"""
Evaluate a 3-D polynomial on the Cartesian product of x, y and z.
This function returns the values:
.. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * a^i * b^j * c^k
where the points `(a, b, c)` consist of all triples formed by taking
`a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
a grid with `x` in the first dimension, `y` in the second, and `z` in
the third.
The parameters `x`, `y`, and `z` are converted to arrays only if they
are tuples or lists, otherwise they are treated as scalars. In
either case, either `x`, `y`, and `z` or their elements must support
multiplication and addition both with themselves and with the elements
of `c`.
If `c` has fewer than three dimensions, ones are implicitly appended to
its shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape + y.shape + z.shape.
Parameters
----------
x, y, z : array_like, compatible objects
The three dimensional series is evaluated at the points in the
Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a
list or tuple, it is first converted to an ndarray, otherwise it is
left unchanged and, if it isn't an ndarray, it is treated as a
scalar.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
degree i,j are contained in ``c[i,j]``. If `c` has dimension
greater than two the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the three dimensional polynomial at points in the
Cartesian product of `x`, `y`, and `z`.
See Also
--------
polyval, polyval2d, polygrid2d, polyval3d
Notes
-----
.. versionadded:: 1.7.0
"""
c = polyval(x, c)
c = polyval(y, c)
c = polyval(z, c)
return c
def polyvander(x, deg):
"""Vandermonde matrix of given degree.
Returns the Vandermonde matrix of degree `deg` and sample points
`x`. The Vandermonde matrix is defined by
.. math:: V[..., i] = x^i,
where `0 <= i <= deg`. The leading indices of `V` index the elements of
`x` and the last index is the power of `x`.
If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the
matrix ``V = polyvander(x, n)``, then ``np.dot(V, c)`` and
``polyval(x, c)`` are the same up to roundoff. This equivalence is
useful both for least squares fitting and for the evaluation of a large
number of polynomials of the same degree and sample points.
Parameters
----------
x : array_like
Array of points. The dtype is converted to float64 or complex128
depending on whether any of the elements are complex. If `x` is
scalar it is converted to a 1-D array.
deg : int
Degree of the resulting matrix.
Returns
-------
vander : ndarray
The Vandermonde matrix. The shape of the returned matrix is
``x.shape + (deg + 1,)``, where the last index is the power of `x`.
The dtype will be the same as the converted `x`.
See Also
--------
polyvander2d, polyvander3d
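Examples
--------
>>> from numpy.polynomial import polynomial as P
>>> P.polyvander((0, 1, 2), 3) # columns are x**0 .. x**3
array([[ 1., 0., 0., 0.],
[ 1., 1., 1., 1.],
[ 1., 2., 4., 8.]])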
"""
ideg = int(deg)
if ideg != deg:
raise ValueError("deg must be integer")
if ideg < 0:
raise ValueError("deg must be non-negative")
x = np.array(x, copy=0, ndmin=1) + 0.0
dims = (ideg + 1,) + x.shape
dtyp = x.dtype
v = np.empty(dims, dtype=dtyp)
v[0] = x*0 + 1
if ideg > 0:
v[1] = x
for i in range(2, ideg + 1):
v[i] = v[i-1]*x
return np.rollaxis(v, 0, v.ndim)
def polyvander2d(x, y, deg):
"""Pseudo-Vandermonde matrix of given degrees.
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
points `(x, y)`. The pseudo-Vandermonde matrix is defined by
.. math:: V[..., deg[1]*i + j] = x^i * y^j,
where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of
`V` index the points `(x, y)` and the last index encodes the powers of
`x` and `y`.
If ``V = polyvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
correspond to the elements of a 2-D coefficient array `c` of shape
(xdeg + 1, ydeg + 1) in the order
.. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
and ``np.dot(V, c.flat)`` and ``polyval2d(x, y, c)`` will be the same
up to roundoff. This equivalence is useful both for least squares
fitting and for the evaluation of a large number of 2-D polynomials
of the same degrees and sample points.
Parameters
----------
x, y : array_like
Arrays of point coordinates, all of the same shape. The dtypes
will be converted to either float64 or complex128 depending on
whether any of the elements are complex. Scalars are converted to
1-D arrays.
deg : list of ints
List of maximum degrees of the form [x_deg, y_deg].
Returns
-------
vander2d : ndarray
The shape of the returned matrix is ``x.shape + (order,)``, where
:math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
as the converted `x` and `y`.
See Also
--------
polyvander, polyvander3d, polyval2d, polyval3d
"""
ideg = [int(d) for d in deg]
is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
if is_valid != [1, 1]:
raise ValueError("degrees must be non-negative integers")
degx, degy = ideg
x, y = np.array((x, y), copy=0) + 0.0
vx = polyvander(x, degx)
vy = polyvander(y, degy)
v = vx[..., None]*vy[..., None,:]
# einsum bug
#v = np.einsum("...i,...j->...ij", vx, vy)
return v.reshape(v.shape[:-2] + (-1,))
def polyvander3d(x, y, z, deg):
"""Pseudo-Vandermonde matrix of given degrees.
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
then the pseudo-Vandermonde matrix is defined by
.. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = x^i * y^j * z^k,
where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`. The leading
indices of `V` index the points `(x, y, z)` and the last index encodes
the powers of `x`, `y`, and `z`.
If ``V = polyvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
of `V` correspond to the elements of a 3-D coefficient array `c` of
shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
.. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
and ``np.dot(V, c.flat)`` and ``polyval3d(x, y, z, c)`` will be the
same up to roundoff. This equivalence is useful both for least squares
fitting and for the evaluation of a large number of 3-D polynomials
of the same degrees and sample points.
Parameters
----------
x, y, z : array_like
Arrays of point coordinates, all of the same shape. The dtypes will
be converted to either float64 or complex128 depending on whether
any of the elements are complex. Scalars are converted to 1-D
arrays.
deg : list of ints
List of maximum degrees of the form [x_deg, y_deg, z_deg].
Returns
-------
vander3d : ndarray
The shape of the returned matrix is ``x.shape + (order,)``, where
:math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will
be the same as the converted `x`, `y`, and `z`.
See Also
--------
polyvander, polyvander2d, polyval2d, polyval3d
Notes
-----
.. versionadded:: 1.7.0
"""
ideg = [int(d) for d in deg]
is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
if is_valid != [1, 1, 1]:
raise ValueError("degrees must be non-negative integers")
degx, degy, degz = ideg
x, y, z = np.array((x, y, z), copy=0) + 0.0
vx = polyvander(x, degx)
vy = polyvander(y, degy)
vz = polyvander(z, degz)
v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:]
# einsum bug
#v = np.einsum("...i, ...j, ...k->...ijk", vx, vy, vz)
return v.reshape(v.shape[:-3] + (-1,))
def polyfit(x, y, deg, rcond=None, full=False, w=None):
"""
Least-squares fit of a polynomial to data.
Return the coefficients of a polynomial of degree `deg` that is the
least squares fit to the data values `y` given at points `x`. If `y` is
1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple
fits are done, one for each column of `y`, and the resulting
coefficients are stored in the corresponding columns of a 2-D return.
The fitted polynomial(s) are in the form
.. math:: p(x) = c_0 + c_1 * x + ... + c_n * x^n,
where `n` is `deg`.
Parameters
----------
x : array_like, shape (`M`,)
x-coordinates of the `M` sample (data) points ``(x[i], y[i])``.
y : array_like, shape (`M`,) or (`M`, `K`)
y-coordinates of the sample points. Several sets of sample points
sharing the same x-coordinates can be (independently) fit with one
call to `polyfit` by passing in for `y` a 2-D array that contains
one data set per column.
deg : int
Degree of the polynomial(s) to be fit.
rcond : float, optional
Relative condition number of the fit. Singular values smaller
than `rcond`, relative to the largest singular value, will be
ignored. The default value is ``len(x)*eps``, where `eps` is the
relative precision of the platform's float type, about 2e-16 in
most cases.
full : bool, optional
Switch determining the nature of the return value. When ``False``
(the default) just the coefficients are returned; when ``True``,
diagnostic information from the singular value decomposition (used
to solve the fit's matrix equation) is also returned.
w : array_like, shape (`M`,), optional
Weights. If not None, the contribution of each point
``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the
weights are chosen so that the errors of the products ``w[i]*y[i]``
all have the same variance. The default value is None.
.. versionadded:: 1.5.0
Returns
-------
coef : ndarray, shape (`deg` + 1,) or (`deg` + 1, `K`)
Polynomial coefficients ordered from low to high. If `y` was 2-D,
the coefficients in column `k` of `coef` represent the polynomial
fit to the data in `y`'s `k`-th column.
[residuals, rank, singular_values, rcond] : list
These values are only returned if `full` = True
resid -- sum of squared residuals of the least squares fit
rank -- the numerical rank of the scaled Vandermonde matrix
sv -- singular values of the scaled Vandermonde matrix
rcond -- value of `rcond`.
For more details, see `linalg.lstsq`.
Raises
------
RankWarning
Raised if the matrix in the least-squares fit is rank deficient.
The warning is only raised if `full` == False. The warnings can
be turned off by:
>>> import warnings
>>> warnings.simplefilter('ignore', RankWarning)
See Also
--------
chebfit, legfit, lagfit, hermfit, hermefit
polyval : Evaluates a polynomial.
polyvander : Vandermonde matrix for powers.
linalg.lstsq : Computes a least-squares fit from the matrix.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution is the coefficients of the polynomial `p` that minimizes
the sum of the weighted squared errors
.. math :: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2,
where the :math:`w_j` are the weights. This problem is solved by
setting up the (typically) over-determined matrix equation:
.. math :: V(x) * c = w * y,
where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the
coefficients to be solved for, `w` are the weights, and `y` are the
observed values. This equation is then solved using the singular value
decomposition of `V`.
If some of the singular values of `V` are so small that they are
neglected (and `full` == ``False``), a `RankWarning` will be raised.
This means that the coefficient values may be poorly determined.
Fitting to a lower order polynomial will usually get rid of the warning
(but may not be what you want, of course; if you have independent
reason(s) for choosing the degree which isn't working, you may have to:
a) reconsider those reasons, and/or b) reconsider the quality of your
data). The `rcond` parameter can also be set to a value smaller than
its default, but the resulting fit may be spurious and have large
contributions from roundoff error.
Polynomial fits using double precision tend to "fail" at about
(polynomial) degree 20. Fits using Chebyshev or Legendre series are
generally better conditioned, but much can still depend on the
distribution of the sample points and the smoothness of the data. If
the quality of the fit is inadequate, splines may be a good
alternative.
Examples
--------
>>> from numpy.polynomial import polynomial as P
>>> x = np.linspace(-1,1,51) # x "data": [-1, -0.96, ..., 0.96, 1]
>>> y = x**3 - x + np.random.randn(len(x)) # x^3 - x + N(0,1) "noise"
>>> c, stats = P.polyfit(x,y,3,full=True)
>>> c # c[0], c[2] should be approx. 0, c[1] approx. -1, c[3] approx. 1
array([ 0.01909725, -1.30598256, -0.00577963, 1.02644286])
>>> stats # note the large SSR, explaining the rather poor results
[array([ 38.06116253]), 4, array([ 1.38446749, 1.32119158, 0.50443316,
0.28853036]), 1.1324274851176597e-014]
Same thing without the added noise
>>> y = x**3 - x
>>> c, stats = P.polyfit(x,y,3,full=True)
>>> c # c[0], c[2] should be "very close to 0", c[1] ~= -1, c[3] ~= 1
array([ -1.73362882e-17, -1.00000000e+00, -2.67471909e-16,
1.00000000e+00])
>>> stats # note the minuscule SSR
[array([ 7.46346754e-31]), 4, array([ 1.38446749, 1.32119158,
0.50443316, 0.28853036]), 1.1324274851176597e-014]
"""
order = int(deg) + 1
x = np.asarray(x) + 0.0
y = np.asarray(y) + 0.0
# check arguments.
if deg < 0:
raise ValueError("expected deg >= 0")
if x.ndim != 1:
raise TypeError("expected 1D vector for x")
if x.size == 0:
raise TypeError("expected non-empty vector for x")
if y.ndim < 1 or y.ndim > 2:
raise TypeError("expected 1D or 2D array for y")
if len(x) != len(y):
raise TypeError("expected x and y to have same length")
# set up the least squares matrices in transposed form
lhs = polyvander(x, deg).T
rhs = y.T
if w is not None:
w = np.asarray(w) + 0.0
if w.ndim != 1:
raise TypeError("expected 1D vector for w")
if len(x) != len(w):
raise TypeError("expected x and w to have same length")
# apply weights. Don't use inplace operations as they
# can cause problems with NA.
lhs = lhs * w
rhs = rhs * w
# set rcond
if rcond is None:
rcond = len(x)*np.finfo(x.dtype).eps
# Determine the norms of the design matrix columns.
if issubclass(lhs.dtype.type, np.complexfloating):
scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
else:
scl = np.sqrt(np.square(lhs).sum(1))
scl[scl == 0] = 1
# Solve the least squares problem.
c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond)
c = (c.T/scl).T
# warn on rank reduction
if rank != order and not full:
msg = "The fit may be poorly conditioned"
warnings.warn(msg, pu.RankWarning)
if full:
return c, [resids, rank, s, rcond]
else:
return c
def polycompanion(c):
"""
Return the companion matrix of c.
The companion matrix for power series cannot be made symmetric by
scaling the basis, so this function differs from those for the
orthogonal polynomials.
Parameters
----------
c : array_like
1-D array of polynomial coefficients ordered from low to high
degree.
Returns
-------
mat : ndarray
Companion matrix of dimensions (deg, deg).
Notes
-----
.. versionadded:: 1.7.0
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) < 2:
raise ValueError('Series must have maximum degree of at least 1.')
if len(c) == 2:
return np.array([[-c[0]/c[1]]])
n = len(c) - 1
mat = np.zeros((n, n), dtype=c.dtype)
bot = mat.reshape(-1)[n::n+1]
bot[...] = 1
mat[:, -1] -= c[:-1]/c[-1]
return mat
def polyroots(c):
"""
Compute the roots of a polynomial.
Return the roots (a.k.a. "zeros") of the polynomial
.. math:: p(x) = \\sum_i c[i] * x^i.
Parameters
----------
c : 1-D array_like
1-D array of polynomial coefficients.
Returns
-------
out : ndarray
Array of the roots of the polynomial. If all the roots are real,
then `out` is also real, otherwise it is complex.
See Also
--------
chebroots
Notes
-----
The root estimates are obtained as the eigenvalues of the companion
matrix. Roots far from the origin of the complex plane may have large
errors due to the numerical instability of the power series for such
values. Roots with multiplicity greater than 1 will also show larger
errors as the value of the series near such points is relatively
insensitive to errors in the roots. Isolated roots near the origin can
be improved by a few iterations of Newton's method.
Examples
--------
>>> import numpy.polynomial.polynomial as poly
>>> poly.polyroots(poly.polyfromroots((-1,0,1)))
array([-1., 0., 1.])
>>> poly.polyroots(poly.polyfromroots((-1,0,1))).dtype
dtype('float64')
>>> j = complex(0,1)
>>> poly.polyroots(poly.polyfromroots((-j,0,j)))
array([ 0.00000000e+00+0.j, 0.00000000e+00+1.j, 2.77555756e-17-1.j])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) < 2:
return np.array([], dtype=c.dtype)
if len(c) == 2:
return np.array([-c[0]/c[1]])
m = polycompanion(c)
r = la.eigvals(m)
r.sort()
return r
#
# polynomial class
#
class Polynomial(ABCPolyBase):
"""A power series class.
The Polynomial class provides the standard Python numerical methods
'+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the
attributes and methods listed in the `ABCPolyBase` documentation.
Parameters
----------
coef : array_like
Polynomial coefficients in order of increasing degree, i.e.,
``(1, 2, 3)`` give ``1 + 2*x + 3*x**2``.
domain : (2,) array_like, optional
Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
to the interval ``[window[0], window[1]]`` by shifting and scaling.
The default value is [-1, 1].
window : (2,) array_like, optional
Window, see `domain` for its use. The default value is [-1, 1].
.. versionadded:: 1.6.0
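Examples
--------
>>> from numpy.polynomial import Polynomial
>>> p = Polynomial([1, 2, 3]) # represents 1 + 2*x + 3*x**2
>>> p(0), p(1)
(1.0, 6.0)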
"""
# Virtual Functions
_add = staticmethod(polyadd)
_sub = staticmethod(polysub)
_mul = staticmethod(polymul)
_div = staticmethod(polydiv)
_pow = staticmethod(polypow)
_val = staticmethod(polyval)
_int = staticmethod(polyint)
_der = staticmethod(polyder)
_fit = staticmethod(polyfit)
_line = staticmethod(polyline)
_roots = staticmethod(polyroots)
_fromroots = staticmethod(polyfromroots)
# Virtual properties
nickname = 'poly'
domain = np.array(polydomain)
window = np.array(polydomain)
|
bsd-3-clause
|
mtp401/airflow
|
airflow/www/forms.py
|
51
|
1466
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from datetime import datetime
from flask_admin.form import DateTimePickerWidget
from wtforms import DateTimeField, SelectField
from flask_wtf import Form
class DateTimeForm(Form):
# Date filter form needed for gantt and graph view
execution_date = DateTimeField(
"Execution date", widget=DateTimePickerWidget())
class DateTimeWithNumRunsForm(Form):
# Date time and number of runs form for tree view, task duration
# and landing times
base_date = DateTimeField(
"Anchor date", widget=DateTimePickerWidget(), default=datetime.now())
num_runs = SelectField("Number of runs", default=25, choices=(
(5, "5"),
(25, "25"),
(50, "50"),
(100, "100"),
(365, "365"),
))
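# A minimal usage sketch (hypothetical view code, not part of this file):
# inside a Flask request context the form is typically bound to the query
# string, e.g.
#
# form = DateTimeWithNumRunsForm(request.args)
# base_date = form.base_date.data      # anchor datetime for the view
# num_runs = int(form.num_runs.data)   # one of 5/25/50/100/365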
|
apache-2.0
|
mne-tools/mne-tools.github.io
|
0.11/_downloads/plot_estimate_covariance_matrix_baseline.py
|
22
|
1854
|
"""
===============================================
Estimate covariance matrix from Epochs baseline
===============================================
We first define a set of Epochs from events and a raw file.
Then we estimate the noise covariance of prestimulus data,
a.k.a. baseline.
"""
# Author: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import mne
from mne import io
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
event_id, tmin, tmax = 1, -0.2, 0.5
raw = io.Raw(fname)
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
# Setup for reading the raw data
raw = io.Raw(raw_fname)
events = mne.read_events(event_fname)
# Set up pick list: MEG + EEG + EOG - bad channels (modify to your needs)
include = [] # or stim channels ['STI 014']
raw.info['bads'] += ['EEG 053'] # bads + 1 more
# pick MEG, EEG and EOG channels
picks = mne.pick_types(raw.info, meg=True, eeg=True, stim=False, eog=True,
include=include, exclude='bads')
# Read epochs, with proj off by default so we can plot either way later
reject = dict(grad=4000e-13, mag=4e-12, eeg=80e-6, eog=150e-6)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject, proj=False)
# Compute the covariance on baseline
cov = mne.compute_covariance(epochs, tmin=None, tmax=0)
print(cov)
###############################################################################
# Show covariance
mne.viz.plot_cov(cov, raw.info, colorbar=True, proj=True)
# try setting proj to False to see the effect
|
bsd-3-clause
|
kingvuplus/PB-gui
|
lib/actions/parseactions.py
|
106
|
1900
|
# takes a header file, outputs action ids
import tokenize, sys, string
def filter(g):
while 1:
t = g.next()
if t[1] == "/*":
while g.next()[1] != "*/":
pass
continue
if t[1] == "//":
while g.next()[1] != "\n":
pass
continue
if t[1] != "\n":
# print t
yield t[1]
def do_file(f, mode):
tokens = filter(tokenize.generate_tokens(open(f, 'r').readline))
sys.stderr.write("parsing %s\n" % f)
state = 0
classstate = 0
firsthit = 1
while 1:
try:
t = tokens.next()
except:
break
if t == "class":
classname = tokens.next()
classstate = state
if t == "{":
state += 1
if t == "}":
state -= 1
if t == "enum" and state == classstate + 1:
actionname = tokens.next()
if actionname == "{":
while tokens.next() != "}":
pass
continue
if actionname[-7:] == "Actions":
if tokens.next() != "{":
try:
print classname
except:
pass
try:
print actionname
except:
pass
raise Exception("action enum must be simple.")
counter = 0
while 1:
t = tokens.next()
if t == "=":
tokens.next()
t = tokens.next()
if t == "}":
break
if counter:
if t != ",":
raise Exception("no comma")
t = tokens.next()
if firsthit:
if mode == "include":
# hack hack hack!!
print "#include <lib/" + '/'.join(f.split('/')[-2:]) + ">"
else:
print "\t// " + f
firsthit = 0
if mode == "parse":
print "{\"" + actionname + "\", \"" + t + "\", " + string.join((classname, t), "::") + "},"
counter += 1
mode = sys.argv[1]
if mode == "parse":
print """
/* generated by parseactions.py - do not modify! */
struct eActionList
{
const char *m_context, *m_action;
int m_id;
} actions[]={"""
for x in sys.argv[2:]:
do_file(x, mode)
if mode == "parse":
print "};"
|
gpl-2.0
|
aajanki/youtube-dl
|
youtube_dl/extractor/twitch.py
|
9
|
13996
|
# coding: utf-8
from __future__ import unicode_literals
import itertools
import re
import random
from .common import InfoExtractor
from ..compat import (
compat_str,
compat_urllib_parse,
compat_urllib_request,
)
from ..utils import (
ExtractorError,
parse_iso8601,
)
class TwitchBaseIE(InfoExtractor):
_VALID_URL_BASE = r'https?://(?:www\.)?twitch\.tv'
_API_BASE = 'https://api.twitch.tv'
_USHER_BASE = 'http://usher.twitch.tv'
_LOGIN_URL = 'https://secure.twitch.tv/user/login'
_LOGIN_POST_URL = 'https://secure-login.twitch.tv/login'
_NETRC_MACHINE = 'twitch'
def _handle_error(self, response):
if not isinstance(response, dict):
return
error = response.get('error')
if error:
raise ExtractorError(
'%s returned error: %s - %s' % (self.IE_NAME, error, response.get('message')),
expected=True)
def _download_json(self, url, video_id, note='Downloading JSON metadata'):
headers = {
'Referer': 'http://api.twitch.tv/crossdomain/receiver.html?v=2',
'X-Requested-With': 'XMLHttpRequest',
}
for cookie in self._downloader.cookiejar:
if cookie.name == 'api_token':
headers['Twitch-Api-Token'] = cookie.value
request = compat_urllib_request.Request(url, headers=headers)
response = super(TwitchBaseIE, self)._download_json(request, video_id, note)
self._handle_error(response)
return response
def _real_initialize(self):
self._login()
def _login(self):
(username, password) = self._get_login_info()
if username is None:
return
login_page = self._download_webpage(
self._LOGIN_URL, None, 'Downloading login page')
authenticity_token = self._search_regex(
r'<input name="authenticity_token" type="hidden" value="([^"]+)"',
login_page, 'authenticity token')
login_form = {
'utf8': '✓'.encode('utf-8'),
'authenticity_token': authenticity_token,
'redirect_on_login': '',
'embed_form': 'false',
'mp_source_action': 'login-button',
'follow': '',
'login': username,
'password': password,
}
request = compat_urllib_request.Request(
self._LOGIN_POST_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
request.add_header('Referer', self._LOGIN_URL)
response = self._download_webpage(
request, None, 'Logging in as %s' % username)
m = re.search(
r"id=([\"'])login_error_message\1[^>]*>(?P<msg>[^<]+)", response)
if m:
raise ExtractorError(
'Unable to login: %s' % m.group('msg').strip(), expected=True)
def _prefer_source(self, formats):
try:
source = next(f for f in formats if f['format_id'] == 'Source')
source['preference'] = 10
except StopIteration:
pass # No Source stream present
self._sort_formats(formats)
class TwitchItemBaseIE(TwitchBaseIE):
def _download_info(self, item, item_id):
return self._extract_info(self._download_json(
'%s/kraken/videos/%s%s' % (self._API_BASE, item, item_id), item_id,
'Downloading %s info JSON' % self._ITEM_TYPE))
def _extract_media(self, item_id):
info = self._download_info(self._ITEM_SHORTCUT, item_id)
response = self._download_json(
'%s/api/videos/%s%s' % (self._API_BASE, self._ITEM_SHORTCUT, item_id), item_id,
'Downloading %s playlist JSON' % self._ITEM_TYPE)
entries = []
chunks = response['chunks']
qualities = list(chunks.keys())
for num, fragment in enumerate(zip(*chunks.values()), start=1):
formats = []
for fmt_num, fragment_fmt in enumerate(fragment):
format_id = qualities[fmt_num]
fmt = {
'url': fragment_fmt['url'],
'format_id': format_id,
'quality': 1 if format_id == 'live' else 0,
}
m = re.search(r'^(?P<height>\d+)[Pp]', format_id)
if m:
fmt['height'] = int(m.group('height'))
formats.append(fmt)
self._sort_formats(formats)
entry = dict(info)
entry['id'] = '%s_%d' % (entry['id'], num)
entry['title'] = '%s part %d' % (entry['title'], num)
entry['formats'] = formats
entries.append(entry)
return self.playlist_result(entries, info['id'], info['title'])
def _extract_info(self, info):
return {
'id': info['_id'],
'title': info['title'],
'description': info['description'],
'duration': info['length'],
'thumbnail': info['preview'],
'uploader': info['channel']['display_name'],
'uploader_id': info['channel']['name'],
'timestamp': parse_iso8601(info['recorded_at']),
'view_count': info['views'],
}
def _real_extract(self, url):
return self._extract_media(self._match_id(url))
class TwitchVideoIE(TwitchItemBaseIE):
IE_NAME = 'twitch:video'
_VALID_URL = r'%s/[^/]+/b/(?P<id>\d+)' % TwitchBaseIE._VALID_URL_BASE
_ITEM_TYPE = 'video'
_ITEM_SHORTCUT = 'a'
_TEST = {
'url': 'http://www.twitch.tv/riotgames/b/577357806',
'info_dict': {
'id': 'a577357806',
'title': 'Worlds Semifinals - Star Horn Royal Club vs. OMG',
},
'playlist_mincount': 12,
}
class TwitchChapterIE(TwitchItemBaseIE):
IE_NAME = 'twitch:chapter'
_VALID_URL = r'%s/[^/]+/c/(?P<id>\d+)' % TwitchBaseIE._VALID_URL_BASE
_ITEM_TYPE = 'chapter'
_ITEM_SHORTCUT = 'c'
_TESTS = [{
'url': 'http://www.twitch.tv/acracingleague/c/5285812',
'info_dict': {
'id': 'c5285812',
'title': 'ACRL Off Season - Sports Cars @ Nordschleife',
},
'playlist_mincount': 3,
}, {
'url': 'http://www.twitch.tv/tsm_theoddone/c/2349361',
'only_matching': True,
}]
class TwitchVodIE(TwitchItemBaseIE):
IE_NAME = 'twitch:vod'
_VALID_URL = r'%s/[^/]+/v/(?P<id>\d+)' % TwitchBaseIE._VALID_URL_BASE
_ITEM_TYPE = 'vod'
_ITEM_SHORTCUT = 'v'
_TEST = {
'url': 'http://www.twitch.tv/ksptv/v/3622000',
'info_dict': {
'id': 'v3622000',
'ext': 'mp4',
'title': '''KSPTV: Squadcast: "Everyone's on vacation so here's Dahud" Edition!''',
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 6951,
'timestamp': 1419028564,
'upload_date': '20141219',
'uploader': 'KSPTV',
'uploader_id': 'ksptv',
'view_count': int,
},
'params': {
# m3u8 download
'skip_download': True,
},
}
def _real_extract(self, url):
item_id = self._match_id(url)
info = self._download_info(self._ITEM_SHORTCUT, item_id)
access_token = self._download_json(
'%s/api/vods/%s/access_token' % (self._API_BASE, item_id), item_id,
'Downloading %s access token' % self._ITEM_TYPE)
formats = self._extract_m3u8_formats(
'%s/vod/%s?nauth=%s&nauthsig=%s'
% (self._USHER_BASE, item_id, access_token['token'], access_token['sig']),
item_id, 'mp4')
self._prefer_source(formats)
info['formats'] = formats
return info
class TwitchPlaylistBaseIE(TwitchBaseIE):
_PLAYLIST_URL = '%s/kraken/channels/%%s/videos/?offset=%%d&limit=%%d' % TwitchBaseIE._API_BASE
_PAGE_LIMIT = 100
def _extract_playlist(self, channel_id):
info = self._download_json(
'%s/kraken/channels/%s' % (self._API_BASE, channel_id),
channel_id, 'Downloading channel info JSON')
channel_name = info.get('display_name') or info.get('name')
entries = []
offset = 0
limit = self._PAGE_LIMIT
for counter in itertools.count(1):
response = self._download_json(
self._PLAYLIST_URL % (channel_id, offset, limit),
channel_id, 'Downloading %s videos JSON page %d' % (self._PLAYLIST_TYPE, counter))
page_entries = self._extract_playlist_page(response)
if not page_entries:
break
entries.extend(page_entries)
offset += limit
return self.playlist_result(
[self.url_result(entry) for entry in set(entries)],
channel_id, channel_name)
def _extract_playlist_page(self, response):
videos = response.get('videos')
return [video['url'] for video in videos] if videos else []
def _real_extract(self, url):
return self._extract_playlist(self._match_id(url))
class TwitchProfileIE(TwitchPlaylistBaseIE):
IE_NAME = 'twitch:profile'
_VALID_URL = r'%s/(?P<id>[^/]+)/profile/?(?:\#.*)?$' % TwitchBaseIE._VALID_URL_BASE
_PLAYLIST_TYPE = 'profile'
_TEST = {
'url': 'http://www.twitch.tv/vanillatv/profile',
'info_dict': {
'id': 'vanillatv',
'title': 'VanillaTV',
},
'playlist_mincount': 412,
}
class TwitchPastBroadcastsIE(TwitchPlaylistBaseIE):
IE_NAME = 'twitch:past_broadcasts'
_VALID_URL = r'%s/(?P<id>[^/]+)/profile/past_broadcasts/?(?:\#.*)?$' % TwitchBaseIE._VALID_URL_BASE
_PLAYLIST_URL = TwitchPlaylistBaseIE._PLAYLIST_URL + '&broadcasts=true'
_PLAYLIST_TYPE = 'past broadcasts'
_TEST = {
'url': 'http://www.twitch.tv/spamfish/profile/past_broadcasts',
'info_dict': {
'id': 'spamfish',
'title': 'Spamfish',
},
'playlist_mincount': 54,
}
class TwitchBookmarksIE(TwitchPlaylistBaseIE):
IE_NAME = 'twitch:bookmarks'
_VALID_URL = r'%s/(?P<id>[^/]+)/profile/bookmarks/?(?:\#.*)?$' % TwitchBaseIE._VALID_URL_BASE
_PLAYLIST_URL = '%s/api/bookmark/?user=%%s&offset=%%d&limit=%%d' % TwitchBaseIE._API_BASE
_PLAYLIST_TYPE = 'bookmarks'
_TEST = {
'url': 'http://www.twitch.tv/ognos/profile/bookmarks',
'info_dict': {
'id': 'ognos',
'title': 'Ognos',
},
'playlist_mincount': 3,
}
def _extract_playlist_page(self, response):
entries = []
for bookmark in response.get('bookmarks', []):
video = bookmark.get('video')
if not video:
continue
entries.append(video['url'])
return entries
class TwitchStreamIE(TwitchBaseIE):
IE_NAME = 'twitch:stream'
_VALID_URL = r'%s/(?P<id>[^/]+)/?(?:\#.*)?$' % TwitchBaseIE._VALID_URL_BASE
_TEST = {
'url': 'http://www.twitch.tv/shroomztv',
'info_dict': {
'id': '12772022048',
'display_id': 'shroomztv',
'ext': 'mp4',
'title': 're:^ShroomzTV [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
'description': 'H1Z1 - lonewolfing with ShroomzTV | A3 Battle Royale later - @ShroomzTV',
'is_live': True,
'timestamp': 1421928037,
'upload_date': '20150122',
'uploader': 'ShroomzTV',
'uploader_id': 'shroomztv',
'view_count': int,
},
'params': {
# m3u8 download
'skip_download': True,
},
}
def _real_extract(self, url):
channel_id = self._match_id(url)
stream = self._download_json(
'%s/kraken/streams/%s' % (self._API_BASE, channel_id), channel_id,
'Downloading stream JSON').get('stream')
# Fallback on profile extraction if stream is offline
if not stream:
return self.url_result(
'http://www.twitch.tv/%s/profile' % channel_id,
'TwitchProfile', channel_id)
access_token = self._download_json(
'%s/api/channels/%s/access_token' % (self._API_BASE, channel_id), channel_id,
'Downloading channel access token')
query = {
'allow_source': 'true',
'p': random.randint(1000000, 10000000),
'player': 'twitchweb',
'segment_preference': '4',
'sig': access_token['sig'].encode('utf-8'),
'token': access_token['token'].encode('utf-8'),
}
formats = self._extract_m3u8_formats(
'%s/api/channel/hls/%s.m3u8?%s'
% (self._USHER_BASE, channel_id, compat_urllib_parse.urlencode(query)),
channel_id, 'mp4')
self._prefer_source(formats)
view_count = stream.get('viewers')
timestamp = parse_iso8601(stream.get('created_at'))
channel = stream['channel']
title = self._live_title(channel.get('display_name') or channel.get('name'))
description = channel.get('status')
thumbnails = []
for thumbnail_key, thumbnail_url in stream['preview'].items():
m = re.search(r'(?P<width>\d+)x(?P<height>\d+)\.jpg$', thumbnail_key)
if not m:
continue
thumbnails.append({
'url': thumbnail_url,
'width': int(m.group('width')),
'height': int(m.group('height')),
})
return {
'id': compat_str(stream['_id']),
'display_id': channel_id,
'title': title,
'description': description,
'thumbnails': thumbnails,
'uploader': channel.get('display_name'),
'uploader_id': channel.get('name'),
'timestamp': timestamp,
'view_count': view_count,
'formats': formats,
'is_live': True,
}
|
unlicense
|
callofdutyops/YXH2016724098982
|
eye_input.py
|
1
|
9130
|
"""Routine for decoding the eye files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
# Process images of this size. Note that this differs from the original eye
# image size of 512 x 512; the smaller size reduces memory consumption.
# If this number is altered, the entire model architecture changes and
# any trained model would need to be retrained.
IMAGE_SIZE = 32
# Global constants describing the eye data set.
NUM_CLASSES = 7
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 315
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = 35
# The folder names of eye images.
LABLES = ['AMD', 'BRVO_CRVO', 'CSC', 'DR', 'ENormal', 'FOthers', 'GPM']
def read_eye(filename_queue):
"""Reads and parses examples from eye files.
Args:
filename_queue: A queue of strings with the filenames to read from.
Returns:
An object representing a single example, with the following fields:
height: number of rows in the result (512)
width: number of columns in the result (512)
depth: number of color channels in the result (3)
key: a scalar string Tensor describing the filename & record number
for this example.
label: an int32 Tensor with the label in the range 0..6.
uint8image: a [height, width, depth] uint8 Tensor with the image data
"""
class EYERecord(object):
pass
result = EYERecord()
# Dimensions of the images in the eye dataset.
label_bytes = 1
result.height = 512
result.width = 512
result.depth = 3
# image_bytes = result.height * result.width * result.depth
# Read a record, getting filenames from the filename_queue.
reader = tf.WholeFileReader()
result.key, value = reader.read(filename_queue)
    # The label is derived from the filename (the record key) rather than the
    # file contents. An eager-mode alternative would be:
    # LABLES.index(str(result.key.eval(session=tf.get_default_session())).split('_')[0])
    # Convert the filename string to a vector of uint8.
    label_decoded = tf.decode_raw(result.key, tf.uint8)
    # The slice offset (61 here) skips the dataset directory prefix
    # "/home/hp/Documents/DeepLearning/MyProjects/Data/eye/[tr,te]/", which is
    # specific to the author's machine, so the sliced byte is the first letter
    # of the folder name. Subtracting 65 (ASCII 'A') then maps that letter
    # into the label range 0..6.
    result.label = tf.cast(tf.slice(label_decoded, [61], [label_bytes]), tf.int32)
    result.label = tf.add(result.label, -65)
result.uint8image = tf.image.decode_png(value)
return result
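# Illustrative check (not part of the original pipeline) of the label
# arithmetic above: every folder name in LABLES starts with one of the
# letters A..G, and ord('A') == 65, so first-letter-minus-65 equals the
# list index:
#
#   for i, name in enumerate(LABLES):
#       assert ord(name[0]) - 65 == i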
def _generate_image_and_label_batch(image, label, min_queue_examples,
batch_size, shuffle):
"""Construct a queued batch of images and labels.
Args:
image: 3-D Tensor of [height, width, 3] of type.float32.
label: 1-D Tensor of type.int32
min_queue_examples: int32, minimum number of samples to retain
in the queue that provides of batches of examples.
batch_size: Number of images per batch.
shuffle: boolean indicating whether to use a shuffling queue.
Returns:
images: Images. 4D tensor of [batch_size, height, width, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
"""
# Create a queue that shuffles the examples, and then
# read 'batch_size' images + labels from the example queue.
num_preprocess_threads = 4
if shuffle:
images, label_batch = tf.train.shuffle_batch(
[image, label],
batch_size=batch_size,
num_threads=num_preprocess_threads,
capacity=min_queue_examples + 3 * batch_size,
min_after_dequeue=min_queue_examples)
else:
images, label_batch = tf.train.batch(
[image, label],
batch_size=batch_size,
num_threads=num_preprocess_threads,
capacity=min_queue_examples + 3 * batch_size)
# Display the training images in the visualizer.
tf.image_summary('images', images)
return images, tf.reshape(label_batch, [batch_size])
def distorted_inputs(data_dir, batch_size):
"""Construct distorted input for eye training using the Reader ops.
Args:
data_dir: Path to the eye data directory.
batch_size: Number of images per batch.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
"""
data_dir = os.path.join(data_dir, 'tr')
filenames = []
for folder_name in LABLES:
folder_path = os.path.join(data_dir, folder_name)
filenames += [os.path.join(folder_path, f) for f in os.listdir(folder_path)]
# Create a queue that produces the filenames to read.
filename_queue = tf.train.string_input_producer(filenames)
# Read examples from files in the filename queue.
read_input = read_eye(filename_queue)
reshaped_image = tf.cast(read_input.uint8image, tf.float32)
height = IMAGE_SIZE
width = IMAGE_SIZE
# Image processing for training the network. Note the many random
# distortions applied to the image.
# Randomly crop a [height, width] section of the image.
distorted_image = tf.random_crop(reshaped_image, [height, width, 3])
# Randomly flip the image horizontally.
distorted_image = tf.image.random_flip_left_right(distorted_image)
    # Because these operations are not commutative, consider randomizing
    # the order in which they are applied.
distorted_image = tf.image.random_brightness(distorted_image,
max_delta=63)
distorted_image = tf.image.random_contrast(distorted_image,
lower=0.2, upper=1.8)
# Subtract off the mean and divide by the variance of the pixels.
float_image = tf.image.per_image_whitening(distorted_image)
# Ensure that the random shuffling has good mixing properties.
min_fraction_of_examples_in_queue = 0.1
min_queue_examples = int(NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN *
min_fraction_of_examples_in_queue)
print('Filling queue with %d eye images before starting to train. '
'This will take a few minutes.' % min_queue_examples)
# Generate a batch of images and labels by building up a queue of examples.
return _generate_image_and_label_batch(float_image, read_input.label,
min_queue_examples, batch_size,
shuffle=True)
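# Usage sketch (illustrative; the data path and batch size are placeholders,
# and this assumes the TF 0.x queue-runner API the rest of this file uses):
#
#   images, labels = distorted_inputs('/path/to/eye_data', batch_size=32)
#   with tf.Session() as sess:
#       coord = tf.train.Coordinator()
#       threads = tf.train.start_queue_runners(sess=sess, coord=coord)
#       image_batch, label_batch = sess.run([images, labels])
#       coord.request_stop()
#       coord.join(threads)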
def inputs(eval_data, data_dir, batch_size):
"""Construct input for eye evaluation using the Reader ops.
Args:
eval_data: bool, indicating if one should use the train or eval data set.
data_dir: Path to the eye data directory.
batch_size: Number of images per batch.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
"""
if not eval_data:
data_dir = os.path.join(data_dir, 'tr')
filenames = []
for folder_name in LABLES:
folder_path = os.path.join(data_dir, folder_name)
filenames += [os.path.join(folder_path, f) for f in os.listdir(folder_path)]
num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
else:
data_dir = os.path.join(data_dir, 'te')
filenames = []
for folder_name in LABLES:
folder_path = os.path.join(data_dir, folder_name)
filenames += [os.path.join(folder_path, f) for f in os.listdir(folder_path)]
num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
# Create a queue that produces the filenames to read.
filename_queue = tf.train.string_input_producer(filenames)
# Read examples from files in the filename queue.
read_input = read_eye(filename_queue)
reshaped_image = tf.cast(read_input.uint8image, tf.float32)
height = IMAGE_SIZE
width = IMAGE_SIZE
# Image processing for evaluation.
# Crop the central [height, width] of the image.
resized_image = tf.image.resize_image_with_crop_or_pad(reshaped_image,
width, height)
# Subtract off the mean and divide by the variance of the pixels.
float_image = tf.image.per_image_whitening(resized_image)
    # Fix the static shape of the Tensor.
float_image.set_shape([height, width, 3])
# Ensure that the random shuffling has good mixing properties.
min_fraction_of_examples_in_queue = 0.1
min_queue_examples = int(num_examples_per_epoch *
min_fraction_of_examples_in_queue)
# Generate a batch of images and labels by building up a queue of examples.
return _generate_image_and_label_batch(float_image, read_input.label,
min_queue_examples, batch_size,
shuffle=False)
|
mit
|
LukeCarrier/py3k-pexpect
|
tests/platform_tests/test_handler.py
|
1
|
1606
|
#!/usr/bin/env python
import signal, os, time, errno, pty, sys, fcntl, tty
GLOBAL_SIGCHLD_RECEIVED = 0
def nonblock (fd):
# if O_NDELAY is set read() returns 0 (ambiguous with EOF).
# if O_NONBLOCK is set read() returns -1 and sets errno to EAGAIN
original_flags = fcntl.fcntl (fd, fcntl.F_GETFL, 0)
flags = original_flags | os.O_NONBLOCK
fcntl.fcntl(fd, fcntl.F_SETFL, flags)
return original_flags
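# Illustrative note (not part of the original test): once O_NONBLOCK is set,
# an empty read on the pty raises OSError with errno set to EAGAIN instead
# of blocking, e.g.:
#
#   try:
#       os.read(fd, 1)
#   except OSError as e:
#       assert e.errno == errno.EAGAIN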
def signal_handler (signum, frame):
print('<HANDLER>')
global GLOBAL_SIGCHLD_RECEIVED
status = os.waitpid (-1, os.WNOHANG)
if status[0] == 0:
print('No process for waitpid:', status)
else:
print('Status:', status)
print('WIFEXITED(status):', os.WIFEXITED(status[1]))
print('WEXITSTATUS(status):', os.WEXITSTATUS(status[1]))
GLOBAL_SIGCHLD_RECEIVED = 1
def main ():
signal.signal (signal.SIGCHLD, signal_handler)
pid, fd = pty.fork()
if pid == 0:
        os.write (sys.stdout.fileno(), b'This is a test.\nThis is a test.')
time.sleep(10000)
nonblock (fd)
    tty.setraw(fd) # STDIN_FILENO
print('Sending SIGKILL to child pid:', pid)
time.sleep(2)
os.kill (pid, signal.SIGKILL)
print('Entering to sleep...')
try:
time.sleep(2)
except:
print('Sleep interrupted')
try:
os.kill(pid, 0)
print('\tChild is alive. This is ambiguous because it may be a Zombie.')
except OSError as e:
print('\tChild appears to be dead.')
# print str(e)
print()
print('Reading from master fd:', os.read (fd, 1000))
if __name__ == '__main__':
main ()
|
mit
|
Gui13/CouchPotatoServer
|
libs/xmpp/dispatcher.py
|
200
|
17974
|
## dispatcher.py
##
## Copyright (C) 2003-2005 Alexey "Snake" Nezhdanov
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2, or (at your option)
## any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
# $Id: dispatcher.py,v 1.42 2007/05/18 23:18:36 normanr Exp $
"""
Main xmpppy mechanism. Provides the library with methods to assign different handlers
to different XMPP stanzas.
Contains one tunable attribute: DefaultTimeout (25 seconds by default). It defines the time that
the Dispatcher.SendAndWaitForResponse method will wait for a reply stanza before giving up.
"""
import simplexml,time,sys
from protocol import *
from client import PlugIn
DefaultTimeout=25
ID=0
class Dispatcher(PlugIn):
""" Ancestor of PlugIn class. Handles XMPP stream, i.e. aware of stream headers.
Can be plugged out/in to restart these headers (used for SASL f.e.). """
def __init__(self):
PlugIn.__init__(self)
DBG_LINE='dispatcher'
self.handlers={}
self._expected={}
self._defaultHandler=None
self._pendingExceptions=[]
self._eventHandler=None
self._cycleHandlers=[]
self._exported_methods=[self.Process,self.RegisterHandler,self.RegisterDefaultHandler,\
self.RegisterEventHandler,self.UnregisterCycleHandler,self.RegisterCycleHandler,\
self.RegisterHandlerOnce,self.UnregisterHandler,self.RegisterProtocol,\
self.WaitForResponse,self.SendAndWaitForResponse,self.send,self.disconnect,\
self.SendAndCallForResponse, ]
def dumpHandlers(self):
""" Return set of user-registered callbacks in it's internal format.
Used within the library to carry user handlers set over Dispatcher replugins. """
return self.handlers
def restoreHandlers(self,handlers):
""" Restores user-registered callbacks structure from dump previously obtained via dumpHandlers.
Used within the library to carry user handlers set over Dispatcher replugins. """
self.handlers=handlers
def _init(self):
""" Registers default namespaces/protocols/handlers. Used internally. """
self.RegisterNamespace('unknown')
self.RegisterNamespace(NS_STREAMS)
self.RegisterNamespace(self._owner.defaultNamespace)
self.RegisterProtocol('iq',Iq)
self.RegisterProtocol('presence',Presence)
self.RegisterProtocol('message',Message)
self.RegisterDefaultHandler(self.returnStanzaHandler)
self.RegisterHandler('error',self.streamErrorHandler,xmlns=NS_STREAMS)
def plugin(self, owner):
""" Plug the Dispatcher instance into Client class instance and send initial stream header. Used internally."""
self._init()
for method in self._old_owners_methods:
if method.__name__=='send': self._owner_send=method; break
self._owner.lastErrNode=None
self._owner.lastErr=None
self._owner.lastErrCode=None
self.StreamInit()
def plugout(self):
""" Prepares instance to be destructed. """
self.Stream.dispatch=None
self.Stream.DEBUG=None
self.Stream.features=None
self.Stream.destroy()
def StreamInit(self):
""" Send an initial stream header. """
self.Stream=simplexml.NodeBuilder()
self.Stream._dispatch_depth=2
self.Stream.dispatch=self.dispatch
self.Stream.stream_header_received=self._check_stream_start
self._owner.debug_flags.append(simplexml.DBG_NODEBUILDER)
self.Stream.DEBUG=self._owner.DEBUG
self.Stream.features=None
self._metastream=Node('stream:stream')
self._metastream.setNamespace(self._owner.Namespace)
self._metastream.setAttr('version','1.0')
self._metastream.setAttr('xmlns:stream',NS_STREAMS)
self._metastream.setAttr('to',self._owner.Server)
self._owner.send("<?xml version='1.0'?>%s>"%str(self._metastream)[:-2])
def _check_stream_start(self,ns,tag,attrs):
if ns<>NS_STREAMS or tag<>'stream':
raise ValueError('Incorrect stream start: (%s,%s). Terminating.'%(tag,ns))
def Process(self, timeout=0):
""" Check incoming stream for data waiting. If "timeout" is positive - block for as max. this time.
Returns:
1) length of processed data if some data were processed;
2) '0' string if no data were processed but link is alive;
3) 0 (zero) if underlying connection is closed.
Take note that in case of disconnection detect during Process() call
disconnect handlers are called automatically.
"""
for handler in self._cycleHandlers: handler(self)
if len(self._pendingExceptions) > 0:
_pendingException = self._pendingExceptions.pop()
raise _pendingException[0], _pendingException[1], _pendingException[2]
if self._owner.Connection.pending_data(timeout):
try: data=self._owner.Connection.receive()
except IOError: return
self.Stream.Parse(data)
if len(self._pendingExceptions) > 0:
_pendingException = self._pendingExceptions.pop()
raise _pendingException[0], _pendingException[1], _pendingException[2]
if data: return len(data)
return '0' # It means that nothing is received but link is alive.
def RegisterNamespace(self,xmlns,order='info'):
""" Creates internal structures for newly registered namespace.
You can register handlers for this namespace afterwards. By default one namespace
already registered (jabber:client or jabber:component:accept depending on context. """
self.DEBUG('Registering namespace "%s"'%xmlns,order)
self.handlers[xmlns]={}
self.RegisterProtocol('unknown',Protocol,xmlns=xmlns)
self.RegisterProtocol('default',Protocol,xmlns=xmlns)
def RegisterProtocol(self,tag_name,Proto,xmlns=None,order='info'):
""" Used to declare some top-level stanza name to dispatcher.
Needed to start registering handlers for such stanzas.
Iq, message and presence protocols are registered by default. """
if not xmlns: xmlns=self._owner.defaultNamespace
self.DEBUG('Registering protocol "%s" as %s(%s)'%(tag_name,Proto,xmlns), order)
self.handlers[xmlns][tag_name]={type:Proto, 'default':[]}
def RegisterNamespaceHandler(self,xmlns,handler,typ='',ns='', makefirst=0, system=0):
""" Register handler for processing all stanzas for specified namespace. """
self.RegisterHandler('default', handler, typ, ns, xmlns, makefirst, system)
def RegisterHandler(self,name,handler,typ='',ns='',xmlns=None, makefirst=0, system=0):
"""Register user callback as stanzas handler of declared type. Callback must take
(if chained, see later) arguments: dispatcher instance (for replying), incomed
return of previous handlers.
The callback must raise xmpp.NodeProcessed just before return if it want preven
callbacks to be called with the same stanza as argument _and_, more importantly
library from returning stanza to sender with error set (to be enabled in 0.2 ve
Arguments:
"name" - name of stanza. F.e. "iq".
"handler" - user callback.
"typ" - value of stanza's "type" attribute. If not specified any value match
"ns" - namespace of child that stanza must contain.
"chained" - chain together output of several handlers.
"makefirst" - insert handler in the beginning of handlers list instead of
adding it to the end. Note that more common handlers (i.e. w/o "typ" and "
will be called first nevertheless.
"system" - call handler even if NodeProcessed Exception were raised already.
"""
if not xmlns: xmlns=self._owner.defaultNamespace
self.DEBUG('Registering handler %s for "%s" type->%s ns->%s(%s)'%(handler,name,typ,ns,xmlns), 'info')
if not typ and not ns: typ='default'
if not self.handlers.has_key(xmlns): self.RegisterNamespace(xmlns,'warn')
if not self.handlers[xmlns].has_key(name): self.RegisterProtocol(name,Protocol,xmlns,'warn')
if not self.handlers[xmlns][name].has_key(typ+ns): self.handlers[xmlns][name][typ+ns]=[]
if makefirst: self.handlers[xmlns][name][typ+ns].insert(0,{'func':handler,'system':system})
else: self.handlers[xmlns][name][typ+ns].append({'func':handler,'system':system})
def RegisterHandlerOnce(self,name,handler,typ='',ns='',xmlns=None,makefirst=0, system=0):
""" Unregister handler after first call (not implemented yet). """
if not xmlns: xmlns=self._owner.defaultNamespace
self.RegisterHandler(name, handler, typ, ns, xmlns, makefirst, system)
def UnregisterHandler(self,name,handler,typ='',ns='',xmlns=None):
""" Unregister handler. "typ" and "ns" must be specified exactly the same as with registering."""
if not xmlns: xmlns=self._owner.defaultNamespace
if not self.handlers.has_key(xmlns): return
if not typ and not ns: typ='default'
for pack in self.handlers[xmlns][name][typ+ns]:
if handler==pack['func']: break
else: pack=None
try: self.handlers[xmlns][name][typ+ns].remove(pack)
except ValueError: pass
def RegisterDefaultHandler(self,handler):
""" Specify the handler that will be used if no NodeProcessed exception were raised.
This is returnStanzaHandler by default. """
self._defaultHandler=handler
def RegisterEventHandler(self,handler):
""" Register handler that will process events. F.e. "FILERECEIVED" event. """
self._eventHandler=handler
def returnStanzaHandler(self,conn,stanza):
""" Return stanza back to the sender with <feature-not-implemennted/> error set. """
if stanza.getType() in ['get','set']:
conn.send(Error(stanza,ERR_FEATURE_NOT_IMPLEMENTED))
def streamErrorHandler(self,conn,error):
name,text='error',error.getData()
for tag in error.getChildren():
if tag.getNamespace()==NS_XMPP_STREAMS:
if tag.getName()=='text': text=tag.getData()
else: name=tag.getName()
if name in stream_exceptions.keys(): exc=stream_exceptions[name]
else: exc=StreamError
raise exc((name,text))
def RegisterCycleHandler(self,handler):
""" Register handler that will be called on every Dispatcher.Process() call. """
if handler not in self._cycleHandlers: self._cycleHandlers.append(handler)
def UnregisterCycleHandler(self,handler):
""" Unregister handler that will is called on every Dispatcher.Process() call."""
if handler in self._cycleHandlers: self._cycleHandlers.remove(handler)
def Event(self,realm,event,data):
""" Raise some event. Takes three arguments:
1) "realm" - scope of event. Usually a namespace.
2) "event" - the event itself. F.e. "SUCESSFULL SEND".
3) data that comes along with event. Depends on event."""
if self._eventHandler: self._eventHandler(realm,event,data)
def dispatch(self,stanza,session=None,direct=0):
""" Main procedure that performs XMPP stanza recognition and calling apppropriate handlers for it.
Called internally. """
if not session: session=self
session.Stream._mini_dom=None
name=stanza.getName()
if not direct and self._owner._route:
if name == 'route':
if stanza.getAttr('error') == None:
if len(stanza.getChildren()) == 1:
stanza = stanza.getChildren()[0]
name=stanza.getName()
else:
for each in stanza.getChildren():
self.dispatch(each,session,direct=1)
return
elif name == 'presence':
return
elif name in ('features','bind'):
pass
else:
raise UnsupportedStanzaType(name)
if name=='features': session.Stream.features=stanza
xmlns=stanza.getNamespace()
if not self.handlers.has_key(xmlns):
self.DEBUG("Unknown namespace: " + xmlns,'warn')
xmlns='unknown'
if not self.handlers[xmlns].has_key(name):
self.DEBUG("Unknown stanza: " + name,'warn')
name='unknown'
else:
self.DEBUG("Got %s/%s stanza"%(xmlns,name), 'ok')
if stanza.__class__.__name__=='Node': stanza=self.handlers[xmlns][name][type](node=stanza)
typ=stanza.getType()
if not typ: typ=''
stanza.props=stanza.getProperties()
ID=stanza.getID()
session.DEBUG("Dispatching %s stanza with type->%s props->%s id->%s"%(name,typ,stanza.props,ID),'ok')
list=['default'] # we will use all handlers:
if self.handlers[xmlns][name].has_key(typ): list.append(typ) # from very common...
for prop in stanza.props:
if self.handlers[xmlns][name].has_key(prop): list.append(prop)
if typ and self.handlers[xmlns][name].has_key(typ+prop): list.append(typ+prop) # ...to very particular
chain=self.handlers[xmlns]['default']['default']
for key in list:
if key: chain = chain + self.handlers[xmlns][name][key]
output=''
if session._expected.has_key(ID):
user=0
if type(session._expected[ID])==type(()):
cb,args=session._expected[ID]
session.DEBUG("Expected stanza arrived. Callback %s(%s) found!"%(cb,args),'ok')
try: cb(session,stanza,**args)
except Exception, typ:
if typ.__class__.__name__<>'NodeProcessed': raise
else:
session.DEBUG("Expected stanza arrived!",'ok')
session._expected[ID]=stanza
else: user=1
for handler in chain:
if user or handler['system']:
try:
handler['func'](session,stanza)
except Exception, typ:
if typ.__class__.__name__<>'NodeProcessed':
self._pendingExceptions.insert(0, sys.exc_info())
return
user=0
if user and self._defaultHandler: self._defaultHandler(session,stanza)
def WaitForResponse(self, ID, timeout=DefaultTimeout):
""" Block and wait until stanza with specific "id" attribute will come.
If no such stanza is arrived within timeout, return None.
If operation failed for some reason then owner's attributes
lastErrNode, lastErr and lastErrCode are set accordingly. """
self._expected[ID]=None
has_timed_out=0
abort_time=time.time() + timeout
self.DEBUG("Waiting for ID:%s with timeout %s..." % (ID,timeout),'wait')
while not self._expected[ID]:
if not self.Process(0.04):
self._owner.lastErr="Disconnect"
return None
if time.time() > abort_time:
self._owner.lastErr="Timeout"
return None
response=self._expected[ID]
del self._expected[ID]
if response.getErrorCode():
self._owner.lastErrNode=response
self._owner.lastErr=response.getError()
self._owner.lastErrCode=response.getErrorCode()
return response
def SendAndWaitForResponse(self, stanza, timeout=DefaultTimeout):
""" Put stanza on the wire and wait for recipient's response to it. """
return self.WaitForResponse(self.send(stanza),timeout)
def SendAndCallForResponse(self, stanza, func, args={}):
""" Put stanza on the wire and call back when recipient replies.
Additional callback arguments can be specified in args. """
self._expected[self.send(stanza)]=(func,args)
def send(self,stanza):
""" Serialise stanza and put it on the wire. Assign an unique ID to it before send.
Returns assigned ID."""
if type(stanza) in [type(''), type(u'')]: return self._owner_send(stanza)
if not isinstance(stanza,Protocol): _ID=None
elif not stanza.getID():
global ID
ID+=1
_ID=`ID`
stanza.setID(_ID)
else: _ID=stanza.getID()
if self._owner._registered_name and not stanza.getAttr('from'): stanza.setAttr('from',self._owner._registered_name)
if self._owner._route and stanza.getName()!='bind':
to=self._owner.Server
if stanza.getTo() and stanza.getTo().getDomain():
to=stanza.getTo().getDomain()
frm=stanza.getFrom()
if frm.getDomain():
frm=frm.getDomain()
route=Protocol('route',to=to,frm=frm,payload=[stanza])
stanza=route
stanza.setNamespace(self._owner.Namespace)
stanza.setParent(self._metastream)
self._owner_send(stanza)
return _ID
def disconnect(self):
""" Send a stream terminator and and handle all incoming stanzas before stream closure. """
self._owner_send('</stream:stream>')
while self.Process(1): pass
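# Usage sketch (illustrative, not part of upstream xmpppy; the server, JID
# and credentials are placeholders). A client typically reaches the methods
# this Dispatcher exports through xmpp.Client and then polls Process():
#
#   import xmpp
#   cl = xmpp.Client('example.com', debug=[])
#   cl.connect()
#   cl.auth('user', 'secret')
#   def on_message(session, stanza):
#       print 'Got message:', stanza.getBody()
#       raise xmpp.NodeProcessed  # stop further handlers for this stanza
#   cl.RegisterHandler('message', on_message)
#   while cl.Process(1):
#       pass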
|
gpl-3.0
|
Livefyre/django-cms
|
cms/utils/django_load.py
|
1
|
3693
|
# -*- coding: utf-8 -*-
"""
This is revision from 3058ab9d9d4875589638cc45e84b59e7e1d7c9c3 of
https://github.com/ojii/django-load.
ANY changes to this file, be it upstream fixes or changes for the cms *must* be
documented clearly within this file with comments.
For documentation on how to use the functions described in this file, please
refer to http://django-load.readthedocs.org/en/latest/index.html.
"""
import traceback # changed
from django.utils.importlib import import_module
from django.utils.six.moves import filter, map
from .compat.dj import installed_apps
def get_module(app, modname, verbose, failfast):
"""
Internal function to load a module from a single app.
"""
module_name = '%s.%s' % (app, modname)
# the module *should* exist - raise an error if it doesn't
app_mod = import_module(app)
try:
module = import_module(module_name)
except ImportError:
# this ImportError will be due to the module not existing
# so here we can silently ignore it. But an ImportError
# when we import_module() should not be ignored
if failfast:
raise
elif verbose:
print(u"Could not find %r from %r" % (modname, app)) # changed
traceback.print_exc() # changed
return None
if verbose:
print(u"Loaded %r from %r" % (modname, app))
return module
def load(modname, verbose=False, failfast=False):
"""
Loads all modules with name 'modname' from all installed apps.
If verbose is True, debug information will be printed to stdout.
    If failfast is True, import errors will not be suppressed.
"""
for app in installed_apps():
get_module(app, modname, verbose, failfast)
def iterload(modname, verbose=False, failfast=False):
"""
    Loads all modules with name 'modname' from all installed apps and returns
    an iterator of those modules.
    If verbose is True, debug information will be printed to stdout.
    If failfast is True, import errors will not be suppressed.
"""
return filter(None, (get_module(app, modname, verbose, failfast)
for app in installed_apps()))
def load_object(import_path):
"""
Loads an object from an 'import_path', like in MIDDLEWARE_CLASSES and the
likes.
Import paths should be: "mypackage.mymodule.MyObject". It then imports the
module up until the last dot and tries to get the attribute after that dot
from the imported module.
If the import path does not contain any dots, a TypeError is raised.
If the module cannot be imported, an ImportError is raised.
    If the attribute does not exist in the module, an AttributeError is raised.
"""
if '.' not in import_path:
raise TypeError(
"'import_path' argument to 'django_load.core.load_object' must "
"contain at least one dot."
)
module_name, object_name = import_path.rsplit('.', 1)
module = import_module(module_name)
return getattr(module, object_name)
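# Usage sketch (illustrative): 'json.JSONEncoder' is an arbitrary stdlib
# dotted path chosen for the example, not something the cms itself loads:
#
#   encoder_cls = load_object('json.JSONEncoder')
#   assert encoder_cls.__name__ == 'JSONEncoder'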
def iterload_objects(import_paths):
"""
Load a list of objects.
"""
return map(load_object, import_paths)
def get_subclasses(c):
"""
Get all subclasses of a given class
"""
return c.__subclasses__() + sum(map(get_subclasses, c.__subclasses__()), [])
def load_from_file(module_path):
"""
Load a python module from its absolute filesystem path
"""
from imp import load_module, PY_SOURCE
imported = None
if module_path:
with open(module_path, 'r') as openfile:
imported = load_module("mod", openfile, module_path, ('imported', 'r', PY_SOURCE))
return imported
|
bsd-3-clause
|
tdsmith/numpy
|
numpy/distutils/from_template.py
|
164
|
7822
|
#!/usr/bin/python
"""
process_file(filename)
takes templated file .xxx.src and produces .xxx file where .xxx
is .pyf .f90 or .f using the following template rules:
'<..>' denotes a template.
All function and subroutine blocks in a source file with names that
contain '<..>' will be replicated according to the rules in '<..>'.
The number of comma-separated words in '<..>' will determine the number of
replicates.
'<..>' may have two different forms, named and short. For example,
named:
<p=d,s,z,c> where anywhere inside a block '<p>' will be replaced with
'd', 's', 'z', and 'c' for each replicate of the block.
<_c> is already defined: <_c=s,d,c,z>
<_t> is already defined: <_t=real,double precision,complex,double complex>
short:
<s,d,c,z>, a short form of the named, useful when no <p> appears inside
a block.
In general, '<..>' contains a comma-separated list of arbitrary
expressions. If such an expression must contain a comma, '<', or '>',
prepend that character with a backslash.
If an expression matches '\\<index>' then it will be replaced
by the <index>-th expression.
Note that all '<..>' forms in a block must have the same number of
comma-separated entries.
Predefined named template rules:
<prefix=s,d,c,z>
<ftype=real,double precision,complex,double complex>
<ftypereal=real,double precision,\\0,\\1>
<ctype=float,double,complex_float,complex_double>
<ctypereal=float,double,\\0,\\1>
"""
from __future__ import division, absolute_import, print_function
__all__ = ['process_str', 'process_file']
import os
import sys
import re
routine_start_re = re.compile(r'(\n|\A)(( (\$|\*))|)\s*(subroutine|function)\b', re.I)
routine_end_re = re.compile(r'\n\s*end\s*(subroutine|function)\b.*(\n|\Z)', re.I)
function_start_re = re.compile(r'\n (\$|\*)\s*function\b', re.I)
def parse_structure(astr):
""" Return a list of tuples for each function or subroutine each
tuple is the start and end of a subroutine or function to be
expanded.
"""
spanlist = []
ind = 0
while True:
m = routine_start_re.search(astr, ind)
if m is None:
break
start = m.start()
if function_start_re.match(astr, start, m.end()):
while True:
i = astr.rfind('\n', ind, start)
if i==-1:
break
start = i
if astr[i:i+7]!='\n $':
break
start += 1
m = routine_end_re.search(astr, m.end())
ind = end = m and m.end()-1 or len(astr)
spanlist.append((start, end))
return spanlist
template_re = re.compile(r"<\s*(\w[\w\d]*)\s*>")
named_re = re.compile(r"<\s*(\w[\w\d]*)\s*=\s*(.*?)\s*>")
list_re = re.compile(r"<\s*((.*?))\s*>")
def find_repl_patterns(astr):
reps = named_re.findall(astr)
names = {}
for rep in reps:
name = rep[0].strip() or unique_key(names)
repl = rep[1].replace('\,', '@comma@')
thelist = conv(repl)
names[name] = thelist
return names
item_re = re.compile(r"\A\\(?P<index>\d+)\Z")
def conv(astr):
b = astr.split(',')
l = [x.strip() for x in b]
for i in range(len(l)):
m = item_re.match(l[i])
if m:
j = int(m.group('index'))
l[i] = l[j]
return ','.join(l)
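# Worked micro-example (illustrative): back-references of the form '\<index>'
# are resolved positionally, so conv('a,b,\\0') returns 'a,b,a'.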
def unique_key(adict):
""" Obtain a unique key given a dictionary."""
allkeys = list(adict.keys())
done = False
n = 1
while not done:
newkey = '__l%s' % (n)
if newkey in allkeys:
n += 1
else:
done = True
return newkey
template_name_re = re.compile(r'\A\s*(\w[\w\d]*)\s*\Z')
def expand_sub(substr, names):
substr = substr.replace('\>', '@rightarrow@')
substr = substr.replace('\<', '@leftarrow@')
lnames = find_repl_patterns(substr)
substr = named_re.sub(r"<\1>", substr) # get rid of definition templates
def listrepl(mobj):
thelist = conv(mobj.group(1).replace('\,', '@comma@'))
if template_name_re.match(thelist):
return "<%s>" % (thelist)
name = None
for key in lnames.keys(): # see if list is already in dictionary
if lnames[key] == thelist:
name = key
if name is None: # this list is not in the dictionary yet
name = unique_key(lnames)
lnames[name] = thelist
return "<%s>" % name
substr = list_re.sub(listrepl, substr) # convert all lists to named templates
# newnames are constructed as needed
numsubs = None
base_rule = None
rules = {}
for r in template_re.findall(substr):
if r not in rules:
thelist = lnames.get(r, names.get(r, None))
if thelist is None:
raise ValueError('No replicates found for <%s>' % (r))
if r not in names and not thelist.startswith('_'):
names[r] = thelist
rule = [i.replace('@comma@', ',') for i in thelist.split(',')]
num = len(rule)
if numsubs is None:
numsubs = num
rules[r] = rule
base_rule = r
elif num == numsubs:
rules[r] = rule
else:
print("Mismatch in number of replacements (base <%s=%s>)"
" for <%s=%s>. Ignoring." %
(base_rule, ','.join(rules[base_rule]), r, thelist))
if not rules:
return substr
def namerepl(mobj):
name = mobj.group(1)
return rules.get(name, (k+1)*[name])[k]
newstr = ''
for k in range(numsubs):
newstr += template_re.sub(namerepl, substr) + '\n\n'
newstr = newstr.replace('@rightarrow@', '>')
newstr = newstr.replace('@leftarrow@', '<')
return newstr
def process_str(allstr):
newstr = allstr
writestr = '' #_head # using _head will break free-format files
struct = parse_structure(newstr)
oldend = 0
names = {}
names.update(_special_names)
for sub in struct:
writestr += newstr[oldend:sub[0]]
names.update(find_repl_patterns(newstr[oldend:sub[0]]))
writestr += expand_sub(newstr[sub[0]:sub[1]], names)
oldend = sub[1]
writestr += newstr[oldend:]
return writestr
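# Worked example (illustrative; exact whitespace of the real output may
# differ): given a source block using the short template form
#
#       subroutine <s,d>copy(x)
#       end subroutine <s,d>copy
#
# process_str() replicates the routine once per comma-separated value,
# yielding an 'scopy' and a 'dcopy' variant.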
include_src_re = re.compile(r"(\n|\A)\s*include\s*['\"](?P<name>[\w\d./\\]+[.]src)['\"]", re.I)
def resolve_includes(source):
d = os.path.dirname(source)
fid = open(source)
lines = []
for line in fid:
m = include_src_re.match(line)
if m:
fn = m.group('name')
if not os.path.isabs(fn):
fn = os.path.join(d, fn)
if os.path.isfile(fn):
print('Including file', fn)
lines.extend(resolve_includes(fn))
else:
lines.append(line)
else:
lines.append(line)
fid.close()
return lines
def process_file(source):
lines = resolve_includes(source)
return process_str(''.join(lines))
_special_names = find_repl_patterns('''
<_c=s,d,c,z>
<_t=real,double precision,complex,double complex>
<prefix=s,d,c,z>
<ftype=real,double precision,complex,double complex>
<ctype=float,double,complex_float,complex_double>
<ftypereal=real,double precision,\\0,\\1>
<ctypereal=float,double,\\0,\\1>
''')
if __name__ == "__main__":
try:
file = sys.argv[1]
except IndexError:
fid = sys.stdin
outfile = sys.stdout
else:
fid = open(file, 'r')
(base, ext) = os.path.splitext(file)
newname = base
outfile = open(newname, 'w')
allstr = fid.read()
writestr = process_str(allstr)
outfile.write(writestr)
|
bsd-3-clause
|
Nashenas88/servo
|
tests/wpt/css-tests/css-text-decor-3_dev/xhtml1print/reference/support/generate-text-emphasis-position-property-tests.py
|
841
|
3343
|
#!/usr/bin/env python
# - * - coding: UTF-8 - * -
"""
This script generates tests text-emphasis-position-property-001 ~ 006
which cover all possible values of the text-emphasis-position property with
all combinations of the three main writing modes and two orientations. Only
test files are generated by this script. It also prints a list of all
generated tests, in Mozilla reftest.list format, to stdout.
"""
from __future__ import unicode_literals
import itertools
TEST_FILE = 'text-emphasis-position-property-{:03}{}.html'
REF_FILE = 'text-emphasis-position-property-{:03}-ref.html'
TEST_TEMPLATE = '''<!DOCTYPE html>
<meta charset="utf-8">
<title>CSS Test: text-emphasis-position: {value}, {title}</title>
<link rel="author" title="Xidorn Quan" href="https://www.upsuper.org">
<link rel="author" title="Mozilla" href="https://www.mozilla.org">
<link rel="help" href="https://drafts.csswg.org/css-text-decor-3/#text-emphasis-position-property">
<meta name="assert" content="'text-emphasis-position: {value}' with 'writing-mode: {wm}' puts emphasis marks {position} the text.">
<link rel="match" href="text-emphasis-position-property-{index:03}-ref.html">
<p>Pass if the emphasis marks are {position} the text below:</p>
<div style="line-height: 5; text-emphasis: circle; writing-mode: {wm}; text-orientation: {orient}; text-emphasis-position: {value}">試験テスト</div>
'''
SUFFIXES = ['', 'a', 'b', 'c', 'd', 'e', 'f', 'g']
WRITING_MODES = ["horizontal-tb", "vertical-rl", "vertical-lr"]
POSITION_HORIZONTAL = ["over", "under"]
POSITION_VERTICAL = ["right", "left"]
REF_MAP_MIXED = { "over": 1, "under": 2, "right": 3, "left": 4 }
REF_MAP_SIDEWAYS = { "right": 5, "left": 6 }
POSITION_TEXT = { "over": "over", "under": "under",
"right": "to the right of", "left": "to the left of" }
suffixes = [iter(SUFFIXES) for i in range(6)]
reftest_items = []
def write_file(filename, content):
with open(filename, 'wb') as f:
f.write(content.encode('UTF-8'))
def write_test_file(idx, suffix, wm, orient, value, position):
filename = TEST_FILE.format(idx, suffix)
write_file(filename, TEST_TEMPLATE.format(
value=value, wm=wm, orient=orient, index=idx, position=position,
title=(wm if orient == "mixed" else "{}, {}".format(wm, orient))))
reftest_items.append("== {} {}".format(filename, REF_FILE.format(idx)))
def write_test_files(wm, orient, pos1, pos2):
idx = (REF_MAP_MIXED if orient == "mixed" else REF_MAP_SIDEWAYS)[pos1]
position = POSITION_TEXT[pos1]
suffix = suffixes[idx - 1]
write_test_file(idx, next(suffix), wm, orient, pos1 + " " + pos2, position)
write_test_file(idx, next(suffix), wm, orient, pos2 + " " + pos1, position)
for wm in WRITING_MODES:
if wm == "horizontal-tb":
effective_pos = POSITION_HORIZONTAL
ineffective_pos = POSITION_VERTICAL
else:
effective_pos = POSITION_VERTICAL
ineffective_pos = POSITION_HORIZONTAL
for pos1, pos2 in itertools.product(effective_pos, ineffective_pos):
write_test_files(wm, "mixed", pos1, pos2)
if wm != "horizontal-tb":
write_test_files(wm, "sideways", pos1, pos2)
print("# START tests from {}".format(__file__))
reftest_items.sort()
for item in reftest_items:
print(item)
print("# END tests from {}".format(__file__))
|
mpl-2.0
|
FireWRT/OpenWrt-Firefly-Libraries
|
staging_dir/target-mipsel_1004kc+dsp_uClibc-0.9.33.2/usr/lib/python2.7/encodings/iso8859_5.py
|
593
|
13271
|
""" Python Character Mapping Codec iso8859_5 generated from 'MAPPINGS/ISO8859/8859-5.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='iso8859-5',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\x80' # 0x80 -> <control>
u'\x81' # 0x81 -> <control>
u'\x82' # 0x82 -> <control>
u'\x83' # 0x83 -> <control>
u'\x84' # 0x84 -> <control>
u'\x85' # 0x85 -> <control>
u'\x86' # 0x86 -> <control>
u'\x87' # 0x87 -> <control>
u'\x88' # 0x88 -> <control>
u'\x89' # 0x89 -> <control>
u'\x8a' # 0x8A -> <control>
u'\x8b' # 0x8B -> <control>
u'\x8c' # 0x8C -> <control>
u'\x8d' # 0x8D -> <control>
u'\x8e' # 0x8E -> <control>
u'\x8f' # 0x8F -> <control>
u'\x90' # 0x90 -> <control>
u'\x91' # 0x91 -> <control>
u'\x92' # 0x92 -> <control>
u'\x93' # 0x93 -> <control>
u'\x94' # 0x94 -> <control>
u'\x95' # 0x95 -> <control>
u'\x96' # 0x96 -> <control>
u'\x97' # 0x97 -> <control>
u'\x98' # 0x98 -> <control>
u'\x99' # 0x99 -> <control>
u'\x9a' # 0x9A -> <control>
u'\x9b' # 0x9B -> <control>
u'\x9c' # 0x9C -> <control>
u'\x9d' # 0x9D -> <control>
u'\x9e' # 0x9E -> <control>
u'\x9f' # 0x9F -> <control>
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\u0401' # 0xA1 -> CYRILLIC CAPITAL LETTER IO
u'\u0402' # 0xA2 -> CYRILLIC CAPITAL LETTER DJE
u'\u0403' # 0xA3 -> CYRILLIC CAPITAL LETTER GJE
u'\u0404' # 0xA4 -> CYRILLIC CAPITAL LETTER UKRAINIAN IE
u'\u0405' # 0xA5 -> CYRILLIC CAPITAL LETTER DZE
u'\u0406' # 0xA6 -> CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
u'\u0407' # 0xA7 -> CYRILLIC CAPITAL LETTER YI
u'\u0408' # 0xA8 -> CYRILLIC CAPITAL LETTER JE
u'\u0409' # 0xA9 -> CYRILLIC CAPITAL LETTER LJE
u'\u040a' # 0xAA -> CYRILLIC CAPITAL LETTER NJE
u'\u040b' # 0xAB -> CYRILLIC CAPITAL LETTER TSHE
u'\u040c' # 0xAC -> CYRILLIC CAPITAL LETTER KJE
u'\xad' # 0xAD -> SOFT HYPHEN
u'\u040e' # 0xAE -> CYRILLIC CAPITAL LETTER SHORT U
u'\u040f' # 0xAF -> CYRILLIC CAPITAL LETTER DZHE
u'\u0410' # 0xB0 -> CYRILLIC CAPITAL LETTER A
u'\u0411' # 0xB1 -> CYRILLIC CAPITAL LETTER BE
u'\u0412' # 0xB2 -> CYRILLIC CAPITAL LETTER VE
u'\u0413' # 0xB3 -> CYRILLIC CAPITAL LETTER GHE
u'\u0414' # 0xB4 -> CYRILLIC CAPITAL LETTER DE
u'\u0415' # 0xB5 -> CYRILLIC CAPITAL LETTER IE
u'\u0416' # 0xB6 -> CYRILLIC CAPITAL LETTER ZHE
u'\u0417' # 0xB7 -> CYRILLIC CAPITAL LETTER ZE
u'\u0418' # 0xB8 -> CYRILLIC CAPITAL LETTER I
u'\u0419' # 0xB9 -> CYRILLIC CAPITAL LETTER SHORT I
u'\u041a' # 0xBA -> CYRILLIC CAPITAL LETTER KA
u'\u041b' # 0xBB -> CYRILLIC CAPITAL LETTER EL
u'\u041c' # 0xBC -> CYRILLIC CAPITAL LETTER EM
u'\u041d' # 0xBD -> CYRILLIC CAPITAL LETTER EN
u'\u041e' # 0xBE -> CYRILLIC CAPITAL LETTER O
u'\u041f' # 0xBF -> CYRILLIC CAPITAL LETTER PE
u'\u0420' # 0xC0 -> CYRILLIC CAPITAL LETTER ER
u'\u0421' # 0xC1 -> CYRILLIC CAPITAL LETTER ES
u'\u0422' # 0xC2 -> CYRILLIC CAPITAL LETTER TE
u'\u0423' # 0xC3 -> CYRILLIC CAPITAL LETTER U
u'\u0424' # 0xC4 -> CYRILLIC CAPITAL LETTER EF
u'\u0425' # 0xC5 -> CYRILLIC CAPITAL LETTER HA
u'\u0426' # 0xC6 -> CYRILLIC CAPITAL LETTER TSE
u'\u0427' # 0xC7 -> CYRILLIC CAPITAL LETTER CHE
u'\u0428' # 0xC8 -> CYRILLIC CAPITAL LETTER SHA
u'\u0429' # 0xC9 -> CYRILLIC CAPITAL LETTER SHCHA
u'\u042a' # 0xCA -> CYRILLIC CAPITAL LETTER HARD SIGN
u'\u042b' # 0xCB -> CYRILLIC CAPITAL LETTER YERU
u'\u042c' # 0xCC -> CYRILLIC CAPITAL LETTER SOFT SIGN
u'\u042d' # 0xCD -> CYRILLIC CAPITAL LETTER E
u'\u042e' # 0xCE -> CYRILLIC CAPITAL LETTER YU
u'\u042f' # 0xCF -> CYRILLIC CAPITAL LETTER YA
u'\u0430' # 0xD0 -> CYRILLIC SMALL LETTER A
u'\u0431' # 0xD1 -> CYRILLIC SMALL LETTER BE
u'\u0432' # 0xD2 -> CYRILLIC SMALL LETTER VE
u'\u0433' # 0xD3 -> CYRILLIC SMALL LETTER GHE
u'\u0434' # 0xD4 -> CYRILLIC SMALL LETTER DE
u'\u0435' # 0xD5 -> CYRILLIC SMALL LETTER IE
u'\u0436' # 0xD6 -> CYRILLIC SMALL LETTER ZHE
u'\u0437' # 0xD7 -> CYRILLIC SMALL LETTER ZE
u'\u0438' # 0xD8 -> CYRILLIC SMALL LETTER I
u'\u0439' # 0xD9 -> CYRILLIC SMALL LETTER SHORT I
u'\u043a' # 0xDA -> CYRILLIC SMALL LETTER KA
u'\u043b' # 0xDB -> CYRILLIC SMALL LETTER EL
u'\u043c' # 0xDC -> CYRILLIC SMALL LETTER EM
u'\u043d' # 0xDD -> CYRILLIC SMALL LETTER EN
u'\u043e' # 0xDE -> CYRILLIC SMALL LETTER O
u'\u043f' # 0xDF -> CYRILLIC SMALL LETTER PE
u'\u0440' # 0xE0 -> CYRILLIC SMALL LETTER ER
u'\u0441' # 0xE1 -> CYRILLIC SMALL LETTER ES
u'\u0442' # 0xE2 -> CYRILLIC SMALL LETTER TE
u'\u0443' # 0xE3 -> CYRILLIC SMALL LETTER U
u'\u0444' # 0xE4 -> CYRILLIC SMALL LETTER EF
u'\u0445' # 0xE5 -> CYRILLIC SMALL LETTER HA
u'\u0446' # 0xE6 -> CYRILLIC SMALL LETTER TSE
u'\u0447' # 0xE7 -> CYRILLIC SMALL LETTER CHE
u'\u0448' # 0xE8 -> CYRILLIC SMALL LETTER SHA
u'\u0449' # 0xE9 -> CYRILLIC SMALL LETTER SHCHA
u'\u044a' # 0xEA -> CYRILLIC SMALL LETTER HARD SIGN
u'\u044b' # 0xEB -> CYRILLIC SMALL LETTER YERU
u'\u044c' # 0xEC -> CYRILLIC SMALL LETTER SOFT SIGN
u'\u044d' # 0xED -> CYRILLIC SMALL LETTER E
u'\u044e' # 0xEE -> CYRILLIC SMALL LETTER YU
u'\u044f' # 0xEF -> CYRILLIC SMALL LETTER YA
u'\u2116' # 0xF0 -> NUMERO SIGN
u'\u0451' # 0xF1 -> CYRILLIC SMALL LETTER IO
u'\u0452' # 0xF2 -> CYRILLIC SMALL LETTER DJE
u'\u0453' # 0xF3 -> CYRILLIC SMALL LETTER GJE
u'\u0454' # 0xF4 -> CYRILLIC SMALL LETTER UKRAINIAN IE
u'\u0455' # 0xF5 -> CYRILLIC SMALL LETTER DZE
u'\u0456' # 0xF6 -> CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
u'\u0457' # 0xF7 -> CYRILLIC SMALL LETTER YI
u'\u0458' # 0xF8 -> CYRILLIC SMALL LETTER JE
u'\u0459' # 0xF9 -> CYRILLIC SMALL LETTER LJE
u'\u045a' # 0xFA -> CYRILLIC SMALL LETTER NJE
u'\u045b' # 0xFB -> CYRILLIC SMALL LETTER TSHE
u'\u045c' # 0xFC -> CYRILLIC SMALL LETTER KJE
u'\xa7' # 0xFD -> SECTION SIGN
u'\u045e' # 0xFE -> CYRILLIC SMALL LETTER SHORT U
u'\u045f' # 0xFF -> CYRILLIC SMALL LETTER DZHE
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
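if __name__ == '__main__':
    # Illustrative round-trip (not part of the stdlib module): the codec is
    # registered under the name 'iso8859-5', so encoding and then decoding
    # any mapped character should be lossless.
    sample = u'\u0416'  # CYRILLIC CAPITAL LETTER ZHE
    assert sample.encode('iso8859-5').decode('iso8859-5') == sample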
|
gpl-2.0
|
robintw/scikit-image
|
skimage/viewer/qt.py
|
48
|
1281
|
_qt_version = None
has_qt = True
try:
from matplotlib.backends.qt_compat import QtGui, QtCore, QtWidgets, QT_RC_MAJOR_VERSION as _qt_version
except ImportError:
try:
from matplotlib.backends.qt4_compat import QtGui, QtCore
QtWidgets = QtGui
_qt_version = 4
except ImportError:
# Mock objects
class QtGui_cls(object):
QMainWindow = object
QDialog = object
QWidget = object
class QtCore_cls(object):
class Qt(object):
TopDockWidgetArea = None
BottomDockWidgetArea = None
LeftDockWidgetArea = None
RightDockWidgetArea = None
def Signal(self, *args, **kwargs):
pass
QtGui = QtWidgets = QtGui_cls()
QtCore = QtCore_cls()
has_qt = False
if _qt_version == 5:
from matplotlib.backends.backend_qt5 import FigureManagerQT
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg
elif _qt_version == 4:
from matplotlib.backends.backend_qt4 import FigureManagerQT
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg
else:
FigureManagerQT = object
FigureCanvasQTAgg = object
Qt = QtCore.Qt
Signal = QtCore.Signal
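# Usage sketch (illustrative, not part of scikit-image): downstream viewer
# code can guard on the flags this module exports, e.g.
#
#   from skimage.viewer.qt import has_qt
#   if not has_qt:
#       raise ImportError("a Qt binding is required for the viewer")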
|
bsd-3-clause
|
mitghi/pyopenssl
|
src/OpenSSL/SSL.py
|
13
|
65522
|
import socket
from sys import platform
from functools import wraps, partial
from itertools import count, chain
from weakref import WeakValueDictionary
from errno import errorcode
from six import binary_type as _binary_type
from six import integer_types as integer_types
from six import int2byte, indexbytes
from OpenSSL._util import (
UNSPECIFIED as _UNSPECIFIED,
exception_from_error_queue as _exception_from_error_queue,
ffi as _ffi,
lib as _lib,
make_assert as _make_assert,
native as _native,
path_string as _path_string,
text_to_bytes_and_warn as _text_to_bytes_and_warn,
)
from OpenSSL.crypto import (
FILETYPE_PEM, _PassphraseHelper, PKey, X509Name, X509, X509Store)
try:
_memoryview = memoryview
except NameError:
class _memoryview(object):
pass
try:
_buffer = buffer
except NameError:
class _buffer(object):
pass
OPENSSL_VERSION_NUMBER = _lib.OPENSSL_VERSION_NUMBER
SSLEAY_VERSION = _lib.SSLEAY_VERSION
SSLEAY_CFLAGS = _lib.SSLEAY_CFLAGS
SSLEAY_PLATFORM = _lib.SSLEAY_PLATFORM
SSLEAY_DIR = _lib.SSLEAY_DIR
SSLEAY_BUILT_ON = _lib.SSLEAY_BUILT_ON
SENT_SHUTDOWN = _lib.SSL_SENT_SHUTDOWN
RECEIVED_SHUTDOWN = _lib.SSL_RECEIVED_SHUTDOWN
SSLv2_METHOD = 1
SSLv3_METHOD = 2
SSLv23_METHOD = 3
TLSv1_METHOD = 4
TLSv1_1_METHOD = 5
TLSv1_2_METHOD = 6
OP_NO_SSLv2 = _lib.SSL_OP_NO_SSLv2
OP_NO_SSLv3 = _lib.SSL_OP_NO_SSLv3
OP_NO_TLSv1 = _lib.SSL_OP_NO_TLSv1
OP_NO_TLSv1_1 = getattr(_lib, "SSL_OP_NO_TLSv1_1", 0)
OP_NO_TLSv1_2 = getattr(_lib, "SSL_OP_NO_TLSv1_2", 0)
MODE_RELEASE_BUFFERS = _lib.SSL_MODE_RELEASE_BUFFERS
OP_SINGLE_DH_USE = _lib.SSL_OP_SINGLE_DH_USE
OP_SINGLE_ECDH_USE = _lib.SSL_OP_SINGLE_ECDH_USE
OP_EPHEMERAL_RSA = _lib.SSL_OP_EPHEMERAL_RSA
OP_MICROSOFT_SESS_ID_BUG = _lib.SSL_OP_MICROSOFT_SESS_ID_BUG
OP_NETSCAPE_CHALLENGE_BUG = _lib.SSL_OP_NETSCAPE_CHALLENGE_BUG
OP_NETSCAPE_REUSE_CIPHER_CHANGE_BUG = (
_lib.SSL_OP_NETSCAPE_REUSE_CIPHER_CHANGE_BUG
)
OP_SSLREF2_REUSE_CERT_TYPE_BUG = _lib.SSL_OP_SSLREF2_REUSE_CERT_TYPE_BUG
OP_MICROSOFT_BIG_SSLV3_BUFFER = _lib.SSL_OP_MICROSOFT_BIG_SSLV3_BUFFER
OP_MSIE_SSLV2_RSA_PADDING = _lib.SSL_OP_MSIE_SSLV2_RSA_PADDING
OP_SSLEAY_080_CLIENT_DH_BUG = _lib.SSL_OP_SSLEAY_080_CLIENT_DH_BUG
OP_TLS_D5_BUG = _lib.SSL_OP_TLS_D5_BUG
OP_TLS_BLOCK_PADDING_BUG = _lib.SSL_OP_TLS_BLOCK_PADDING_BUG
OP_DONT_INSERT_EMPTY_FRAGMENTS = _lib.SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS
OP_CIPHER_SERVER_PREFERENCE = _lib.SSL_OP_CIPHER_SERVER_PREFERENCE
OP_TLS_ROLLBACK_BUG = _lib.SSL_OP_TLS_ROLLBACK_BUG
OP_PKCS1_CHECK_1 = _lib.SSL_OP_PKCS1_CHECK_1
OP_PKCS1_CHECK_2 = _lib.SSL_OP_PKCS1_CHECK_2
OP_NETSCAPE_CA_DN_BUG = _lib.SSL_OP_NETSCAPE_CA_DN_BUG
OP_NETSCAPE_DEMO_CIPHER_CHANGE_BUG = (
_lib.SSL_OP_NETSCAPE_DEMO_CIPHER_CHANGE_BUG
)
OP_NO_COMPRESSION = _lib.SSL_OP_NO_COMPRESSION
OP_NO_QUERY_MTU = _lib.SSL_OP_NO_QUERY_MTU
OP_COOKIE_EXCHANGE = _lib.SSL_OP_COOKIE_EXCHANGE
OP_NO_TICKET = _lib.SSL_OP_NO_TICKET
OP_ALL = _lib.SSL_OP_ALL
VERIFY_PEER = _lib.SSL_VERIFY_PEER
VERIFY_FAIL_IF_NO_PEER_CERT = _lib.SSL_VERIFY_FAIL_IF_NO_PEER_CERT
VERIFY_CLIENT_ONCE = _lib.SSL_VERIFY_CLIENT_ONCE
VERIFY_NONE = _lib.SSL_VERIFY_NONE
SESS_CACHE_OFF = _lib.SSL_SESS_CACHE_OFF
SESS_CACHE_CLIENT = _lib.SSL_SESS_CACHE_CLIENT
SESS_CACHE_SERVER = _lib.SSL_SESS_CACHE_SERVER
SESS_CACHE_BOTH = _lib.SSL_SESS_CACHE_BOTH
SESS_CACHE_NO_AUTO_CLEAR = _lib.SSL_SESS_CACHE_NO_AUTO_CLEAR
SESS_CACHE_NO_INTERNAL_LOOKUP = _lib.SSL_SESS_CACHE_NO_INTERNAL_LOOKUP
SESS_CACHE_NO_INTERNAL_STORE = _lib.SSL_SESS_CACHE_NO_INTERNAL_STORE
SESS_CACHE_NO_INTERNAL = _lib.SSL_SESS_CACHE_NO_INTERNAL
SSL_ST_CONNECT = _lib.SSL_ST_CONNECT
SSL_ST_ACCEPT = _lib.SSL_ST_ACCEPT
SSL_ST_MASK = _lib.SSL_ST_MASK
if _lib.Cryptography_HAS_SSL_ST:
SSL_ST_INIT = _lib.SSL_ST_INIT
SSL_ST_BEFORE = _lib.SSL_ST_BEFORE
SSL_ST_OK = _lib.SSL_ST_OK
SSL_ST_RENEGOTIATE = _lib.SSL_ST_RENEGOTIATE
SSL_CB_LOOP = _lib.SSL_CB_LOOP
SSL_CB_EXIT = _lib.SSL_CB_EXIT
SSL_CB_READ = _lib.SSL_CB_READ
SSL_CB_WRITE = _lib.SSL_CB_WRITE
SSL_CB_ALERT = _lib.SSL_CB_ALERT
SSL_CB_READ_ALERT = _lib.SSL_CB_READ_ALERT
SSL_CB_WRITE_ALERT = _lib.SSL_CB_WRITE_ALERT
SSL_CB_ACCEPT_LOOP = _lib.SSL_CB_ACCEPT_LOOP
SSL_CB_ACCEPT_EXIT = _lib.SSL_CB_ACCEPT_EXIT
SSL_CB_CONNECT_LOOP = _lib.SSL_CB_CONNECT_LOOP
SSL_CB_CONNECT_EXIT = _lib.SSL_CB_CONNECT_EXIT
SSL_CB_HANDSHAKE_START = _lib.SSL_CB_HANDSHAKE_START
SSL_CB_HANDSHAKE_DONE = _lib.SSL_CB_HANDSHAKE_DONE
class Error(Exception):
"""
An error occurred in an `OpenSSL.SSL` API.
"""
_raise_current_error = partial(_exception_from_error_queue, Error)
_openssl_assert = _make_assert(Error)
class WantReadError(Error):
pass
class WantWriteError(Error):
pass
class WantX509LookupError(Error):
pass
class ZeroReturnError(Error):
pass
class SysCallError(Error):
pass
class _CallbackExceptionHelper(object):
"""
A base class for wrapper classes that allow for intelligent exception
handling in OpenSSL callbacks.
:ivar list _problems: Any exceptions that occurred while executing in a
context where they could not be raised in the normal way. Typically
this is because OpenSSL has called into some Python code and requires a
return value. The exceptions are saved to be raised later when it is
possible to do so.
"""
def __init__(self):
self._problems = []
def raise_if_problem(self):
"""
        Raise an exception from the OpenSSL error queue or that was previously
        captured when running a callback.
"""
if self._problems:
try:
_raise_current_error()
except Error:
pass
raise self._problems.pop(0)
class _VerifyHelper(_CallbackExceptionHelper):
"""
Wrap a callback such that it can be used as a certificate verification
callback.
"""
def __init__(self, callback):
_CallbackExceptionHelper.__init__(self)
@wraps(callback)
def wrapper(ok, store_ctx):
cert = X509.__new__(X509)
cert._x509 = _lib.X509_STORE_CTX_get_current_cert(store_ctx)
error_number = _lib.X509_STORE_CTX_get_error(store_ctx)
error_depth = _lib.X509_STORE_CTX_get_error_depth(store_ctx)
index = _lib.SSL_get_ex_data_X509_STORE_CTX_idx()
ssl = _lib.X509_STORE_CTX_get_ex_data(store_ctx, index)
connection = Connection._reverse_mapping[ssl]
try:
result = callback(
connection, cert, error_number, error_depth, ok
)
except Exception as e:
self._problems.append(e)
return 0
else:
if result:
_lib.X509_STORE_CTX_set_error(store_ctx, _lib.X509_V_OK)
return 1
else:
return 0
self.callback = _ffi.callback(
"int (*)(int, X509_STORE_CTX *)", wrapper)
class _NpnAdvertiseHelper(_CallbackExceptionHelper):
"""
Wrap a callback such that it can be used as an NPN advertisement callback.
"""
def __init__(self, callback):
_CallbackExceptionHelper.__init__(self)
@wraps(callback)
def wrapper(ssl, out, outlen, arg):
try:
conn = Connection._reverse_mapping[ssl]
protos = callback(conn)
# Join the protocols into a Python bytestring, length-prefixing
# each element.
protostr = b''.join(
chain.from_iterable((int2byte(len(p)), p) for p in protos)
)
# Save our callback arguments on the connection object. This is
# done to make sure that they don't get freed before OpenSSL
# uses them. Then, return them appropriately in the output
# parameters.
conn._npn_advertise_callback_args = [
_ffi.new("unsigned int *", len(protostr)),
_ffi.new("unsigned char[]", protostr),
]
outlen[0] = conn._npn_advertise_callback_args[0][0]
out[0] = conn._npn_advertise_callback_args[1]
return 0
except Exception as e:
self._problems.append(e)
return 2 # SSL_TLSEXT_ERR_ALERT_FATAL
self.callback = _ffi.callback(
"int (*)(SSL *, const unsigned char **, unsigned int *, void *)",
wrapper
)
class _NpnSelectHelper(_CallbackExceptionHelper):
"""
Wrap a callback such that it can be used as an NPN selection callback.
"""
def __init__(self, callback):
_CallbackExceptionHelper.__init__(self)
@wraps(callback)
def wrapper(ssl, out, outlen, in_, inlen, arg):
try:
conn = Connection._reverse_mapping[ssl]
# The string passed to us is actually made up of multiple
# length-prefixed bytestrings. We need to split that into a
# list.
instr = _ffi.buffer(in_, inlen)[:]
protolist = []
while instr:
                    encoded_len = indexbytes(instr, 0)
                    proto = instr[1:encoded_len + 1]
                    protolist.append(proto)
                    instr = instr[encoded_len + 1:]
# Call the callback
outstr = callback(conn, protolist)
# Save our callback arguments on the connection object. This is
# done to make sure that they don't get freed before OpenSSL
# uses them. Then, return them appropriately in the output
# parameters.
conn._npn_select_callback_args = [
_ffi.new("unsigned char *", len(outstr)),
_ffi.new("unsigned char[]", outstr),
]
outlen[0] = conn._npn_select_callback_args[0][0]
out[0] = conn._npn_select_callback_args[1]
return 0
except Exception as e:
self._problems.append(e)
return 2 # SSL_TLSEXT_ERR_ALERT_FATAL
self.callback = _ffi.callback(
("int (*)(SSL *, unsigned char **, unsigned char *, "
"const unsigned char *, unsigned int, void *)"),
wrapper
)
class _ALPNSelectHelper(_CallbackExceptionHelper):
"""
Wrap a callback such that it can be used as an ALPN selection callback.
"""
def __init__(self, callback):
_CallbackExceptionHelper.__init__(self)
@wraps(callback)
def wrapper(ssl, out, outlen, in_, inlen, arg):
try:
conn = Connection._reverse_mapping[ssl]
# The string passed to us is made up of multiple
# length-prefixed bytestrings. We need to split that into a
# list.
instr = _ffi.buffer(in_, inlen)[:]
protolist = []
while instr:
encoded_len = indexbytes(instr, 0)
proto = instr[1:encoded_len + 1]
protolist.append(proto)
instr = instr[encoded_len + 1:]
# Call the callback
outstr = callback(conn, protolist)
if not isinstance(outstr, _binary_type):
raise TypeError("ALPN callback must return a bytestring.")
# Save our callback arguments on the connection object to make
# sure that they don't get freed before OpenSSL can use them.
# Then, return them in the appropriate output parameters.
conn._alpn_select_callback_args = [
_ffi.new("unsigned char *", len(outstr)),
_ffi.new("unsigned char[]", outstr),
]
outlen[0] = conn._alpn_select_callback_args[0][0]
out[0] = conn._alpn_select_callback_args[1]
return 0
except Exception as e:
self._problems.append(e)
return 2 # SSL_TLSEXT_ERR_ALERT_FATAL
self.callback = _ffi.callback(
("int (*)(SSL *, unsigned char **, unsigned char *, "
"const unsigned char *, unsigned int, void *)"),
wrapper
)
def _asFileDescriptor(obj):
fd = None
if not isinstance(obj, integer_types):
meth = getattr(obj, "fileno", None)
if meth is not None:
obj = meth()
if isinstance(obj, integer_types):
fd = obj
if not isinstance(fd, integer_types):
raise TypeError("argument must be an int, or have a fileno() method.")
elif fd < 0:
raise ValueError(
"file descriptor cannot be a negative integer (%i)" % (fd,))
return fd
def SSLeay_version(type):
"""
Return a string describing the version of OpenSSL in use.
:param type: One of the SSLEAY_ constants defined in this module.
"""
return _ffi.string(_lib.SSLeay_version(type))
def _make_requires(flag, error):
"""
Builds a decorator that ensures that functions that rely on OpenSSL
functions that are not present in this build raise NotImplementedError,
rather than AttributeError coming out of cryptography.
:param flag: A cryptography flag that guards the functions, e.g.
``Cryptography_HAS_NEXTPROTONEG``.
:param error: The string to be used in the exception if the flag is false.
"""
def _requires_decorator(func):
if not flag:
@wraps(func)
def explode(*args, **kwargs):
raise NotImplementedError(error)
return explode
else:
return func
return _requires_decorator
_requires_npn = _make_requires(
_lib.Cryptography_HAS_NEXTPROTONEG, "NPN not available"
)
_requires_alpn = _make_requires(
_lib.Cryptography_HAS_ALPN, "ALPN not available"
)
_requires_sni = _make_requires(
_lib.Cryptography_HAS_TLSEXT_HOSTNAME, "SNI not available"
)
class Session(object):
pass
class Context(object):
"""
:class:`OpenSSL.SSL.Context` instances define the parameters for setting
up new SSL connections.
"""
_methods = {
SSLv2_METHOD: "SSLv2_method",
SSLv3_METHOD: "SSLv3_method",
SSLv23_METHOD: "SSLv23_method",
TLSv1_METHOD: "TLSv1_method",
TLSv1_1_METHOD: "TLSv1_1_method",
TLSv1_2_METHOD: "TLSv1_2_method",
}
_methods = dict(
(identifier, getattr(_lib, name))
for (identifier, name) in _methods.items()
if getattr(_lib, name, None) is not None)
def __init__(self, method):
"""
        :param method: One of SSLv2_METHOD, SSLv3_METHOD, SSLv23_METHOD,
            TLSv1_METHOD, TLSv1_1_METHOD, or TLSv1_2_METHOD.
"""
if not isinstance(method, integer_types):
raise TypeError("method must be an integer")
try:
method_func = self._methods[method]
except KeyError:
raise ValueError("No such protocol")
method_obj = method_func()
_openssl_assert(method_obj != _ffi.NULL)
context = _lib.SSL_CTX_new(method_obj)
_openssl_assert(context != _ffi.NULL)
context = _ffi.gc(context, _lib.SSL_CTX_free)
self._context = context
self._passphrase_helper = None
self._passphrase_callback = None
self._passphrase_userdata = None
self._verify_helper = None
self._verify_callback = None
self._info_callback = None
self._tlsext_servername_callback = None
self._app_data = None
self._npn_advertise_helper = None
self._npn_advertise_callback = None
self._npn_select_helper = None
self._npn_select_callback = None
self._alpn_select_helper = None
self._alpn_select_callback = None
# SSL_CTX_set_app_data(self->ctx, self);
# SSL_CTX_set_mode(self->ctx, SSL_MODE_ENABLE_PARTIAL_WRITE |
# SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER |
# SSL_MODE_AUTO_RETRY);
self.set_mode(_lib.SSL_MODE_ENABLE_PARTIAL_WRITE)
def load_verify_locations(self, cafile, capath=None):
"""
Let SSL know where we can find trusted certificates for the certificate
        chain.
:param cafile: In which file we can find the certificates (``bytes`` or
``unicode``).
:param capath: In which directory we can find the certificates
(``bytes`` or ``unicode``).
:return: None
"""
if cafile is None:
cafile = _ffi.NULL
else:
cafile = _path_string(cafile)
if capath is None:
capath = _ffi.NULL
else:
capath = _path_string(capath)
load_result = _lib.SSL_CTX_load_verify_locations(
self._context, cafile, capath
)
if not load_result:
_raise_current_error()
def _wrap_callback(self, callback):
@wraps(callback)
def wrapper(size, verify, userdata):
return callback(size, verify, self._passphrase_userdata)
return _PassphraseHelper(
FILETYPE_PEM, wrapper, more_args=True, truncate=True)
def set_passwd_cb(self, callback, userdata=None):
"""
Set the passphrase callback
:param callback: The Python callback to use
:param userdata: (optional) A Python object which will be given as
argument to the callback
:return: None
"""
if not callable(callback):
raise TypeError("callback must be callable")
self._passphrase_helper = self._wrap_callback(callback)
self._passphrase_callback = self._passphrase_helper.callback
_lib.SSL_CTX_set_default_passwd_cb(
self._context, self._passphrase_callback)
self._passphrase_userdata = userdata
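    # Illustrative sketch (not part of the upstream file): the passphrase
    # callback receives the maximum passphrase length, a repeat-prompt flag,
    # and the userdata set above, and returns the passphrase as bytes. The
    # names below are hypothetical.
    #
    #     def passwd_cb(max_length, prompt_twice, userdata):
    #         return b"my key passphrase"
    #     ctx.set_passwd_cb(passwd_cb)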
def set_default_verify_paths(self):
"""
Use the platform-specific CA certificate locations
:return: None
"""
set_result = _lib.SSL_CTX_set_default_verify_paths(self._context)
_openssl_assert(set_result == 1)
def use_certificate_chain_file(self, certfile):
"""
Load a certificate chain from a file
:param certfile: The name of the certificate chain file (``bytes`` or
``unicode``).
:return: None
"""
certfile = _path_string(certfile)
result = _lib.SSL_CTX_use_certificate_chain_file(
self._context, certfile
)
if not result:
_raise_current_error()
def use_certificate_file(self, certfile, filetype=FILETYPE_PEM):
"""
Load a certificate from a file
:param certfile: The name of the certificate file (``bytes`` or
``unicode``).
:param filetype: (optional) The encoding of the file, default is PEM
:return: None
"""
certfile = _path_string(certfile)
if not isinstance(filetype, integer_types):
raise TypeError("filetype must be an integer")
use_result = _lib.SSL_CTX_use_certificate_file(
self._context, certfile, filetype
)
if not use_result:
_raise_current_error()
def use_certificate(self, cert):
"""
Load a certificate from a X509 object
:param cert: The X509 object
:return: None
"""
if not isinstance(cert, X509):
raise TypeError("cert must be an X509 instance")
use_result = _lib.SSL_CTX_use_certificate(self._context, cert._x509)
if not use_result:
_raise_current_error()
def add_extra_chain_cert(self, certobj):
"""
Add certificate to chain
:param certobj: The X509 certificate object to add to the chain
:return: None
"""
if not isinstance(certobj, X509):
raise TypeError("certobj must be an X509 instance")
copy = _lib.X509_dup(certobj._x509)
add_result = _lib.SSL_CTX_add_extra_chain_cert(self._context, copy)
if not add_result:
# TODO: This is untested.
_lib.X509_free(copy)
_raise_current_error()
def _raise_passphrase_exception(self):
if self._passphrase_helper is None:
_raise_current_error()
exception = self._passphrase_helper.raise_if_problem(Error)
if exception is not None:
raise exception
def use_privatekey_file(self, keyfile, filetype=_UNSPECIFIED):
"""
Load a private key from a file
:param keyfile: The name of the key file (``bytes`` or ``unicode``)
:param filetype: (optional) The encoding of the file, default is PEM
:return: None
"""
keyfile = _path_string(keyfile)
if filetype is _UNSPECIFIED:
filetype = FILETYPE_PEM
elif not isinstance(filetype, integer_types):
raise TypeError("filetype must be an integer")
use_result = _lib.SSL_CTX_use_PrivateKey_file(
self._context, keyfile, filetype)
if not use_result:
self._raise_passphrase_exception()
def use_privatekey(self, pkey):
"""
Load a private key from a PKey object
:param pkey: The PKey object
:return: None
"""
if not isinstance(pkey, PKey):
raise TypeError("pkey must be a PKey instance")
use_result = _lib.SSL_CTX_use_PrivateKey(self._context, pkey._pkey)
if not use_result:
self._raise_passphrase_exception()
def check_privatekey(self):
"""
Check that the private key and certificate match up
:return: None (raises an exception if something's wrong)
"""
if not _lib.SSL_CTX_check_private_key(self._context):
_raise_current_error()
def load_client_ca(self, cafile):
"""
Load the trusted certificates that will be sent to the client. Does
not actually imply any of the certificates are trusted; that must be
configured separately.
:param bytes cafile: The path to a certificates file in PEM format.
:return: None
"""
ca_list = _lib.SSL_load_client_CA_file(
_text_to_bytes_and_warn("cafile", cafile)
)
_openssl_assert(ca_list != _ffi.NULL)
# SSL_CTX_set_client_CA_list doesn't return anything.
_lib.SSL_CTX_set_client_CA_list(self._context, ca_list)
def set_session_id(self, buf):
"""
        Set the session ID context to *buf* within which a session can be reused for
this Context object. This is needed when doing session resumption,
because there is no way for a stored session to know which Context
object it is associated with.
:param bytes buf: The session id.
:returns: None
"""
buf = _text_to_bytes_and_warn("buf", buf)
_openssl_assert(
_lib.SSL_CTX_set_session_id_context(
self._context,
buf,
len(buf),
) == 1
)
def set_session_cache_mode(self, mode):
"""
Enable/disable session caching and specify the mode used.
:param mode: One or more of the SESS_CACHE_* flags (combine using
bitwise or)
:returns: The previously set caching mode.
"""
if not isinstance(mode, integer_types):
raise TypeError("mode must be an integer")
return _lib.SSL_CTX_set_session_cache_mode(self._context, mode)
def get_session_cache_mode(self):
"""
:returns: The currently used cache mode.
"""
return _lib.SSL_CTX_get_session_cache_mode(self._context)
def set_verify(self, mode, callback):
"""
Set the verify mode and verify callback
:param mode: The verify mode, this is either VERIFY_NONE or
VERIFY_PEER combined with possible other flags
:param callback: The Python callback to use
:return: None
See SSL_CTX_set_verify(3SSL) for further details.
"""
if not isinstance(mode, integer_types):
raise TypeError("mode must be an integer")
if not callable(callback):
raise TypeError("callback must be callable")
self._verify_helper = _VerifyHelper(callback)
self._verify_callback = self._verify_helper.callback
_lib.SSL_CTX_set_verify(self._context, mode, self._verify_callback)
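    # Illustrative sketch (not in the upstream file): per _VerifyHelper above,
    # a verify callback is invoked once per certificate in the chain and
    # should return a truthy value to accept that certificate and a falsy
    # value to reject it; ``ok`` carries OpenSSL's own verdict.
    #
    #     def verify_cb(connection, x509, errnum, errdepth, ok):
    #         return ok
    #     ctx.set_verify(VERIFY_PEER, verify_cb)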
def set_verify_depth(self, depth):
"""
Set the verify depth
:param depth: An integer specifying the verify depth
:return: None
"""
if not isinstance(depth, integer_types):
raise TypeError("depth must be an integer")
_lib.SSL_CTX_set_verify_depth(self._context, depth)
def get_verify_mode(self):
"""
Get the verify mode
:return: The verify mode
"""
return _lib.SSL_CTX_get_verify_mode(self._context)
def get_verify_depth(self):
"""
Get the verify depth
:return: The verify depth
"""
return _lib.SSL_CTX_get_verify_depth(self._context)
def load_tmp_dh(self, dhfile):
"""
Load parameters for Ephemeral Diffie-Hellman
:param dhfile: The file to load EDH parameters from (``bytes`` or
``unicode``).
:return: None
"""
dhfile = _path_string(dhfile)
bio = _lib.BIO_new_file(dhfile, b"r")
if bio == _ffi.NULL:
_raise_current_error()
bio = _ffi.gc(bio, _lib.BIO_free)
dh = _lib.PEM_read_bio_DHparams(bio, _ffi.NULL, _ffi.NULL, _ffi.NULL)
dh = _ffi.gc(dh, _lib.DH_free)
_lib.SSL_CTX_set_tmp_dh(self._context, dh)
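    # The parameter file can be generated ahead of time with the OpenSSL CLI,
    # e.g. ``openssl dhparam -out dhparam.pem 2048`` (illustrative command;
    # the filename is arbitrary).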
def set_tmp_ecdh(self, curve):
"""
Select a curve to use for ECDHE key exchange.
:param curve: A curve object to use as returned by either
:py:meth:`OpenSSL.crypto.get_elliptic_curve` or
:py:meth:`OpenSSL.crypto.get_elliptic_curves`.
:return: None
"""
_lib.SSL_CTX_set_tmp_ecdh(self._context, curve._to_EC_KEY())
def set_cipher_list(self, cipher_list):
"""
Set the list of ciphers to be used in this context.
See the OpenSSL manual for more information (e.g.
:manpage:`ciphers(1)`).
:param bytes cipher_list: An OpenSSL cipher string.
:return: None
"""
cipher_list = _text_to_bytes_and_warn("cipher_list", cipher_list)
if not isinstance(cipher_list, bytes):
raise TypeError("cipher_list must be a byte string.")
_openssl_assert(
_lib.SSL_CTX_set_cipher_list(self._context, cipher_list) == 1
)
def set_client_ca_list(self, certificate_authorities):
"""
Set the list of preferred client certificate signers for this server
context.
This list of certificate authorities will be sent to the client when
the server requests a client certificate.
:param certificate_authorities: a sequence of X509Names.
:return: None
"""
name_stack = _lib.sk_X509_NAME_new_null()
_openssl_assert(name_stack != _ffi.NULL)
try:
for ca_name in certificate_authorities:
if not isinstance(ca_name, X509Name):
raise TypeError(
"client CAs must be X509Name objects, not %s "
"objects" % (
type(ca_name).__name__,
)
)
copy = _lib.X509_NAME_dup(ca_name._name)
_openssl_assert(copy != _ffi.NULL)
push_result = _lib.sk_X509_NAME_push(name_stack, copy)
if not push_result:
_lib.X509_NAME_free(copy)
_raise_current_error()
except:
_lib.sk_X509_NAME_free(name_stack)
raise
_lib.SSL_CTX_set_client_CA_list(self._context, name_stack)
def add_client_ca(self, certificate_authority):
"""
Add the CA certificate to the list of preferred signers for this
context.
The list of certificate authorities will be sent to the client when the
server requests a client certificate.
:param certificate_authority: certificate authority's X509 certificate.
:return: None
"""
if not isinstance(certificate_authority, X509):
raise TypeError("certificate_authority must be an X509 instance")
add_result = _lib.SSL_CTX_add_client_CA(
self._context, certificate_authority._x509)
_openssl_assert(add_result == 1)
def set_timeout(self, timeout):
"""
Set session timeout
:param timeout: The timeout in seconds
:return: The previous session timeout
"""
if not isinstance(timeout, integer_types):
raise TypeError("timeout must be an integer")
return _lib.SSL_CTX_set_timeout(self._context, timeout)
def get_timeout(self):
"""
Get the session timeout
:return: The session timeout
"""
return _lib.SSL_CTX_get_timeout(self._context)
def set_info_callback(self, callback):
"""
Set the info callback
:param callback: The Python callback to use
:return: None
"""
@wraps(callback)
def wrapper(ssl, where, return_code):
callback(Connection._reverse_mapping[ssl], where, return_code)
self._info_callback = _ffi.callback(
"void (*)(const SSL *, int, int)", wrapper)
_lib.SSL_CTX_set_info_callback(self._context, self._info_callback)
def get_app_data(self):
"""
Get the application data (supplied via set_app_data())
:return: The application data
"""
return self._app_data
def set_app_data(self, data):
"""
Set the application data (will be returned from get_app_data())
:param data: Any Python object
:return: None
"""
self._app_data = data
def get_cert_store(self):
"""
Get the certificate store for the context.
:return: A X509Store object or None if it does not have one.
"""
store = _lib.SSL_CTX_get_cert_store(self._context)
if store == _ffi.NULL:
# TODO: This is untested.
return None
pystore = X509Store.__new__(X509Store)
pystore._store = store
return pystore
def set_options(self, options):
"""
Add options. Options set before are not cleared!
:param options: The options to add.
:return: The new option bitmask.
"""
if not isinstance(options, integer_types):
raise TypeError("options must be an integer")
return _lib.SSL_CTX_set_options(self._context, options)
def set_mode(self, mode):
"""
Add modes via bitmask. Modes set before are not cleared!
:param mode: The mode to add.
:return: The new mode bitmask.
"""
if not isinstance(mode, integer_types):
raise TypeError("mode must be an integer")
return _lib.SSL_CTX_set_mode(self._context, mode)
@_requires_sni
def set_tlsext_servername_callback(self, callback):
"""
Specify a callback function to be called when clients specify a server
name.
:param callback: The callback function. It will be invoked with one
argument, the Connection instance.
"""
@wraps(callback)
def wrapper(ssl, alert, arg):
callback(Connection._reverse_mapping[ssl])
return 0
self._tlsext_servername_callback = _ffi.callback(
"int (*)(const SSL *, int *, void *)", wrapper)
_lib.SSL_CTX_set_tlsext_servername_callback(
self._context, self._tlsext_servername_callback)
@_requires_npn
def set_npn_advertise_callback(self, callback):
"""
Specify a callback function that will be called when offering `Next
Protocol Negotiation
<https://technotes.googlecode.com/git/nextprotoneg.html>`_ as a server.
:param callback: The callback function. It will be invoked with one
argument, the Connection instance. It should return a list of
bytestrings representing the advertised protocols, like
``[b'http/1.1', b'spdy/2']``.
"""
self._npn_advertise_helper = _NpnAdvertiseHelper(callback)
self._npn_advertise_callback = self._npn_advertise_helper.callback
_lib.SSL_CTX_set_next_protos_advertised_cb(
self._context, self._npn_advertise_callback, _ffi.NULL)
@_requires_npn
def set_npn_select_callback(self, callback):
"""
Specify a callback function that will be called when a server offers
Next Protocol Negotiation options.
:param callback: The callback function. It will be invoked with two
arguments: the Connection, and a list of offered protocols as
bytestrings, e.g. ``[b'http/1.1', b'spdy/2']``. It should return
one of those bytestrings, the chosen protocol.
"""
self._npn_select_helper = _NpnSelectHelper(callback)
self._npn_select_callback = self._npn_select_helper.callback
_lib.SSL_CTX_set_next_proto_select_cb(
self._context, self._npn_select_callback, _ffi.NULL)
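    # Illustrative sketch (not in the upstream file): the selection callback
    # picks one of the offered protocol bytestrings, e.g.
    #
    #     def select_cb(connection, protocols):
    #         return b'http/1.1' if b'http/1.1' in protocols else protocols[0]
    #     ctx.set_npn_select_callback(select_cb)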
@_requires_alpn
def set_alpn_protos(self, protos):
"""
        Specify the client's ALPN protocol list.
These protocols are offered to the server during protocol negotiation.
:param protos: A list of the protocols to be offered to the server.
This list should be a Python list of bytestrings representing the
protocols to offer, e.g. ``[b'http/1.1', b'spdy/2']``.
"""
# Take the list of protocols and join them together, prefixing them
# with their lengths.
protostr = b''.join(
chain.from_iterable((int2byte(len(p)), p) for p in protos)
)
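        # For example, [b'http/1.1', b'spdy/2'] serializes to
        # b'\x08http/1.1\x06spdy/2' (each protocol preceded by its length).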
# Build a C string from the list. We don't need to save this off
# because OpenSSL immediately copies the data out.
input_str = _ffi.new("unsigned char[]", protostr)
input_str_len = _ffi.cast("unsigned", len(protostr))
_lib.SSL_CTX_set_alpn_protos(self._context, input_str, input_str_len)
@_requires_alpn
def set_alpn_select_callback(self, callback):
"""
Set the callback to handle ALPN protocol choice.
:param callback: The callback function. It will be invoked with two
arguments: the Connection, and a list of offered protocols as
            bytestrings, e.g. ``[b'http/1.1', b'spdy/2']``. It should return
one of those bytestrings, the chosen protocol.
"""
self._alpn_select_helper = _ALPNSelectHelper(callback)
self._alpn_select_callback = self._alpn_select_helper.callback
_lib.SSL_CTX_set_alpn_select_cb(
self._context, self._alpn_select_callback, _ffi.NULL)
ContextType = Context
class Connection(object):
"""
"""
_reverse_mapping = WeakValueDictionary()
def __init__(self, context, socket=None):
"""
Create a new Connection object, using the given OpenSSL.SSL.Context
instance and socket.
:param context: An SSL Context to use for this connection
:param socket: The socket to use for transport layer
"""
if not isinstance(context, Context):
raise TypeError("context must be a Context instance")
ssl = _lib.SSL_new(context._context)
self._ssl = _ffi.gc(ssl, _lib.SSL_free)
self._context = context
self._app_data = None
# References to strings used for Next Protocol Negotiation. OpenSSL's
        # header files suggest that these might get copied at some point, but
        # don't specify when, so we store them here to make sure they don't
# get freed before OpenSSL uses them.
self._npn_advertise_callback_args = None
self._npn_select_callback_args = None
# References to strings used for Application Layer Protocol
# Negotiation. These strings get copied at some point but it's well
# after the callback returns, so we have to hang them somewhere to
# avoid them getting freed.
self._alpn_select_callback_args = None
self._reverse_mapping[self._ssl] = self
if socket is None:
self._socket = None
# Don't set up any gc for these, SSL_free will take care of them.
self._into_ssl = _lib.BIO_new(_lib.BIO_s_mem())
_openssl_assert(self._into_ssl != _ffi.NULL)
self._from_ssl = _lib.BIO_new(_lib.BIO_s_mem())
_openssl_assert(self._from_ssl != _ffi.NULL)
_lib.SSL_set_bio(self._ssl, self._into_ssl, self._from_ssl)
else:
self._into_ssl = None
self._from_ssl = None
self._socket = socket
set_result = _lib.SSL_set_fd(
self._ssl, _asFileDescriptor(self._socket))
_openssl_assert(set_result == 1)
def __getattr__(self, name):
"""
Look up attributes on the wrapped socket object if they are not found
on the Connection object.
"""
if self._socket is None:
raise AttributeError("'%s' object has no attribute '%s'" % (
self.__class__.__name__, name
))
else:
return getattr(self._socket, name)
def _raise_ssl_error(self, ssl, result):
if self._context._verify_helper is not None:
self._context._verify_helper.raise_if_problem()
if self._context._npn_advertise_helper is not None:
self._context._npn_advertise_helper.raise_if_problem()
if self._context._npn_select_helper is not None:
self._context._npn_select_helper.raise_if_problem()
if self._context._alpn_select_helper is not None:
self._context._alpn_select_helper.raise_if_problem()
error = _lib.SSL_get_error(ssl, result)
if error == _lib.SSL_ERROR_WANT_READ:
raise WantReadError()
elif error == _lib.SSL_ERROR_WANT_WRITE:
raise WantWriteError()
elif error == _lib.SSL_ERROR_ZERO_RETURN:
raise ZeroReturnError()
elif error == _lib.SSL_ERROR_WANT_X509_LOOKUP:
# TODO: This is untested.
raise WantX509LookupError()
elif error == _lib.SSL_ERROR_SYSCALL:
if _lib.ERR_peek_error() == 0:
if result < 0:
if platform == "win32":
errno = _ffi.getwinerror()[0]
else:
errno = _ffi.errno
if errno != 0:
raise SysCallError(errno, errorcode.get(errno))
raise SysCallError(-1, "Unexpected EOF")
else:
# TODO: This is untested.
_raise_current_error()
elif error == _lib.SSL_ERROR_NONE:
pass
else:
_raise_current_error()
def get_context(self):
"""
Get session context
"""
return self._context
def set_context(self, context):
"""
Switch this connection to a new session context
:param context: A :py:class:`Context` instance giving the new session
context to use.
"""
if not isinstance(context, Context):
raise TypeError("context must be a Context instance")
_lib.SSL_set_SSL_CTX(self._ssl, context._context)
self._context = context
@_requires_sni
def get_servername(self):
"""
Retrieve the servername extension value if provided in the client hello
message, or None if there wasn't one.
:return: A byte string giving the server name or :py:data:`None`.
"""
name = _lib.SSL_get_servername(
self._ssl, _lib.TLSEXT_NAMETYPE_host_name
)
if name == _ffi.NULL:
return None
return _ffi.string(name)
@_requires_sni
def set_tlsext_host_name(self, name):
"""
Set the value of the servername extension to send in the client hello.
:param name: A byte string giving the name.
"""
if not isinstance(name, bytes):
raise TypeError("name must be a byte string")
elif b"\0" in name:
raise TypeError("name must not contain NUL byte")
# XXX I guess this can fail sometimes?
_lib.SSL_set_tlsext_host_name(self._ssl, name)
def pending(self):
"""
Get the number of bytes that can be safely read from the connection
:return: The number of bytes available in the receive buffer.
"""
return _lib.SSL_pending(self._ssl)
def send(self, buf, flags=0):
"""
Send data on the connection. NOTE: If you get one of the WantRead,
WantWrite or WantX509Lookup exceptions on this, you have to call the
method again with the SAME buffer.
:param buf: The string, buffer or memoryview to send
:param flags: (optional) Included for compatibility with the socket
API, the value is ignored
:return: The number of bytes written
"""
# Backward compatibility
buf = _text_to_bytes_and_warn("buf", buf)
if isinstance(buf, _memoryview):
buf = buf.tobytes()
if isinstance(buf, _buffer):
buf = str(buf)
if not isinstance(buf, bytes):
raise TypeError("data must be a memoryview, buffer or byte string")
result = _lib.SSL_write(self._ssl, buf, len(buf))
self._raise_ssl_error(self._ssl, result)
return result
write = send
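    # Illustrative retry sketch (not in the upstream file): on a non-blocking
    # transport, WantReadError/WantWriteError mean "try again with the SAME
    # buffer once the socket is ready":
    #
    #     while True:
    #         try:
    #             sent = conn.send(data)
    #             break
    #         except (WantReadError, WantWriteError):
    #             pass  # wait for socket readiness, then retry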
def sendall(self, buf, flags=0):
"""
Send "all" data on the connection. This calls send() repeatedly until
all data is sent. If an error occurs, it's impossible to tell how much
data has been sent.
:param buf: The string, buffer or memoryview to send
:param flags: (optional) Included for compatibility with the socket
API, the value is ignored
:return: The number of bytes written
"""
buf = _text_to_bytes_and_warn("buf", buf)
if isinstance(buf, _memoryview):
buf = buf.tobytes()
if isinstance(buf, _buffer):
buf = str(buf)
if not isinstance(buf, bytes):
raise TypeError("buf must be a memoryview, buffer or byte string")
left_to_send = len(buf)
total_sent = 0
data = _ffi.new("char[]", buf)
while left_to_send:
result = _lib.SSL_write(self._ssl, data + total_sent, left_to_send)
self._raise_ssl_error(self._ssl, result)
total_sent += result
left_to_send -= result
def recv(self, bufsiz, flags=None):
"""
Receive data on the connection.
:param bufsiz: The maximum number of bytes to read
:param flags: (optional) The only supported flag is ``MSG_PEEK``,
all other flags are ignored.
:return: The string read from the Connection
"""
buf = _ffi.new("char[]", bufsiz)
if flags is not None and flags & socket.MSG_PEEK:
result = _lib.SSL_peek(self._ssl, buf, bufsiz)
else:
result = _lib.SSL_read(self._ssl, buf, bufsiz)
self._raise_ssl_error(self._ssl, result)
return _ffi.buffer(buf, result)[:]
read = recv
def recv_into(self, buffer, nbytes=None, flags=None):
"""
Receive data on the connection and store the data into a buffer rather
than creating a new string.
:param buffer: The buffer to copy into.
:param nbytes: (optional) The maximum number of bytes to read into the
buffer. If not present, defaults to the size of the buffer. If
larger than the size of the buffer, is reduced to the size of the
buffer.
:param flags: (optional) The only supported flag is ``MSG_PEEK``,
all other flags are ignored.
:return: The number of bytes read into the buffer.
"""
if nbytes is None:
nbytes = len(buffer)
else:
nbytes = min(nbytes, len(buffer))
# We need to create a temporary buffer. This is annoying, it would be
# better if we could pass memoryviews straight into the SSL_read call,
# but right now we can't. Revisit this if CFFI gets that ability.
buf = _ffi.new("char[]", nbytes)
if flags is not None and flags & socket.MSG_PEEK:
result = _lib.SSL_peek(self._ssl, buf, nbytes)
else:
result = _lib.SSL_read(self._ssl, buf, nbytes)
self._raise_ssl_error(self._ssl, result)
# This strange line is all to avoid a memory copy. The buffer protocol
# should allow us to assign a CFFI buffer to the LHS of this line, but
# on CPython 3.3+ that segfaults. As a workaround, we can temporarily
# wrap it in a memoryview, except on Python 2.6 which doesn't have a
# memoryview type.
try:
buffer[:result] = memoryview(_ffi.buffer(buf, result))
except NameError:
buffer[:result] = _ffi.buffer(buf, result)
return result
def _handle_bio_errors(self, bio, result):
if _lib.BIO_should_retry(bio):
if _lib.BIO_should_read(bio):
raise WantReadError()
elif _lib.BIO_should_write(bio):
# TODO: This is untested.
raise WantWriteError()
elif _lib.BIO_should_io_special(bio):
# TODO: This is untested. I think io_special means the socket
# BIO has a not-yet connected socket.
raise ValueError("BIO_should_io_special")
else:
# TODO: This is untested.
raise ValueError("unknown bio failure")
else:
# TODO: This is untested.
_raise_current_error()
def bio_read(self, bufsiz):
"""
When using non-socket connections this function reads the "dirty" data
that would have traveled away on the network.
:param bufsiz: The maximum number of bytes to read
:return: The string read.
"""
if self._from_ssl is None:
raise TypeError("Connection sock was not None")
if not isinstance(bufsiz, integer_types):
raise TypeError("bufsiz must be an integer")
buf = _ffi.new("char[]", bufsiz)
result = _lib.BIO_read(self._from_ssl, buf, bufsiz)
if result <= 0:
self._handle_bio_errors(self._from_ssl, result)
return _ffi.buffer(buf, result)[:]
def bio_write(self, buf):
"""
When using non-socket connections this function sends "dirty" data that
would have traveled in on the network.
:param buf: The string to put into the memory BIO.
:return: The number of bytes written
"""
buf = _text_to_bytes_and_warn("buf", buf)
if self._into_ssl is None:
raise TypeError("Connection sock was not None")
result = _lib.BIO_write(self._into_ssl, buf, len(buf))
if result <= 0:
self._handle_bio_errors(self._into_ssl, result)
return result
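    # Illustrative memory-BIO pump (a sketch, not upstream code; "transport"
    # is a hypothetical object with read()/write()): drive the handshake by
    # shuttling "dirty" bytes between the BIO pair and the real transport.
    #
    #     try:
    #         conn.do_handshake()
    #     except WantReadError:
    #         transport.write(conn.bio_read(4096))   # flush "dirty" output
    #         conn.bio_write(transport.read(4096))   # feed "dirty" input
    #         # ...then retry do_handshake()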
def renegotiate(self):
"""
Renegotiate the session.
:return: True if the renegotiation can be started, False otherwise
:rtype: bool
"""
if not self.renegotiate_pending():
_openssl_assert(_lib.SSL_renegotiate(self._ssl) == 1)
return True
return False
def do_handshake(self):
"""
Perform an SSL handshake (usually called after renegotiate() or one of
set_*_state()). This can raise the same exceptions as send and recv.
:return: None.
"""
result = _lib.SSL_do_handshake(self._ssl)
self._raise_ssl_error(self._ssl, result)
def renegotiate_pending(self):
"""
        Check if there's a renegotiation in progress; this returns False once
        the renegotiation is finished.
:return: Whether there's a renegotiation in progress
:rtype: bool
"""
return _lib.SSL_renegotiate_pending(self._ssl) == 1
def total_renegotiations(self):
"""
Find out the total number of renegotiations.
:return: The number of renegotiations.
:rtype: int
"""
return _lib.SSL_total_renegotiations(self._ssl)
def connect(self, addr):
"""
Connect to remote host and set up client-side SSL
:param addr: A remote address
:return: What the socket's connect method returns
"""
_lib.SSL_set_connect_state(self._ssl)
return self._socket.connect(addr)
def connect_ex(self, addr):
"""
Connect to remote host and set up client-side SSL. Note that if the
socket's connect_ex method doesn't return 0, SSL won't be initialized.
        :param addr: A remote address
:return: What the socket's connect_ex method returns
"""
connect_ex = self._socket.connect_ex
self.set_connect_state()
return connect_ex(addr)
def accept(self):
"""
Accept incoming connection and set up SSL on it
:return: A (conn,addr) pair where conn is a Connection and addr is an
address
"""
client, addr = self._socket.accept()
conn = Connection(self._context, client)
conn.set_accept_state()
return (conn, addr)
def bio_shutdown(self):
"""
When using non-socket connections this function signals end of
data on the input for this connection.
:return: None
"""
if self._from_ssl is None:
raise TypeError("Connection sock was not None")
_lib.BIO_set_mem_eof_return(self._into_ssl, 0)
def shutdown(self):
"""
Send closure alert
        :return: True if the shutdown completed successfully (i.e. both sides
            have sent closure alerts), False otherwise (i.e. you have to
            wait for a ZeroReturnError on a recv() method call).
"""
result = _lib.SSL_shutdown(self._ssl)
if result < 0:
self._raise_ssl_error(self._ssl, result)
elif result > 0:
return True
else:
return False
def get_cipher_list(self):
"""
Retrieve the list of ciphers used by the Connection object.
:return: A list of native cipher strings.
"""
ciphers = []
for i in count():
result = _lib.SSL_get_cipher_list(self._ssl, i)
if result == _ffi.NULL:
break
ciphers.append(_native(_ffi.string(result)))
return ciphers
def get_client_ca_list(self):
"""
Get CAs whose certificates are suggested for client authentication.
:return: If this is a server connection, a list of X509Names
representing the acceptable CAs as set by
:py:meth:`OpenSSL.SSL.Context.set_client_ca_list` or
:py:meth:`OpenSSL.SSL.Context.add_client_ca`. If this is a client
connection, the list of such X509Names sent by the server, or an
empty list if that has not yet happened.
"""
ca_names = _lib.SSL_get_client_CA_list(self._ssl)
if ca_names == _ffi.NULL:
# TODO: This is untested.
return []
result = []
for i in range(_lib.sk_X509_NAME_num(ca_names)):
name = _lib.sk_X509_NAME_value(ca_names, i)
copy = _lib.X509_NAME_dup(name)
_openssl_assert(copy != _ffi.NULL)
pyname = X509Name.__new__(X509Name)
pyname._name = _ffi.gc(copy, _lib.X509_NAME_free)
result.append(pyname)
return result
def makefile(self):
"""
        The makefile() method is not implemented, since there are no dup
        semantics for SSL connections.
:raise: NotImplementedError
"""
raise NotImplementedError(
"Cannot make file object of OpenSSL.SSL.Connection")
def get_app_data(self):
"""
Get application data
:return: The application data
"""
return self._app_data
def set_app_data(self, data):
"""
Set application data
        :param data: The application data
:return: None
"""
self._app_data = data
def get_shutdown(self):
"""
Get shutdown state
:return: The shutdown state, a bitvector of SENT_SHUTDOWN,
RECEIVED_SHUTDOWN.
"""
return _lib.SSL_get_shutdown(self._ssl)
def set_shutdown(self, state):
"""
Set shutdown state
        :param state: A bitvector of SENT_SHUTDOWN, RECEIVED_SHUTDOWN.
:return: None
"""
if not isinstance(state, integer_types):
raise TypeError("state must be an integer")
_lib.SSL_set_shutdown(self._ssl, state)
def get_state_string(self):
"""
Retrieve a verbose string detailing the state of the Connection.
:return: A string representing the state
:rtype: bytes
"""
return _ffi.string(_lib.SSL_state_string_long(self._ssl))
def server_random(self):
"""
Get a copy of the server hello nonce.
        :return: A bytestring of the server hello nonce, or None if no
            session has been established.
"""
session = _lib.SSL_get_session(self._ssl)
if session == _ffi.NULL:
return None
length = _lib.SSL_get_server_random(self._ssl, _ffi.NULL, 0)
assert length > 0
outp = _ffi.new("unsigned char[]", length)
_lib.SSL_get_server_random(self._ssl, outp, length)
return _ffi.buffer(outp, length)[:]
def client_random(self):
"""
Get a copy of the client hello nonce.
        :return: A bytestring of the client hello nonce, or None if no
            session has been established.
"""
session = _lib.SSL_get_session(self._ssl)
if session == _ffi.NULL:
return None
length = _lib.SSL_get_client_random(self._ssl, _ffi.NULL, 0)
assert length > 0
outp = _ffi.new("unsigned char[]", length)
_lib.SSL_get_client_random(self._ssl, outp, length)
return _ffi.buffer(outp, length)[:]
def master_key(self):
"""
Get a copy of the master key.
        :return: A bytestring of the master key, or None if no session has
            been established.
"""
session = _lib.SSL_get_session(self._ssl)
if session == _ffi.NULL:
return None
length = _lib.SSL_SESSION_get_master_key(session, _ffi.NULL, 0)
assert length > 0
outp = _ffi.new("unsigned char[]", length)
_lib.SSL_SESSION_get_master_key(session, outp, length)
return _ffi.buffer(outp, length)[:]
def sock_shutdown(self, *args, **kwargs):
"""
See shutdown(2)
:return: What the socket's shutdown() method returns
"""
return self._socket.shutdown(*args, **kwargs)
def get_peer_certificate(self):
"""
Retrieve the other side's certificate (if any)
:return: The peer's certificate
"""
cert = _lib.SSL_get_peer_certificate(self._ssl)
if cert != _ffi.NULL:
pycert = X509.__new__(X509)
pycert._x509 = _ffi.gc(cert, _lib.X509_free)
return pycert
return None
def get_peer_cert_chain(self):
"""
Retrieve the other side's certificate (if any)
:return: A list of X509 instances giving the peer's certificate chain,
or None if it does not have one.
"""
cert_stack = _lib.SSL_get_peer_cert_chain(self._ssl)
if cert_stack == _ffi.NULL:
return None
result = []
for i in range(_lib.sk_X509_num(cert_stack)):
# TODO could incref instead of dup here
cert = _lib.X509_dup(_lib.sk_X509_value(cert_stack, i))
pycert = X509.__new__(X509)
pycert._x509 = _ffi.gc(cert, _lib.X509_free)
result.append(pycert)
return result
def want_read(self):
"""
Checks if more data has to be read from the transport layer to complete
an operation.
:return: True iff more data has to be read
"""
return _lib.SSL_want_read(self._ssl)
def want_write(self):
"""
Checks if there is data to write to the transport layer to complete an
operation.
:return: True iff there is data to write
"""
return _lib.SSL_want_write(self._ssl)
def set_accept_state(self):
"""
Set the connection to work in server mode. The handshake will be
handled automatically by read/write.
:return: None
"""
_lib.SSL_set_accept_state(self._ssl)
def set_connect_state(self):
"""
Set the connection to work in client mode. The handshake will be
handled automatically by read/write.
:return: None
"""
_lib.SSL_set_connect_state(self._ssl)
def get_session(self):
"""
Returns the Session currently used.
        :return: An instance of :py:class:`OpenSSL.SSL.Session` or
            :py:obj:`None` if no session exists.
"""
session = _lib.SSL_get1_session(self._ssl)
if session == _ffi.NULL:
return None
pysession = Session.__new__(Session)
pysession._session = _ffi.gc(session, _lib.SSL_SESSION_free)
return pysession
def set_session(self, session):
"""
Set the session to be used when the TLS/SSL connection is established.
:param session: A Session instance representing the session to use.
:returns: None
"""
if not isinstance(session, Session):
raise TypeError("session must be a Session instance")
result = _lib.SSL_set_session(self._ssl, session._session)
if not result:
_raise_current_error()
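    # Illustrative resumption sketch (hypothetical connection objects, not
    # upstream code): capture the session negotiated on one connection and
    # offer it on the next to attempt an abbreviated handshake.
    #
    #     session = old_conn.get_session()
    #     new_conn.set_session(session)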
def _get_finished_message(self, function):
"""
Helper to implement :py:meth:`get_finished` and
:py:meth:`get_peer_finished`.
:param function: Either :py:data:`SSL_get_finished`: or
:py:data:`SSL_get_peer_finished`.
:return: :py:data:`None` if the desired message has not yet been
received, otherwise the contents of the message.
:rtype: :py:class:`bytes` or :py:class:`NoneType`
"""
# The OpenSSL documentation says nothing about what might happen if the
# count argument given is zero. Specifically, it doesn't say whether
# the output buffer may be NULL in that case or not. Inspection of the
# implementation reveals that it calls memcpy() unconditionally.
# Section 7.1.4, paragraph 1 of the C standard suggests that
# memcpy(NULL, source, 0) is not guaranteed to produce defined (let
# alone desirable) behavior (though it probably does on just about
# every implementation...)
#
# Allocate a tiny buffer to pass in (instead of just passing NULL as
# one might expect) for the initial call so as to be safe against this
# potentially undefined behavior.
empty = _ffi.new("char[]", 0)
size = function(self._ssl, empty, 0)
if size == 0:
# No Finished message so far.
return None
buf = _ffi.new("char[]", size)
function(self._ssl, buf, size)
return _ffi.buffer(buf, size)[:]
def get_finished(self):
"""
Obtain the latest `handshake finished` message sent to the peer.
:return: The contents of the message or :py:obj:`None` if the TLS
handshake has not yet completed.
:rtype: :py:class:`bytes` or :py:class:`NoneType`
"""
return self._get_finished_message(_lib.SSL_get_finished)
def get_peer_finished(self):
"""
Obtain the latest `handshake finished` message received from the peer.
:return: The contents of the message or :py:obj:`None` if the TLS
handshake has not yet completed.
:rtype: :py:class:`bytes` or :py:class:`NoneType`
"""
return self._get_finished_message(_lib.SSL_get_peer_finished)
def get_cipher_name(self):
"""
Obtain the name of the currently used cipher.
:returns: The name of the currently used cipher or :py:obj:`None`
if no connection has been established.
:rtype: :py:class:`unicode` or :py:class:`NoneType`
"""
cipher = _lib.SSL_get_current_cipher(self._ssl)
if cipher == _ffi.NULL:
return None
else:
name = _ffi.string(_lib.SSL_CIPHER_get_name(cipher))
return name.decode("utf-8")
def get_cipher_bits(self):
"""
Obtain the number of secret bits of the currently used cipher.
:returns: The number of secret bits of the currently used cipher
or :py:obj:`None` if no connection has been established.
:rtype: :py:class:`int` or :py:class:`NoneType`
"""
cipher = _lib.SSL_get_current_cipher(self._ssl)
if cipher == _ffi.NULL:
return None
else:
return _lib.SSL_CIPHER_get_bits(cipher, _ffi.NULL)
def get_cipher_version(self):
"""
Obtain the protocol version of the currently used cipher.
:returns: The protocol name of the currently used cipher
or :py:obj:`None` if no connection has been established.
:rtype: :py:class:`unicode` or :py:class:`NoneType`
"""
cipher = _lib.SSL_get_current_cipher(self._ssl)
if cipher == _ffi.NULL:
return None
else:
version = _ffi.string(_lib.SSL_CIPHER_get_version(cipher))
return version.decode("utf-8")
def get_protocol_version_name(self):
"""
Obtain the protocol version of the current connection.
:returns: The TLS version of the current connection, for example
            the value for TLS 1.2 would be ``TLSv1.2``, or ``Unknown``
for connections that were not successfully established.
:rtype: :py:class:`unicode`
"""
version = _ffi.string(_lib.SSL_get_version(self._ssl))
return version.decode("utf-8")
def get_protocol_version(self):
"""
Obtain the protocol version of the current connection.
:returns: The TLS version of the current connection, for example
            the value for TLS 1 would be 769 (0x0301).
:rtype: :py:class:`int`
"""
version = _lib.SSL_version(self._ssl)
return version
@_requires_npn
def get_next_proto_negotiated(self):
"""
Get the protocol that was negotiated by NPN.
"""
data = _ffi.new("unsigned char **")
data_len = _ffi.new("unsigned int *")
_lib.SSL_get0_next_proto_negotiated(self._ssl, data, data_len)
return _ffi.buffer(data[0], data_len[0])[:]
@_requires_alpn
def set_alpn_protos(self, protos):
"""
Specify the client's ALPN protocol list.
These protocols are offered to the server during protocol negotiation.
:param protos: A list of the protocols to be offered to the server.
This list should be a Python list of bytestrings representing the
protocols to offer, e.g. ``[b'http/1.1', b'spdy/2']``.
"""
# Take the list of protocols and join them together, prefixing them
# with their lengths.
protostr = b''.join(
chain.from_iterable((int2byte(len(p)), p) for p in protos)
)
# Build a C string from the list. We don't need to save this off
# because OpenSSL immediately copies the data out.
input_str = _ffi.new("unsigned char[]", protostr)
input_str_len = _ffi.cast("unsigned", len(protostr))
_lib.SSL_set_alpn_protos(self._ssl, input_str, input_str_len)
@_requires_alpn
def get_alpn_proto_negotiated(self):
"""
Get the protocol that was negotiated by ALPN.
"""
data = _ffi.new("unsigned char **")
data_len = _ffi.new("unsigned int *")
_lib.SSL_get0_alpn_selected(self._ssl, data, data_len)
        # data_len is a non-NULL cdata pointer and thus always truthy, so
        # inspect the pointed-to length; zero means nothing was negotiated.
        if data_len[0] == 0:
            return b''
return _ffi.buffer(data[0], data_len[0])[:]
ConnectionType = Connection
# This is similar to the initialization calls at the end of OpenSSL/crypto.py
# but is exercised mostly by the Context initializer.
_lib.SSL_library_init()
|
apache-2.0
|
simontakite/sysadmin
|
pythonscripts/pythonnetworkingcoookbook/chapter1/1_10_reuse_socket_address.py
|
2
|
1277
|
#!/usr/bin/env python
# Python Network Programming Cookbook -- Chapter - 1
# This program is optimized for Python 2.7.
# It may run on any other version with/without modifications.
import socket
import sys
def reuse_socket_addr():
sock = socket.socket( socket.AF_INET, socket.SOCK_STREAM )
# Get the old state of the SO_REUSEADDR option
old_state = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR )
print "Old sock state: %s" %old_state
# Enable the SO_REUSEADDR option
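    # (so the port can be rebound immediately after a restart instead of
    # failing with "Address already in use" while the old socket lingers
    # in TIME_WAIT)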
sock.setsockopt( socket.SOL_SOCKET, socket.SO_REUSEADDR, 1 )
new_state = sock.getsockopt( socket.SOL_SOCKET, socket.SO_REUSEADDR )
print "New sock state: %s" %new_state
local_port = 8282
srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srv.bind( ('', local_port) )
srv.listen(1)
print ("Listening on port: %s " %local_port)
while True:
try:
connection, addr = srv.accept()
print 'Connected by %s:%s' % (addr[0], addr[1])
except KeyboardInterrupt:
break
except socket.error, msg:
print '%s' % (msg,)
if __name__ == '__main__':
reuse_socket_addr()
|
gpl-2.0
|
thaim/ansible
|
lib/ansible/modules/storage/infinidat/infini_export_client.py
|
21
|
5608
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Gregory Shulov ([email protected])
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: infini_export_client
version_added: 2.3
short_description: Create, Delete or Modify NFS Client(s) for existing exports on Infinibox
description:
    - This module creates, deletes or modifies NFS client(s) for existing exports on Infinibox.
author: Gregory Shulov (@GR360RY)
options:
client:
description:
      - Client IP or Range. Ranges can be defined as follows:
        192.168.0.1-192.168.0.254.
aliases: ['name']
required: true
state:
description:
- Creates/Modifies client when present and removes when absent.
required: false
default: "present"
choices: [ "present", "absent" ]
access_mode:
description:
- Read Write or Read Only Access.
choices: [ "RW", "RO" ]
default: RW
required: false
no_root_squash:
description:
- Don't squash root user to anonymous. Will be set to "no" on creation if not specified explicitly.
type: bool
default: no
required: false
export:
description:
- Name of the export.
required: true
extends_documentation_fragment:
- infinibox
requirements:
- munch
'''
EXAMPLES = '''
- name: Make sure nfs client 10.0.0.1 is configured for export. Allow root access
infini_export_client:
client: 10.0.0.1
access_mode: RW
no_root_squash: yes
export: /data
user: admin
password: secret
system: ibox001
- name: Add multiple clients with RO access. Squash root privileges
infini_export_client:
client: "{{ item }}"
access_mode: RO
no_root_squash: no
export: /data
user: admin
password: secret
system: ibox001
with_items:
- 10.0.0.2
- 10.0.0.3
'''
RETURN = '''
'''
import traceback
MUNCH_IMP_ERR = None
try:
from munch import Munch, unmunchify
HAS_MUNCH = True
except ImportError:
MUNCH_IMP_ERR = traceback.format_exc()
HAS_MUNCH = False
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.infinibox import HAS_INFINISDK, api_wrapper, get_system, infinibox_argument_spec
def transform(d):
return frozenset(d.items())
@api_wrapper
def get_export(module, system):
"""Return export if found. Fail module if not found"""
try:
export = system.exports.get(export_path=module.params['export'])
except Exception:
module.fail_json(msg="Export with export path {0} not found".format(module.params['export']))
return export
@api_wrapper
def update_client(module, export):
"""Update export client list"""
changed = False
client = module.params['client']
access_mode = module.params['access_mode']
no_root_squash = module.params['no_root_squash']
client_list = export.get_permissions()
client_not_in_list = True
for index, item in enumerate(client_list):
if item.client == client:
client_not_in_list = False
if item.access != access_mode:
item.access = access_mode
changed = True
if item.no_root_squash is not no_root_squash:
item.no_root_squash = no_root_squash
changed = True
    # If access_mode and/or no_root_squash were not passed as module
    # arguments, the defaults apply: access_mode=RW and no_root_squash=False.
if client_not_in_list:
changed = True
client_list.append(Munch(client=client, access=access_mode, no_root_squash=no_root_squash))
if changed:
for index, item in enumerate(client_list):
client_list[index] = unmunchify(item)
if not module.check_mode:
export.update_permissions(client_list)
module.exit_json(changed=changed)
@api_wrapper
def delete_client(module, export):
"""Update export client list"""
changed = False
client = module.params['client']
client_list = export.get_permissions()
for index, item in enumerate(client_list):
if item.client == client:
changed = True
del client_list[index]
if changed:
for index, item in enumerate(client_list):
client_list[index] = unmunchify(item)
if not module.check_mode:
export.update_permissions(client_list)
module.exit_json(changed=changed)
def main():
argument_spec = infinibox_argument_spec()
argument_spec.update(
dict(
client=dict(required=True),
access_mode=dict(choices=['RO', 'RW'], default='RW'),
no_root_squash=dict(type='bool', default='no'),
state=dict(default='present', choices=['present', 'absent']),
export=dict(required=True)
)
)
module = AnsibleModule(argument_spec, supports_check_mode=True)
if not HAS_INFINISDK:
module.fail_json(msg=missing_required_lib('infinisdk'))
if not HAS_MUNCH:
module.fail_json(msg=missing_required_lib('munch'), exception=MUNCH_IMP_ERR)
system = get_system(module)
export = get_export(module, system)
if module.params['state'] == 'present':
update_client(module, export)
else:
delete_client(module, export)
if __name__ == '__main__':
main()
|
mit
|
michalliu/OpenWrt-Firefly-Libraries
|
staging_dir/host/lib/python2.7/distutils/dep_util.py
|
177
|
3509
|
"""distutils.dep_util
Utility functions for simple, timestamp-based dependency of files
and groups of files; also, function based entirely on such
timestamp dependency analysis."""
__revision__ = "$Id$"
import os
from stat import ST_MTIME
from distutils.errors import DistutilsFileError
def newer(source, target):
"""Tells if the target is newer than the source.
Return true if 'source' exists and is more recently modified than
'target', or if 'source' exists and 'target' doesn't.
Return false if both exist and 'target' is the same age or younger
than 'source'. Raise DistutilsFileError if 'source' does not exist.
Note that this test is not very accurate: files created in the same second
will have the same "age".
"""
if not os.path.exists(source):
raise DistutilsFileError("file '%s' does not exist" %
os.path.abspath(source))
if not os.path.exists(target):
return True
return os.stat(source)[ST_MTIME] > os.stat(target)[ST_MTIME]
def newer_pairwise(sources, targets):
"""Walk two filename lists in parallel, testing if each source is newer
than its corresponding target. Return a pair of lists (sources,
targets) where source is newer than target, according to the semantics
of 'newer()'.
"""
if len(sources) != len(targets):
raise ValueError, "'sources' and 'targets' must be same length"
# build a pair of lists (sources, targets) where source is newer
n_sources = []
n_targets = []
for source, target in zip(sources, targets):
if newer(source, target):
n_sources.append(source)
n_targets.append(target)
return n_sources, n_targets
def newer_group(sources, target, missing='error'):
"""Return true if 'target' is out-of-date with respect to any file
listed in 'sources'.
In other words, if 'target' exists and is newer
than every file in 'sources', return false; otherwise return true.
'missing' controls what we do when a source file is missing; the
default ("error") is to blow up with an OSError from inside 'stat()';
if it is "ignore", we silently drop any missing source files; if it is
"newer", any missing source files make us assume that 'target' is
out-of-date (this is handy in "dry-run" mode: it'll make you pretend to
carry out commands that wouldn't work because inputs are missing, but
that doesn't matter because you're not actually going to run the
commands).
"""
# If the target doesn't even exist, then it's definitely out-of-date.
if not os.path.exists(target):
return True
# Otherwise we have to find out the hard way: if *any* source file
# is more recent than 'target', then 'target' is out-of-date and
# we can immediately return true. If we fall through to the end
# of the loop, then 'target' is up-to-date and we return false.
target_mtime = os.stat(target)[ST_MTIME]
for source in sources:
if not os.path.exists(source):
if missing == 'error': # blow up when we stat() the file
pass
elif missing == 'ignore': # missing source dropped from
continue # target's dependency list
elif missing == 'newer': # missing source means target is
return True # out-of-date
if os.stat(source)[ST_MTIME] > target_mtime:
return True
return False
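# Illustrative usage (a sketch, not part of the original module; the file
# names and the recompile() step are hypothetical):
#
#     if newer_group(["foo.c", "foo.h"], "foo.o", missing="newer"):
#         recompile("foo.c")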
|
gpl-2.0
|
akirk/youtube-dl
|
youtube_dl/extractor/khanacademy.py
|
128
|
2740
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
unified_strdate,
)
class KhanAcademyIE(InfoExtractor):
_VALID_URL = r'^https?://(?:(?:www|api)\.)?khanacademy\.org/(?P<key>[^/]+)/(?:[^/]+/){,2}(?P<id>[^?#/]+)(?:$|[?#])'
IE_NAME = 'KhanAcademy'
_TESTS = [{
'url': 'http://www.khanacademy.org/video/one-time-pad',
'md5': '7021db7f2d47d4fff89b13177cb1e8f4',
'info_dict': {
'id': 'one-time-pad',
'ext': 'mp4',
'title': 'The one-time pad',
'description': 'The perfect cipher',
'duration': 176,
'uploader': 'Brit Cruise',
'uploader_id': 'khanacademy',
'upload_date': '20120411',
},
'add_ie': ['Youtube'],
}, {
'url': 'https://www.khanacademy.org/math/applied-math/cryptography',
'info_dict': {
'id': 'cryptography',
'title': 'Journey into cryptography',
'description': 'How have humans protected their secret messages through history? What has changed today?',
},
'playlist_mincount': 3,
}]
def _real_extract(self, url):
m = re.match(self._VALID_URL, url)
video_id = m.group('id')
if m.group('key') == 'video':
data = self._download_json(
'http://api.khanacademy.org/api/v1/videos/' + video_id,
video_id, 'Downloading video info')
upload_date = unified_strdate(data['date_added'])
uploader = ', '.join(data['author_names'])
return {
'_type': 'url_transparent',
'url': data['url'],
'id': video_id,
'title': data['title'],
'thumbnail': data['image_url'],
'duration': data['duration'],
'description': data['description'],
'uploader': uploader,
'upload_date': upload_date,
}
else:
# topic
data = self._download_json(
'http://api.khanacademy.org/api/v1/topic/' + video_id,
video_id, 'Downloading topic info')
entries = [
{
'_type': 'url',
'url': c['url'],
'id': c['id'],
'title': c['title'],
}
for c in data['children'] if c['kind'] in ('Video', 'Topic')]
return {
'_type': 'playlist',
'id': video_id,
'title': data['title'],
'description': data['description'],
'entries': entries,
}
|
unlicense
|
bboalimoe/ndn-cache-policy
|
docs/sphinx-contrib/tikz/setup.py
|
3
|
1632
|
# -*- coding: utf-8 -*-
LONG_DESCRIPTION = \
'''
This package contains the tikz Sphinx extension, which enables the use
of the PGF/TikZ LaTeX package to draw nice pictures.
'''
NAME = 'sphinxcontrib-tikz'
DESCRIPTION = 'TikZ extension for Sphinx'
VERSION = '0.4.1'
AUTHOR = 'Christoph Reller'
AUTHOR_EMAIL = '[email protected]'
URL = 'https://bitbucket.org/philexander/tikz'
DOWNLOAD = 'http://pypi.python.org/pypi/sphinxcontrib-tikz'
LICENSE = 'BSD'
REQUIRES = ['Sphinx']
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Documentation',
'Topic :: Utilities',
]
if __name__ == "__main__":
from setuptools import setup, find_packages
import sys
# Use 2to3 for Python 3 without warnings in Python 2
extra = {}
if sys.version_info >= (3,):
extra['use_2to3'] = True
setup(
name=NAME,
version=VERSION,
url=URL,
download_url=DOWNLOAD,
license=LICENSE,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
zip_safe=False,
classifiers=CLASSIFIERS,
platforms='any',
packages=find_packages(),
include_package_data=True,
install_requires=REQUIRES,
namespace_packages=['sphinxcontrib'],
**extra
)
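# Editor's hedged usage note: once installed (e.g. ``pip install
# sphinxcontrib-tikz``), the extension is typically enabled in a Sphinx
# project's conf.py; the exact module name below is an assumption:
#
#   extensions = ['sphinxcontrib.tikz']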
|
gpl-3.0
|
d33tah/npyscreen
|
TEST-SAFE-DISPLAY.py
|
8
|
1765
|
# coding=utf-8
import npyscreen
npyscreen.npysGlobalOptions.ASCII_ONLY = False
class TestApp(npyscreen.NPSAppManaged):
#__TEXT_WIDGET = npyscreen.TitleText
__TEXT_WIDGET = npyscreen.TextfieldUnicode
def main(self):
# These lines create the form and populate it with widgets.
# A fairly complex screen in only 8 or so lines of code - a line for each control.
npyscreen.setTheme(npyscreen.Themes.ColorfulTheme)
F = npyscreen.ActionFormWithMenus(name = "Welcome to Npyscreen",)
t1 = F.add(self.__class__.__TEXT_WIDGET, name = "Text:", )
t2 = F.add(self.__class__.__TEXT_WIDGET, name = "Text:", )
t3 = F.add(self.__class__.__TEXT_WIDGET, name = "Text:", )
t4 = F.add(self.__class__.__TEXT_WIDGET, name = "Text:", )
m1 = F.add(npyscreen.MultiLine, name = "Multiline", scroll_exit=True, max_height=5)
me = F.add(npyscreen.MultiLineEdit, name="Testing", autowrap=False)
t1.value = u"This is a \n test"
t2.value = u"This is a é test"
t3.value = u"This is ∑ a test"
t1.value = u"Testing tripple width \u3111 stuff."
t2.value = u"Testing double width stuff \u1000 <- there"
t4.value = u"another test is \u1D656 this one."
m1.values = [t1.value, t2.value, t3.value,
'another test \u24AF is here',
'another test is \u1D666 this one.']
me.value = '\n'.join([t1.value, t2.value, t3.value,
'another test is \u1D656 this one.'])
# This lets the user play with the Form.
F.edit()
if __name__ == "__main__":
App = TestApp()
App.run()
|
bsd-2-clause
|
roadmapper/ansible
|
lib/ansible/module_utils/facts/system/env.py
|
232
|
1170
|
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.module_utils.six import iteritems
from ansible.module_utils.facts.collector import BaseFactCollector
class EnvFactCollector(BaseFactCollector):
name = 'env'
_fact_ids = set()
def collect(self, module=None, collected_facts=None):
env_facts = {}
env_facts['env'] = {}
for k, v in iteritems(os.environ):
env_facts['env'][k] = v
return env_facts
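# Editor's hedged usage sketch: outside Ansible's collector machinery the
# class can be exercised directly, since 'module' and 'collected_facts'
# are optional in the signature above:
#
#   collector = EnvFactCollector()
#   facts = collector.collect()
#   print(facts['env'].get('HOME'))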
|
gpl-3.0
|
FeliciaLim/oss-fuzz
|
infra/base-images/base-msan-builder/packages/package.py
|
4
|
2413
|
#!/usr/bin/env python
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
import os
import subprocess
import apt
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
def ApplyPatch(source_directory, patch_name):
"""Apply custom patch."""
subprocess.check_call(['patch', '-p1', '-i',
os.path.join(SCRIPT_DIR, patch_name)],
cwd=source_directory)
class PackageException(Exception):
"""Base package exception."""
class Package(object):
"""Base package."""
def __init__(self, name, apt_version):
self.name = name
self.apt_version = apt_version
def PreBuild(self, source_directory, env, custom_bin_dir):
return
def PostBuild(self, source_directory, env, custom_bin_dir):
return
def PreDownload(self, download_directory):
return
def PostDownload(self, source_directory):
return
def InstallBuildDeps(self):
"""Install build dependencies for a package."""
subprocess.check_call(['apt-get', 'update'])
subprocess.check_call(['apt-get', 'build-dep', '-y', self.name])
# Reload package after update.
self.apt_version = (
apt.Cache()[self.apt_version.package.name].candidate)
def DownloadSource(self, download_directory):
"""Download the source for a package."""
self.PreDownload(download_directory)
source_directory = self.apt_version.fetch_source(download_directory)
self.PostDownload(source_directory)
return source_directory
def Build(self, source_directory, env, custom_bin_dir):
"""Build .deb packages."""
self.PreBuild(source_directory, env, custom_bin_dir)
subprocess.check_call(
['dpkg-buildpackage', '-us', '-uc', '-B'],
cwd=source_directory, env=env)
self.PostBuild(source_directory, env, custom_bin_dir)
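# Editor's hedged usage sketch: concrete packages are expected to subclass
# Package and override the Pre*/Post* hooks. The apt lookup below is an
# assumption about how apt_version is normally obtained, and the package
# name and paths are illustrative:
#
#   cache = apt.Cache()
#   pkg = Package('zlib1g', cache['zlib1g'].candidate)
#   pkg.InstallBuildDeps()
#   src_dir = pkg.DownloadSource('/tmp/build')
#   pkg.Build(src_dir, os.environ.copy(), '/tmp/custom-bin')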
|
apache-2.0
|
ieguinoa/tools-iuc
|
tools/gff3_rebase/gff3_rebase.py
|
23
|
7830
|
#!/usr/bin/env python
import argparse
import copy
import logging
import sys
from BCBio import GFF
from Bio.SeqFeature import FeatureLocation
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
__author__ = "Eric Rasche"
__version__ = "0.4.0"
__maintainer__ = "Eric Rasche"
__email__ = "[email protected]"
def feature_lambda(feature_list, test, test_kwargs, subfeatures=True):
"""Recursively search through features, testing each with a test function, yielding matches.
GFF3 is a hierarchical data structure, so we need to be able to recursively
search through features. E.g. if you're looking for a feature with
ID='bob.42', you can't just do a simple list comprehension with a test
case. You don't know how deeply buried bob.42 will be in the feature tree. This is where feature_lambda steps in.
:type feature_list: list
:param feature_list: an iterable of features
:type test: function reference
:param test: a closure with the method signature (feature, **kwargs) where
the kwargs are those passed in the next argument. This
function should return True or False, True if the feature is
to be yielded as part of the main feature_lambda function, or
False if it is to be ignored. This function CAN mutate the
features passed to it (think "apply").
:type test_kwargs: dictionary
:param test_kwargs: kwargs to pass to your closure when it is called.
:type subfeatures: boolean
:param subfeatures: when a feature is matched, should just that feature be
yielded to the caller, or should the entire sub_feature
tree for that feature be included? subfeatures=True is
useful in cases such as searching for a gene feature,
and wanting to know what RBS/Shine_Dalgarno_sequences
are in the sub_feature tree (which can be accomplished
with two feature_lambda calls). subfeatures=False is
useful in cases when you want to process (and possibly
return) the entire feature tree, such as applying a
qualifier to every single feature.
:rtype: yielded list
:return: Yields a list of matching features.
"""
# Either the top level set of [features] or the subfeature attribute
for feature in feature_list:
if test(feature, **test_kwargs):
if not subfeatures:
feature_copy = copy.deepcopy(feature)
feature_copy.sub_features = []
yield feature_copy
else:
yield feature
if hasattr(feature, 'sub_features'):
for x in feature_lambda(feature.sub_features, test, test_kwargs, subfeatures=subfeatures):
yield x
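# Editor's hedged usage sketch: a minimal closure for the 'test' argument,
# finding every feature whose ID qualifier equals 'bob.42' anywhere in the
# tree ('example.gff3' is a placeholder path):
#
#   def feature_test_id(feature, **kwargs):
#       return kwargs['target'] in feature.qualifiers.get('ID', [])
#
#   for rec in GFF.parse(open('example.gff3')):
#       for hit in feature_lambda(rec.features, feature_test_id,
#                                 {'target': 'bob.42'}, subfeatures=False):
#           print(hit.id)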
def feature_test_qual_value(feature, **kwargs):
"""Test qualifier values.
For every feature, check that at least one value in
feature.qualifiers.get(kwargs['qualifier'], []) is in kwargs['attribute_list']
"""
for attribute_value in feature.qualifiers.get(kwargs['qualifier'], []):
if attribute_value in kwargs['attribute_list']:
return True
return False
def __get_features(child, interpro=False):
child_features = {}
for rec in GFF.parse(child):
# Only top level
for feature in rec.features:
# Get the record id as parent_feature_id (since this is how it will be during remapping)
parent_feature_id = rec.id
# If it's an interpro specific gff3 file
if interpro:
# Then we ignore polypeptide features as they're useless
if feature.type == 'polypeptide':
continue
# If there's an underscore, we strip up to that underscore?
# I do not know the rationale for this, removing.
# if '_' in parent_feature_id:
# parent_feature_id = parent_feature_id[parent_feature_id.index('_') + 1:]
try:
child_features[parent_feature_id].append(feature)
except KeyError:
child_features[parent_feature_id] = [feature]
# Keep a list of feature objects keyed by parent record id
return child_features
def __update_feature_location(feature, parent, protein2dna):
start = feature.location.start
end = feature.location.end
if protein2dna:
start *= 3
end *= 3
if parent.location.strand >= 0:
ns = parent.location.start + start
ne = parent.location.start + end
st = +1
else:
ns = parent.location.end - end
ne = parent.location.end - start
st = -1
# Don't let start/stops be less than zero. It's technically valid for them
# to be (at least in the model I'm working with) but it causes numerous
# issues.
#
# Instead, we'll replace with %3 to try and keep it in the same reading
# frame that it should be in.
if ns < 0:
ns %= 3
if ne < 0:
ne %= 3
feature.location = FeatureLocation(ns, ne, strand=st)
if hasattr(feature, 'sub_features'):
for subfeature in feature.sub_features:
__update_feature_location(subfeature, parent, protein2dna)
def rebase(parent, child, interpro=False, protein2dna=False, map_by='ID'):
# get all of the features we will be re-mapping in a dictionary, keyed by parent feature ID
child_features = __get_features(child, interpro=interpro)
for rec in GFF.parse(parent):
replacement_features = []
for feature in feature_lambda(
rec.features,
# Filter features in the parent genome by those that are
# "interesting", i.e. have results in child_features array.
# Probably an unnecessary optimisation.
feature_test_qual_value,
{
'qualifier': map_by,
'attribute_list': child_features.keys(),
},
subfeatures=False):
# Features which will be re-mapped
to_remap = child_features[feature.id]
# TODO: update starts
fixed_features = []
for x in to_remap:
# Then update the location of the actual feature
__update_feature_location(x, feature, protein2dna)
if interpro:
for y in ('status', 'Target'):
try:
del x.qualifiers[y]
except Exception:
pass
fixed_features.append(x)
replacement_features.extend(fixed_features)
# We do this so we don't include the original set of features that we
# were rebasing against in our result.
rec.features = replacement_features
rec.annotations = {}
GFF.write([rec], sys.stdout)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='rebase gff3 features against parent locations', epilog="")
parser.add_argument('parent', type=argparse.FileType('r'), help='Parent GFF3 annotations')
parser.add_argument('child', type=argparse.FileType('r'), help='Child GFF3 annotations to rebase against parent')
parser.add_argument('--interpro', action='store_true',
help='Interpro specific modifications')
parser.add_argument('--protein2dna', action='store_true',
help='Map protein translated results to original DNA data')
parser.add_argument('--map_by', help='Map by key', default='ID')
args = parser.parse_args()
rebase(**vars(args))
|
mit
|
molotof/infernal-twin
|
build/reportlab/src/reportlab/lib/yaml.py
|
32
|
5735
|
#Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/lib/yaml.py
# parses "Yet Another Markup Language" into a list of tuples.
# Each tuple says what the data is e.g.
# ('Paragraph', 'Heading1', 'Why Reportlab Rules')
# and the pattern depends on type.
"""
.h1 Welcome to YAML!
YAML is "Yet Another Markup Language" - a markup language
which is easier to type in than XML, yet gives us a
reasonable selection of formats.
The general rule is that if a line begins with a '.',
it requires special processing. Otherwise lines
are concatenated to paragraphs, and blank lines
separate paragraphs.
If the line ".foo bar bletch" is encountered,
it immediately ends and writes out any current
paragraph.
It then looks for a parser method called 'foo';
if found, it is called with arguments (bar, bletch).
If this is not found, it assumes that 'foo' is a
paragraph style, and the text for the first line
of the paragraph is 'bar bletch'. It would be
up to the formatter to decide whether or not 'foo'
was a valid paragraph.
Special commands understood at present are:
dot image filename
- adds the image to the document
dot beginPre Code
- begins a Preformatted object in style 'Code'
dot endPre
- ends a preformatted object.
"""
__version__=''' $Id$ '''
import sys
#modes:
PLAIN = 1
PREFORMATTED = 2
BULLETCHAR = '\267' # assumes font Symbol, but works on all platforms
class BaseParser:
""""Simplest possible parser with only the most basic options.
This defines the line-handling abilities and basic mechanism.
The class YAMLParser includes capabilities for a fairly rich
story."""
def __init__(self):
self.reset()
def reset(self):
self._lineNo = 0
self._style = 'Normal' # the default
self._results = []
self._buf = []
self._mode = PLAIN
def parseFile(self, filename):
#returns list of objects
data = open(filename, 'r').readlines()
for line in data:
#strip trailing newlines
self.readLine(line[:-1])
self.endPara()
return self._results
def parseText(self, textBlock):
"Parses the a possible multi-line text block"
lines = textBlock.split('\n')
for line in lines:
self.readLine(line)
self.endPara()
return self._results
def readLine(self, line):
#this is the inner loop
self._lineNo = self._lineNo + 1
stripped = line.lstrip()
if len(stripped) == 0:
if self._mode == PLAIN:
self.endPara()
else: #preformatted, append it
self._buf.append(line)
elif line[0]=='.':
# we have a command of some kind
self.endPara()
words = stripped[1:].split()
cmd, args = words[0], words[1:]
#is it a parser method?
if hasattr(self.__class__, cmd):
#this was very bad; any type error in the method was hidden
#we have to hack the traceback
try:
getattr(self,cmd)(*args)
except TypeError as err:
sys.stderr.write("Parser method: %s(*%s) %s at line %d\n" % (cmd, args, err, self._lineNo))
raise
else:
# assume it is a paragraph style -
# becomes the formatter's problem
self.endPara() #end the last one
words = stripped.split(' ', 1)
assert len(words)==2, "Style %s but no data at line %d" % (words[0], self._lineNo)
(styletag, data) = words
self._style = styletag[1:]
self._buf.append(data)
else:
#we have data, add to para
self._buf.append(line)
def endPara(self):
#ends the current paragraph, or preformatted block
text = ' '.join(self._buf)
if text:
if self._mode == PREFORMATTED:
#item 3 is list of lines
self._results.append(('PREFORMATTED', self._style,
'\n'.join(self._buf)))
else:
self._results.append(('PARAGRAPH', self._style, text))
self._buf = []
self._style = 'Normal'
def beginPre(self, stylename):
self._mode = PREFORMATTED
self._style = stylename
def endPre(self):
self.endPara()
self._mode = PLAIN
def image(self, filename):
self.endPara()
self._results.append(('IMAGE', filename))
class Parser(BaseParser):
"""This adds a basic set of "story" components compatible with HTML & PDF.
Images, spacers, page breaks, custom objects and page templates."""
def vSpace(self, points):
"""Inserts a vertical spacer"""
self._results.append(('VSpace', points))
def pageBreak(self):
"""Inserts a frame break"""
self._results.append(('PageBreak','blah')) # must be a tuple
def custom(self, moduleName, funcName):
"""Goes and gets the Python object and adds it to the story"""
self.endPara()
self._results.append(('Custom',moduleName, funcName))
def nextPageTemplate(self, templateName):
self._results.append(('NextPageTemplate',templateName))
def parseFile(filename):
p = Parser()
return p.parseFile(filename)
def parseText(textBlock):
p = Parser()
return p.parseText(textBlock)
if __name__=='__main__': #NORUNTESTS
if len(sys.argv) != 2:
results = parseText(__doc__)
else:
results = parseFile(sys.argv[1])
import pprint
pprint.pprint(results)
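# Editor's hedged usage sketch: a two-paragraph document run through
# parseText; the expected tuples follow from readLine/endPara above:
#
#   results = parseText(".h1 Welcome\n\nBody text here.")
#   # -> [('PARAGRAPH', 'h1', 'Welcome'),
#   #     ('PARAGRAPH', 'Normal', 'Body text here.')]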
|
gpl-3.0
|
ksachs/invenio
|
modules/bibsword/lib/bibsword_config.py
|
19
|
4478
|
## This file is part of Invenio.
## Copyright (C) 2010, 2011, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
'''
Forward to ArXiv.org source code
'''
from invenio.bibformat_dblayer import get_tag_from_name
#Maximal time to keep the stored XML Service document before reloading it, in seconds
CFG_BIBSWORD_SERVICEDOCUMENT_UPDATE_TIME = 3600
#Default submission status
CFG_SUBMISSION_STATUS_SUBMITTED = "submitted"
CFG_SUBMISSION_STATUS_PUBLISHED = "published"
CFG_SUBMISSION_STATUS_ONHOLD = "onhold"
CFG_SUBMISSION_STATUS_REMOVED = "removed"
CFG_SUBMIT_ARXIV_INFO_MESSAGE = "Submitted from Invenio to arXiv by %s, on %s, as %s"
CFG_DOCTYPE_UPLOAD_COLLECTION = 'PUSHED_TO_ARXIV'
# report number:
marc_tag_main_report_number = get_tag_from_name('primary report number')
if marc_tag_main_report_number:
CFG_MARC_REPORT_NUMBER = marc_tag_main_report_number
else:
CFG_MARC_REPORT_NUMBER = '037__a'
# title:
marc_tag_title = get_tag_from_name('title')
if marc_tag_title:
CFG_MARC_TITLE = marc_tag_title
else:
CFG_MARC_TITLE = '245__a'
# author name:
marc_tag_author = get_tag_from_name('first author name')
if marc_tag_author:
CFG_MARC_AUTHOR_NAME = marc_tag_author
else:
CFG_MARC_AUTHOR_NAME = '100__a'
# author affiliation
marc_tag_author_affiliation = get_tag_from_name('first author affiliation')
if marc_tag_author_affiliation:
CFG_MARC_AUTHOR_AFFILIATION = marc_tag_author_affiliation
else:
CFG_MARC_AUTHOR_AFFILIATION = '100__u'
# contributor name:
marc_tag_contributor_name = get_tag_from_name('additional author name')
if marc_tag_contributor_name:
CFG_MARC_CONTRIBUTOR_NAME = marc_tag_contributor_name
else:
CFG_MARC_CONTRIBUTOR_NAME = '700__a'
# contributor affiliation:
marc_tag_contributor_affiliation = get_tag_from_name('additional author affiliation')
if marc_tag_contributor_affiliation:
CFG_MARC_CONTRIBUTOR_AFFILIATION = marc_tag_contributor_affiliation
else:
CFG_MARC_CONTRIBUTOR_AFFILIATION = '700__u'
# abstract:
marc_tag_abstract = get_tag_from_name('main abstract')
if marc_tag_abstract:
CFG_MARC_ABSTRACT = marc_tag_abstract
else:
CFG_MARC_ABSTRACT = '520__a'
# additional report number
marc_tag_additional_report_number = get_tag_from_name('additional report number')
if marc_tag_additional_report_number:
CFG_MARC_ADDITIONAL_REPORT_NUMBER = marc_tag_additional_report_number
else:
CFG_MARC_ADDITIONAL_REPORT_NUMBER = '088__a'
# doi
marc_tag_doi = get_tag_from_name('doi')
if marc_tag_doi:
CFG_MARC_DOI = marc_tag_doi
else:
CFG_MARC_DOI = '909C4a'
# journal code
marc_tag_journal_ref_code = get_tag_from_name('journal code')
if marc_tag_journal_ref_code:
CFG_MARC_JOURNAL_REF_CODE = marc_tag_journal_ref_code
else:
CFG_MARC_JOURNAL_REF_CODE = '909C4c'
# journal reference title
marc_tag_journal_ref_title = get_tag_from_name('journal title')
if marc_tag_journal_ref_title:
CFG_MARC_JOURNAL_REF_TITLE = marc_tag_journal_ref_title
else:
CFG_MARC_JOURNAL_REF_TITLE = '909C4p'
# journal reference page
marc_tag_journal_ref_page = get_tag_from_name('journal page')
if marc_tag_journal_ref_page:
CFG_MARC_JOURNAL_REF_PAGE = marc_tag_journal_ref_page
else:
CFG_MARC_JOURNAL_REF_PAGE = '909C4v'
# journal reference year
marc_tag_journal_ref_year = get_tag_from_name('journal year')
if marc_tag_journal_ref_year:
CFG_MARC_JOURNAL_REF_YEAR = marc_tag_journal_ref_year
else:
CFG_MARC_JOURNAL_REF_YEAR = '909C4y'
# comment
marc_tag_comment = get_tag_from_name('comment')
if marc_tag_comment:
CFG_MARC_COMMENT = marc_tag_comment
else:
CFG_MARC_COMMENT = '500__a'
# internal note field
marc_tag_internal_note = get_tag_from_name('internal notes')
if marc_tag_internal_note:
CFG_MARC_RECORD_SUBMIT_INFO = marc_tag_internal_note
else:
CFG_MARC_RECORD_SUBMIT_INFO = '595__a'
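# Editor's hedged refactoring sketch (not part of Invenio): the repeated
# lookup-with-fallback pattern above could be expressed once as a helper;
# the helper name is hypothetical:
#
#   def _tag_or_default(tag_name, default):
#       tag = get_tag_from_name(tag_name)
#       return tag if tag else default
#
#   CFG_MARC_COMMENT = _tag_or_default('comment', '500__a')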
|
gpl-2.0
|
tboyce1/home-assistant
|
homeassistant/components/sensor/ihc.py
|
2
|
2909
|
"""IHC sensor platform.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.ihc/
"""
from xml.etree.ElementTree import Element
import voluptuous as vol
from homeassistant.components.ihc import (
validate_name, IHC_DATA, IHC_CONTROLLER, IHC_INFO)
from homeassistant.components.ihc.ihcdevice import IHCDevice
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_ID, CONF_NAME, CONF_UNIT_OF_MEASUREMENT, CONF_SENSORS,
TEMP_CELSIUS)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
DEPENDENCIES = ['ihc']
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_SENSORS, default=[]):
vol.All(cv.ensure_list, [
vol.All({
vol.Required(CONF_ID): cv.positive_int,
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_UNIT_OF_MEASUREMENT,
default=TEMP_CELSIUS): cv.string
}, validate_name)
])
})
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the ihc sensor platform."""
ihc_controller = hass.data[IHC_DATA][IHC_CONTROLLER]
info = hass.data[IHC_DATA][IHC_INFO]
devices = []
if discovery_info:
for name, device in discovery_info.items():
ihc_id = device['ihc_id']
product_cfg = device['product_cfg']
product = device['product']
sensor = IHCSensor(ihc_controller, name, ihc_id, info,
product_cfg[CONF_UNIT_OF_MEASUREMENT],
product)
devices.append(sensor)
else:
sensors = config[CONF_SENSORS]
for sensor_cfg in sensors:
ihc_id = sensor_cfg[CONF_ID]
name = sensor_cfg[CONF_NAME]
unit = sensor_cfg[CONF_UNIT_OF_MEASUREMENT]
sensor = IHCSensor(ihc_controller, name, ihc_id, info, unit)
devices.append(sensor)
add_devices(devices)
class IHCSensor(IHCDevice, Entity):
"""Implementation of the IHC sensor."""
def __init__(self, ihc_controller, name, ihc_id: int, info: bool,
unit, product: Element=None) -> None:
"""Initialize the IHC sensor."""
super().__init__(ihc_controller, name, ihc_id, info, product)
self._state = None
self._unit_of_measurement = unit
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
def on_ihc_change(self, ihc_id, value):
"""Callback when ihc resource changes."""
self._state = value
self.schedule_update_ha_state()
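# Editor's hedged configuration sketch: the PLATFORM_SCHEMA above
# corresponds to YAML along these lines (the id and name are illustrative):
#
#   sensor:
#     - platform: ihc
#       sensors:
#         - id: 12345
#           name: outdoor_temperature
#           unit_of_measurement: "°C"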
|
apache-2.0
|
porduna/flask-admin
|
flask_admin/contrib/pymongo/view.py
|
6
|
9257
|
import logging
import pymongo
from bson import ObjectId
from bson.errors import InvalidId
from flask import flash
from flask.ext.admin._compat import string_types
from flask.ext.admin.babel import gettext, ngettext, lazy_gettext
from flask.ext.admin.model import BaseModelView
from flask.ext.admin.actions import action
from flask.ext.admin.helpers import get_form_data
from .filters import BasePyMongoFilter
from .tools import parse_like_term
# Set up logger
log = logging.getLogger("flask-admin.pymongo")
class ModelView(BaseModelView):
"""
PyMongo model scaffolding.
"""
column_filters = None
"""
Collection of the column filters.
Should contain instances of
:class:`flask.ext.admin.contrib.pymongo.filters.BasePyMongoFilter`
classes.
For example::
class MyModelView(BaseModelView):
column_filters = (BooleanEqualFilter(User.name, 'Name'),)
"""
def __init__(self, coll,
name=None, category=None, endpoint=None, url=None):
"""
Constructor
:param coll:
MongoDB collection object
:param name:
Display name
:param category:
Display category
:param endpoint:
Endpoint
:param url:
Custom URL
"""
self._search_fields = []
if name is None:
name = self._prettify_name(coll.name)
if endpoint is None:
endpoint = ('%sview' % coll.name).lower()
super(ModelView, self).__init__(None, name, category, endpoint, url)
self.coll = coll
def scaffold_pk(self):
return '_id'
def get_pk_value(self, model):
"""
Return primary key value from the model instance
:param model:
Model instance
"""
return model.get('_id')
def scaffold_list_columns(self):
"""
Scaffold list columns
"""
raise NotImplementedError()
def scaffold_sortable_columns(self):
"""
Return sortable columns dictionary (name, field)
"""
return []
def init_search(self):
"""
Init search
"""
if self.column_searchable_list:
for p in self.column_searchable_list:
if not isinstance(p, string_types):
raise ValueError('Expected string')
# TODO: Validation?
self._search_fields.append(p)
return bool(self._search_fields)
def scaffold_filters(self, attr):
"""
Return filter object(s) for the field
:param attr:
Either field name or field instance
"""
raise NotImplementedError()
def is_valid_filter(self, filter):
"""
Validate if it is a valid PyMongo filter
:param filter:
Filter object
"""
return isinstance(filter, BasePyMongoFilter)
def scaffold_form(self):
raise NotImplementedError()
def _get_field_value(self, model, name):
"""
Get unformatted field value from the model
"""
return model.get(name)
def get_list(self, page, sort_column, sort_desc, search, filters,
execute=True):
"""
Get list of objects from the MongoDB collection
:param page:
Page number
:param sort_column:
Sort column
:param sort_desc:
Sort descending
:param search:
Search criteria
:param filters:
List of applied filters
:param execute:
Run query immediately or not
"""
query = {}
# Filters
if self._filters:
data = []
for flt, value in filters:
f = self._filters[flt]
data = f.apply(data, value)
if data:
if len(data) == 1:
query = data[0]
else:
query['$and'] = data
# Search
if self._search_supported and search:
values = search.split(' ')
queries = []
# Construct inner queries
for value in values:
if not value:
continue
regex = parse_like_term(value)
stmt = []
for field in self._search_fields:
stmt.append({field: {'$regex': regex}})
if stmt:
if len(stmt) == 1:
queries.append(stmt[0])
else:
queries.append({'$or': stmt})
# Construct final query
if queries:
if len(queries) == 1:
final = queries[0]
else:
final = {'$and': queries}
if query:
query = {'$and': [query, final]}
else:
query = final
# Get count
count = self.coll.find(query).count()
# Sorting
sort_by = None
if sort_column:
sort_by = [(sort_column, pymongo.DESCENDING if sort_desc else pymongo.ASCENDING)]
else:
order = self._get_default_order()
if order:
sort_by = [(order[0], pymongo.DESCENDING if order[1] else pymongo.ASCENDING)]
# Pagination
skip = None
if page is not None:
skip = page * self.page_size
results = self.coll.find(query, sort=sort_by, skip=skip, limit=self.page_size)
if execute:
results = list(results)
return count, results
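# Editor's hedged illustration: with a single filter producing
# {'name': 'bob'} and a search for "foo bar" over fields 'name' and
# 'email', the query assembled above comes out roughly as:
#
#   {'$and': [{'name': 'bob'},
#             {'$and': [{'$or': [{'name': {'$regex': ...}},
#                                {'email': {'$regex': ...}}]},   # "foo"
#                       {'$or': [{'name': {'$regex': ...}},
#                                {'email': {'$regex': ...}}]}]}]}  # "bar"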
def _get_valid_id(self, id):
try:
return ObjectId(id)
except InvalidId:
return id
def get_one(self, id):
"""
Return single model instance by ID
:param id:
Model ID
"""
return self.coll.find_one({'_id': self._get_valid_id(id)})
def edit_form(self, obj):
"""
Create edit form from the MongoDB document
"""
return self._edit_form_class(get_form_data(), **obj)
def create_model(self, form):
"""
Create model helper
:param form:
Form instance
"""
try:
model = form.data
self._on_model_change(form, model, True)
self.coll.insert(model)
except Exception as ex:
flash(gettext('Failed to create model. %(error)s', error=str(ex)),
'error')
log.exception('Failed to create model')
return False
else:
self.after_model_change(form, model, True)
return True
def update_model(self, form, model):
"""
Update model helper
:param form:
Form instance
:param model:
Model instance to update
"""
try:
model.update(form.data)
self._on_model_change(form, model, False)
pk = self.get_pk_value(model)
self.coll.update({'_id': pk}, model)
except Exception as ex:
flash(gettext('Failed to update model. %(error)s', error=str(ex)),
'error')
log.exception('Failed to update model')
return False
else:
self.after_model_change(form, model, False)
return True
def delete_model(self, model):
"""
Delete model helper
:param model:
Model instance
"""
try:
pk = self.get_pk_value(model)
if not pk:
raise ValueError('Document does not have _id')
self.on_model_delete(model)
self.coll.remove({'_id': pk})
return True
except Exception as ex:
flash(gettext('Failed to delete model. %(error)s', error=str(ex)),
'error')
log.exception('Failed to delete model')
return False
# Default model actions
def is_action_allowed(self, name):
# Check delete action permission
if name == 'delete' and not self.can_delete:
return False
return super(ModelView, self).is_action_allowed(name)
@action('delete',
lazy_gettext('Delete'),
lazy_gettext('Are you sure you want to delete selected models?'))
def action_delete(self, ids):
try:
count = 0
# TODO: Optimize me
for pk in ids:
self.coll.remove({'_id': self._get_valid_id(pk)})
count += 1
flash(ngettext('Model was successfully deleted.',
'%(count)s models were successfully deleted.',
count,
count=count))
except Exception as ex:
flash(gettext('Failed to delete models. %(error)s', error=str(ex)), 'error')
|
bsd-3-clause
|
Vizerai/grpc
|
examples/python/route_guide/route_guide_server.py
|
8
|
4271
|
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Python implementation of the gRPC route guide server."""
from concurrent import futures
import time
import math
import grpc
import route_guide_pb2
import route_guide_pb2_grpc
import route_guide_resources
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
def get_feature(feature_db, point):
"""Returns Feature at given location or None."""
for feature in feature_db:
if feature.location == point:
return feature
return None
def get_distance(start, end):
"""Distance between two points."""
coord_factor = 10000000.0
lat_1 = start.latitude / coord_factor
lat_2 = end.latitude / coord_factor
lon_1 = start.longitude / coord_factor
lon_2 = end.longitude / coord_factor
lat_rad_1 = math.radians(lat_1)
lat_rad_2 = math.radians(lat_2)
delta_lat_rad = math.radians(lat_2 - lat_1)
delta_lon_rad = math.radians(lon_2 - lon_1)
# Formula is based on http://mathforum.org/library/drmath/view/51879.html
a = (pow(math.sin(delta_lat_rad / 2), 2) +
(math.cos(lat_rad_1) * math.cos(lat_rad_2) * pow(
math.sin(delta_lon_rad / 2), 2)))
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
R = 6371000  # metres
return R * c
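# Editor's note: the code above is the haversine formula,
#   a = sin^2(dlat/2) + cos(lat1) * cos(lat2) * sin^2(dlon/2)
#   c = 2 * atan2(sqrt(a), sqrt(1 - a))
#   d = R * c
# with R = 6371000 m (mean Earth radius), matching the variables above.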
class RouteGuideServicer(route_guide_pb2_grpc.RouteGuideServicer):
"""Provides methods that implement functionality of route guide server."""
def __init__(self):
self.db = route_guide_resources.read_route_guide_database()
def GetFeature(self, request, context):
feature = get_feature(self.db, request)
if feature is None:
return route_guide_pb2.Feature(name="", location=request)
else:
return feature
def ListFeatures(self, request, context):
left = min(request.lo.longitude, request.hi.longitude)
right = max(request.lo.longitude, request.hi.longitude)
top = max(request.lo.latitude, request.hi.latitude)
bottom = min(request.lo.latitude, request.hi.latitude)
for feature in self.db:
if (feature.location.longitude >= left and
feature.location.longitude <= right and
feature.location.latitude >= bottom and
feature.location.latitude <= top):
yield feature
def RecordRoute(self, request_iterator, context):
point_count = 0
feature_count = 0
distance = 0.0
prev_point = None
start_time = time.time()
for point in request_iterator:
point_count += 1
if get_feature(self.db, point):
feature_count += 1
if prev_point:
distance += get_distance(prev_point, point)
prev_point = point
elapsed_time = time.time() - start_time
return route_guide_pb2.RouteSummary(
point_count=point_count,
feature_count=feature_count,
distance=int(distance),
elapsed_time=int(elapsed_time))
def RouteChat(self, request_iterator, context):
prev_notes = []
for new_note in request_iterator:
for prev_note in prev_notes:
if prev_note.location == new_note.location:
yield prev_note
prev_notes.append(new_note)
def serve():
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
route_guide_pb2_grpc.add_RouteGuideServicer_to_server(
RouteGuideServicer(), server)
server.add_insecure_port('[::]:50051')
server.start()
try:
while True:
time.sleep(_ONE_DAY_IN_SECONDS)
except KeyboardInterrupt:
server.stop(0)
if __name__ == '__main__':
serve()
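# Editor's hedged client sketch: a matching client, assuming the same
# generated route_guide_pb2 modules are importable; the coordinates are
# illustrative:
#
#   channel = grpc.insecure_channel('localhost:50051')
#   stub = route_guide_pb2_grpc.RouteGuideStub(channel)
#   point = route_guide_pb2.Point(latitude=409146138, longitude=-746188906)
#   feature = stub.GetFeature(point)
#   print(feature.name)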
|
apache-2.0
|
arnavd96/Cinemiezer
|
myvenv/lib/python3.4/site-packages/rest_framework/utils/humanize_datetime.py
|
144
|
1285
|
"""
Helper functions that convert strftime formats into more readable representations.
"""
from rest_framework import ISO_8601
def datetime_formats(formats):
format = ', '.join(formats).replace(
ISO_8601,
'YYYY-MM-DDThh:mm[:ss[.uuuuuu]][+HH:MM|-HH:MM|Z]'
)
return humanize_strptime(format)
def date_formats(formats):
format = ', '.join(formats).replace(ISO_8601, 'YYYY[-MM[-DD]]')
return humanize_strptime(format)
def time_formats(formats):
format = ', '.join(formats).replace(ISO_8601, 'hh:mm[:ss[.uuuuuu]]')
return humanize_strptime(format)
def humanize_strptime(format_string):
# Note that we're missing some of the locale specific mappings that
# don't really make sense.
mapping = {
"%Y": "YYYY",
"%y": "YY",
"%m": "MM",
"%b": "[Jan-Dec]",
"%B": "[January-December]",
"%d": "DD",
"%H": "hh",
"%I": "hh", # Requires '%p' to differentiate from '%H'.
"%M": "mm",
"%S": "ss",
"%f": "uuuuuu",
"%a": "[Mon-Sun]",
"%A": "[Monday-Sunday]",
"%p": "[AM|PM]",
"%z": "[+HHMM|-HHMM]"
}
for key, val in mapping.items():
format_string = format_string.replace(key, val)
return format_string
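# Editor's hedged usage example, following the mapping above:
#
#   humanize_strptime('%Y-%m-%d %H:%M')  # -> 'YYYY-MM-DD hh:mm'
#   datetime_formats([ISO_8601])
#   # -> 'YYYY-MM-DDThh:mm[:ss[.uuuuuu]][+HH:MM|-HH:MM|Z]'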
|
mit
|
dpiers/coderang-meteor
|
public/jsrepl/extern/python/unclosured/lib/python2.7/lib2to3/fixes/fix_isinstance.py
|
326
|
1609
|
# Copyright 2008 Armin Ronacher.
# Licensed to PSF under a Contributor Agreement.
"""Fixer that cleans up a tuple argument to isinstance after the tokens
in it were fixed. This is mainly used to remove double occurrences of
tokens as a leftover of the long -> int / unicode -> str conversion.
eg. isinstance(x, (int, long)) -> isinstance(x, (int, int))
-> isinstance(x, int)
"""
from .. import fixer_base
from ..fixer_util import token
class FixIsinstance(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
power<
'isinstance'
trailer< '(' arglist< any ',' atom< '('
args=testlist_gexp< any+ >
')' > > ')' >
>
"""
run_order = 6
def transform(self, node, results):
names_inserted = set()
testlist = results["args"]
args = testlist.children
new_args = []
iterator = enumerate(args)
for idx, arg in iterator:
if arg.type == token.NAME and arg.value in names_inserted:
if idx < len(args) - 1 and args[idx + 1].type == token.COMMA:
iterator.next()
continue
else:
new_args.append(arg)
if arg.type == token.NAME:
names_inserted.add(arg.value)
if new_args and new_args[-1].type == token.COMMA:
del new_args[-1]
if len(new_args) == 1:
atom = testlist.parent
new_args[0].prefix = atom.prefix
atom.replace(new_args[0])
else:
args[:] = new_args
node.changed()
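# Editor's hedged usage sketch: driving just this fixer through lib2to3's
# refactoring API:
#
#   from lib2to3.refactor import RefactoringTool
#   tool = RefactoringTool(['lib2to3.fixes.fix_isinstance'])
#   print(tool.refactor_string('isinstance(x, (int, int))\n', '<example>'))
#   # -> isinstance(x, int)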
|
mit
|
dpyro/servo
|
tests/wpt/web-platform-tests/tools/html5lib/html5lib/tokenizer.py
|
1710
|
76929
|
from __future__ import absolute_import, division, unicode_literals
try:
chr = unichr # flake8: noqa
except NameError:
pass
from collections import deque
from .constants import spaceCharacters
from .constants import entities
from .constants import asciiLetters, asciiUpper2Lower
from .constants import digits, hexDigits, EOF
from .constants import tokenTypes, tagTokenTypes
from .constants import replacementCharacters
from .inputstream import HTMLInputStream
from .trie import Trie
entitiesTrie = Trie(entities)
class HTMLTokenizer(object):
""" This class takes care of tokenizing HTML.
* self.currentToken
Holds the token that is currently being processed.
* self.state
Holds a reference to the method to be invoked... XXX
* self.stream
Points to HTMLInputStream object.
"""
def __init__(self, stream, encoding=None, parseMeta=True, useChardet=True,
lowercaseElementName=True, lowercaseAttrName=True, parser=None):
self.stream = HTMLInputStream(stream, encoding, parseMeta, useChardet)
self.parser = parser
# Perform case conversions?
self.lowercaseElementName = lowercaseElementName
self.lowercaseAttrName = lowercaseAttrName
# Setup the initial tokenizer state
self.escapeFlag = False
self.lastFourChars = []
self.state = self.dataState
self.escape = False
# The current token being created
self.currentToken = None
super(HTMLTokenizer, self).__init__()
def __iter__(self):
""" This is where the magic happens.
We do our usual processing through the states and when we have a token
to return we yield the token which pauses processing until the next token
is requested.
"""
self.tokenQueue = deque([])
# Start processing. When EOF is reached self.state will return False
# instead of True and the loop will terminate.
while self.state():
while self.stream.errors:
yield {"type": tokenTypes["ParseError"], "data": self.stream.errors.pop(0)}
while self.tokenQueue:
yield self.tokenQueue.popleft()
def consumeNumberEntity(self, isHex):
"""This function returns either U+FFFD or the character based on the
decimal or hexadecimal representation. It also discards ";" if present.
If it is not present, a "numeric-entity-without-semicolon" ParseError token is appended to self.tokenQueue.
"""
allowed = digits
radix = 10
if isHex:
allowed = hexDigits
radix = 16
charStack = []
# Consume all the characters that are in range while making sure we
# don't hit an EOF.
c = self.stream.char()
while c in allowed and c is not EOF:
charStack.append(c)
c = self.stream.char()
# Convert the set of characters consumed to an int.
charAsInt = int("".join(charStack), radix)
# Certain characters get replaced with others
if charAsInt in replacementCharacters:
char = replacementCharacters[charAsInt]
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"illegal-codepoint-for-numeric-entity",
"datavars": {"charAsInt": charAsInt}})
elif ((0xD800 <= charAsInt <= 0xDFFF) or
(charAsInt > 0x10FFFF)):
char = "\uFFFD"
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"illegal-codepoint-for-numeric-entity",
"datavars": {"charAsInt": charAsInt}})
else:
# Should speed up this check somehow (e.g. move the set to a constant)
if ((0x0001 <= charAsInt <= 0x0008) or
(0x000E <= charAsInt <= 0x001F) or
(0x007F <= charAsInt <= 0x009F) or
(0xFDD0 <= charAsInt <= 0xFDEF) or
charAsInt in frozenset([0x000B, 0xFFFE, 0xFFFF, 0x1FFFE,
0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE,
0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE,
0x5FFFF, 0x6FFFE, 0x6FFFF, 0x7FFFE,
0x7FFFF, 0x8FFFE, 0x8FFFF, 0x9FFFE,
0x9FFFF, 0xAFFFE, 0xAFFFF, 0xBFFFE,
0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE,
0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE,
0xFFFFF, 0x10FFFE, 0x10FFFF])):
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data":
"illegal-codepoint-for-numeric-entity",
"datavars": {"charAsInt": charAsInt}})
try:
# Try/except needed as UCS-2 Python builds' unichr only works
# within the BMP.
char = chr(charAsInt)
except ValueError:
v = charAsInt - 0x10000
char = chr(0xD800 | (v >> 10)) + chr(0xDC00 | (v & 0x3FF))
# Discard the ; if present. Otherwise, put it back on the queue and
# invoke parseError on parser.
if c != ";":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"numeric-entity-without-semicolon"})
self.stream.unget(c)
return char
def consumeEntity(self, allowedChar=None, fromAttribute=False):
# Initialise to the default output for when no entity is matched
output = "&"
charStack = [self.stream.char()]
if (charStack[0] in spaceCharacters or charStack[0] in (EOF, "<", "&")
or (allowedChar is not None and allowedChar == charStack[0])):
self.stream.unget(charStack[0])
elif charStack[0] == "#":
# Read the next character to see if it's hex or decimal
hex = False
charStack.append(self.stream.char())
if charStack[-1] in ("x", "X"):
hex = True
charStack.append(self.stream.char())
# charStack[-1] should be the first digit
if (hex and charStack[-1] in hexDigits) \
or (not hex and charStack[-1] in digits):
# At least one digit found, so consume the whole number
self.stream.unget(charStack[-1])
output = self.consumeNumberEntity(hex)
else:
# No digits found
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "expected-numeric-entity"})
self.stream.unget(charStack.pop())
output = "&" + "".join(charStack)
else:
# At this point in the process might have named entity. Entities
# are stored in the global variable "entities".
#
# Consume characters and compare to these to a substring of the
# entity names in the list until the substring no longer matches.
while (charStack[-1] is not EOF):
if not entitiesTrie.has_keys_with_prefix("".join(charStack)):
break
charStack.append(self.stream.char())
# At this point we have a string that starts with some characters
# that may match an entity
# Try to find the longest entity the string will match to take care
# of ¬i for instance.
try:
entityName = entitiesTrie.longest_prefix("".join(charStack[:-1]))
entityLength = len(entityName)
except KeyError:
entityName = None
if entityName is not None:
if entityName[-1] != ";":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"named-entity-without-semicolon"})
if (entityName[-1] != ";" and fromAttribute and
(charStack[entityLength] in asciiLetters or
charStack[entityLength] in digits or
charStack[entityLength] == "=")):
self.stream.unget(charStack.pop())
output = "&" + "".join(charStack)
else:
output = entities[entityName]
self.stream.unget(charStack.pop())
output += "".join(charStack[entityLength:])
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-named-entity"})
self.stream.unget(charStack.pop())
output = "&" + "".join(charStack)
if fromAttribute:
self.currentToken["data"][-1][1] += output
else:
if output in spaceCharacters:
tokenType = "SpaceCharacters"
else:
tokenType = "Characters"
self.tokenQueue.append({"type": tokenTypes[tokenType], "data": output})
def processEntityInAttribute(self, allowedChar):
"""This method replaces the need for "entityInAttributeValueState".
"""
self.consumeEntity(allowedChar=allowedChar, fromAttribute=True)
def emitCurrentToken(self):
"""This method is a generic handler for emitting the tags. It also sets
the state to "data" because that's what's needed after a token has been
emitted.
"""
token = self.currentToken
# Add token to the queue to be yielded
if (token["type"] in tagTokenTypes):
if self.lowercaseElementName:
token["name"] = token["name"].translate(asciiUpper2Lower)
if token["type"] == tokenTypes["EndTag"]:
if token["data"]:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "attributes-in-end-tag"})
if token["selfClosing"]:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "self-closing-flag-on-end-tag"})
self.tokenQueue.append(token)
self.state = self.dataState
# Below are the various tokenizer states worked out.
def dataState(self):
data = self.stream.char()
if data == "&":
self.state = self.entityDataState
elif data == "<":
self.state = self.tagOpenState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\u0000"})
elif data is EOF:
# Tokenization ends.
return False
elif data in spaceCharacters:
# Directly after emitting a token you switch back to the "data
# state". At that point spaceCharacters are important so they are
# emitted separately.
self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data":
data + self.stream.charsUntil(spaceCharacters, True)})
# No need to update lastFourChars here, since the first space will
# have already been appended to lastFourChars and will have broken
# any <!-- or --> sequences
else:
chars = self.stream.charsUntil(("&", "<", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def entityDataState(self):
self.consumeEntity()
self.state = self.dataState
return True
def rcdataState(self):
data = self.stream.char()
if data == "&":
self.state = self.characterReferenceInRcdata
elif data == "<":
self.state = self.rcdataLessThanSignState
elif data == EOF:
# Tokenization ends.
return False
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data in spaceCharacters:
# Directly after emitting a token you switch back to the "data
# state". At that point spaceCharacters are important so they are
# emitted separately.
self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data":
data + self.stream.charsUntil(spaceCharacters, True)})
# No need to update lastFourChars here, since the first space will
# have already been appended to lastFourChars and will have broken
# any <!-- or --> sequences
else:
chars = self.stream.charsUntil(("&", "<", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def characterReferenceInRcdata(self):
self.consumeEntity()
self.state = self.rcdataState
return True
def rawtextState(self):
data = self.stream.char()
if data == "<":
self.state = self.rawtextLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data == EOF:
# Tokenization ends.
return False
else:
chars = self.stream.charsUntil(("<", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def scriptDataState(self):
data = self.stream.char()
if data == "<":
self.state = self.scriptDataLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data == EOF:
# Tokenization ends.
return False
else:
chars = self.stream.charsUntil(("<", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def plaintextState(self):
data = self.stream.char()
if data == EOF:
# Tokenization ends.
return False
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + self.stream.charsUntil("\u0000")})
return True
def tagOpenState(self):
data = self.stream.char()
if data == "!":
self.state = self.markupDeclarationOpenState
elif data == "/":
self.state = self.closeTagOpenState
elif data in asciiLetters:
self.currentToken = {"type": tokenTypes["StartTag"],
"name": data, "data": [],
"selfClosing": False,
"selfClosingAcknowledged": False}
self.state = self.tagNameState
elif data == ">":
# XXX In theory it could be something besides a tag name. But
# do we really care?
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-tag-name-but-got-right-bracket"})
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<>"})
self.state = self.dataState
elif data == "?":
# XXX In theory it could be something besides a tag name. But
# do we really care?
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-tag-name-but-got-question-mark"})
self.stream.unget(data)
self.state = self.bogusCommentState
else:
# XXX
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-tag-name"})
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.dataState
return True
def closeTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.currentToken = {"type": tokenTypes["EndTag"], "name": data,
"data": [], "selfClosing": False}
self.state = self.tagNameState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-closing-tag-but-got-right-bracket"})
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-closing-tag-but-got-eof"})
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.state = self.dataState
else:
# XXX data can be _'_...
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-closing-tag-but-got-char",
"datavars": {"data": data}})
self.stream.unget(data)
self.state = self.bogusCommentState
return True
def tagNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeAttributeNameState
elif data == ">":
self.emitCurrentToken()
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-tag-name"})
self.state = self.dataState
elif data == "/":
self.state = self.selfClosingStartTagState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["name"] += "\uFFFD"
else:
self.currentToken["name"] += data
# (Don't use charsUntil here, because tag names are
# very short and it's faster to not do anything fancy)
return True
def rcdataLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.rcdataEndTagOpenState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.rcdataState
return True
def rcdataEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer += data
self.state = self.rcdataEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.stream.unget(data)
self.state = self.rcdataState
return True
def rcdataEndTagNameState(self):
appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
data = self.stream.char()
if data in spaceCharacters and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.beforeAttributeNameState
elif data == "/" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.selfClosingStartTagState
elif data == ">" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.emitCurrentToken()
self.state = self.dataState
elif data in asciiLetters:
self.temporaryBuffer += data
else:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "</" + self.temporaryBuffer})
self.stream.unget(data)
self.state = self.rcdataState
return True
def rawtextLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.rawtextEndTagOpenState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.rawtextState
return True
def rawtextEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer += data
self.state = self.rawtextEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.stream.unget(data)
self.state = self.rawtextState
return True
def rawtextEndTagNameState(self):
appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
data = self.stream.char()
if data in spaceCharacters and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.beforeAttributeNameState
elif data == "/" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.selfClosingStartTagState
elif data == ">" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.emitCurrentToken()
self.state = self.dataState
elif data in asciiLetters:
self.temporaryBuffer += data
else:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "</" + self.temporaryBuffer})
self.stream.unget(data)
self.state = self.rawtextState
return True
def scriptDataLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.scriptDataEndTagOpenState
elif data == "!":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<!"})
self.state = self.scriptDataEscapeStartState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer += data
self.state = self.scriptDataEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEndTagNameState(self):
appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
data = self.stream.char()
if data in spaceCharacters and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.beforeAttributeNameState
elif data == "/" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.selfClosingStartTagState
elif data == ">" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.emitCurrentToken()
self.state = self.dataState
elif data in asciiLetters:
self.temporaryBuffer += data
else:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "</" + self.temporaryBuffer})
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEscapeStartState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataEscapeStartDashState
else:
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEscapeStartDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataEscapedDashDashState
else:
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEscapedState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataEscapedDashState
elif data == "<":
self.state = self.scriptDataEscapedLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data == EOF:
self.state = self.dataState
else:
chars = self.stream.charsUntil(("<", "-", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def scriptDataEscapedDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataEscapedDashDashState
elif data == "<":
self.state = self.scriptDataEscapedLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
self.state = self.scriptDataEscapedState
elif data == EOF:
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedDashDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
elif data == "<":
self.state = self.scriptDataEscapedLessThanSignState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"})
self.state = self.scriptDataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
self.state = self.scriptDataEscapedState
elif data == EOF:
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.scriptDataEscapedEndTagOpenState
elif data in asciiLetters:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<" + data})
self.temporaryBuffer = data
self.state = self.scriptDataDoubleEscapeStartState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer = data
self.state = self.scriptDataEscapedEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedEndTagNameState(self):
appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
data = self.stream.char()
if data in spaceCharacters and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.beforeAttributeNameState
elif data == "/" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.selfClosingStartTagState
elif data == ">" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.emitCurrentToken()
self.state = self.dataState
elif data in asciiLetters:
self.temporaryBuffer += data
else:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "</" + self.temporaryBuffer})
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
def scriptDataDoubleEscapeStartState(self):
data = self.stream.char()
if data in (spaceCharacters | frozenset(("/", ">"))):
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
if self.temporaryBuffer.lower() == "script":
self.state = self.scriptDataDoubleEscapedState
else:
self.state = self.scriptDataEscapedState
elif data in asciiLetters:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.temporaryBuffer += data
else:
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
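    # Editor's note (illustrative): the "double escape" states cover a
    # nested "<script>" inside an escaped region of script data, e.g.
    # <script><!-- <script> ... </script> --> ... </script>. Only a
    # buffered tag name equal to "script" (case-insensitive) switches
    # between the escaped and double-escaped modes.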
def scriptDataDoubleEscapedState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataDoubleEscapedDashState
elif data == "<":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.state = self.scriptDataDoubleEscapedLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data == EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-script-in-script"})
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
return True
def scriptDataDoubleEscapedDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataDoubleEscapedDashDashState
elif data == "<":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.state = self.scriptDataDoubleEscapedLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
self.state = self.scriptDataDoubleEscapedState
elif data == EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-script-in-script"})
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.state = self.scriptDataDoubleEscapedState
return True
def scriptDataDoubleEscapedDashDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
elif data == "<":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.state = self.scriptDataDoubleEscapedLessThanSignState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"})
self.state = self.scriptDataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
self.state = self.scriptDataDoubleEscapedState
elif data == EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-script-in-script"})
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.state = self.scriptDataDoubleEscapedState
return True
def scriptDataDoubleEscapedLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "/"})
self.temporaryBuffer = ""
self.state = self.scriptDataDoubleEscapeEndState
else:
self.stream.unget(data)
self.state = self.scriptDataDoubleEscapedState
return True
def scriptDataDoubleEscapeEndState(self):
data = self.stream.char()
if data in (spaceCharacters | frozenset(("/", ">"))):
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
if self.temporaryBuffer.lower() == "script":
self.state = self.scriptDataEscapedState
else:
self.state = self.scriptDataDoubleEscapedState
elif data in asciiLetters:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.temporaryBuffer += data
else:
self.stream.unget(data)
self.state = self.scriptDataDoubleEscapedState
return True
def beforeAttributeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.stream.charsUntil(spaceCharacters, True)
elif data in asciiLetters:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data == ">":
self.emitCurrentToken()
elif data == "/":
self.state = self.selfClosingStartTagState
elif data in ("'", '"', "=", "<"):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"invalid-character-in-attribute-name"})
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"].append(["\uFFFD", ""])
self.state = self.attributeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-attribute-name-but-got-eof"})
self.state = self.dataState
else:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
return True
def attributeNameState(self):
data = self.stream.char()
leavingThisState = True
emitToken = False
if data == "=":
self.state = self.beforeAttributeValueState
elif data in asciiLetters:
self.currentToken["data"][-1][0] += data +\
self.stream.charsUntil(asciiLetters, True)
leavingThisState = False
elif data == ">":
# XXX If we emit here the attributes are converted to a dict
# without being checked and when the code below runs we error
# because data is a dict not a list
emitToken = True
elif data in spaceCharacters:
self.state = self.afterAttributeNameState
elif data == "/":
self.state = self.selfClosingStartTagState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][0] += "\uFFFD"
leavingThisState = False
elif data in ("'", '"', "<"):
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data":
"invalid-character-in-attribute-name"})
self.currentToken["data"][-1][0] += data
leavingThisState = False
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "eof-in-attribute-name"})
self.state = self.dataState
else:
self.currentToken["data"][-1][0] += data
leavingThisState = False
if leavingThisState:
# Attributes are not dropped at this stage. That happens when the
# start tag token is emitted so values can still be safely appended
# to attributes, but we do want to report the parse error in time.
if self.lowercaseAttrName:
self.currentToken["data"][-1][0] = (
self.currentToken["data"][-1][0].translate(asciiUpper2Lower))
for name, value in self.currentToken["data"][:-1]:
if self.currentToken["data"][-1][0] == name:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"duplicate-attribute"})
break
# XXX Fix for above XXX
if emitToken:
self.emitCurrentToken()
return True
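    # Editor's note (illustrative): for input like <a href="x" href="y">,
    # the loop above reports a duplicate-attribute parse error once the
    # second "href" name is complete; the duplicate value itself is only
    # dropped later, when the emitted start tag's attribute list is
    # converted to a dict (see the XXX comments above).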
def afterAttributeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.stream.charsUntil(spaceCharacters, True)
elif data == "=":
self.state = self.beforeAttributeValueState
elif data == ">":
self.emitCurrentToken()
elif data in asciiLetters:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data == "/":
self.state = self.selfClosingStartTagState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"].append(["\uFFFD", ""])
self.state = self.attributeNameState
elif data in ("'", '"', "<"):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"invalid-character-after-attribute-name"})
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-end-of-tag-but-got-eof"})
self.state = self.dataState
else:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
return True
def beforeAttributeValueState(self):
data = self.stream.char()
if data in spaceCharacters:
self.stream.charsUntil(spaceCharacters, True)
elif data == "\"":
self.state = self.attributeValueDoubleQuotedState
elif data == "&":
self.state = self.attributeValueUnQuotedState
self.stream.unget(data)
elif data == "'":
self.state = self.attributeValueSingleQuotedState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-attribute-value-but-got-right-bracket"})
self.emitCurrentToken()
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][1] += "\uFFFD"
self.state = self.attributeValueUnQuotedState
elif data in ("=", "<", "`"):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"equals-in-unquoted-attribute-value"})
self.currentToken["data"][-1][1] += data
self.state = self.attributeValueUnQuotedState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-attribute-value-but-got-eof"})
self.state = self.dataState
else:
self.currentToken["data"][-1][1] += data
self.state = self.attributeValueUnQuotedState
return True
def attributeValueDoubleQuotedState(self):
data = self.stream.char()
if data == "\"":
self.state = self.afterAttributeValueState
elif data == "&":
self.processEntityInAttribute('"')
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][1] += "\uFFFD"
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-attribute-value-double-quote"})
self.state = self.dataState
else:
self.currentToken["data"][-1][1] += data +\
self.stream.charsUntil(("\"", "&", "\u0000"))
return True
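    # Editor's note (illustrative): "&" inside a quoted value is handed to
    # processEntityInAttribute (defined elsewhere in this class), so input
    # such as <a title="a&amp;b"> produces the attribute value 'a&b'.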
def attributeValueSingleQuotedState(self):
data = self.stream.char()
if data == "'":
self.state = self.afterAttributeValueState
elif data == "&":
self.processEntityInAttribute("'")
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][1] += "\uFFFD"
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-attribute-value-single-quote"})
self.state = self.dataState
else:
self.currentToken["data"][-1][1] += data +\
self.stream.charsUntil(("'", "&", "\u0000"))
return True
def attributeValueUnQuotedState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeAttributeNameState
elif data == "&":
self.processEntityInAttribute(">")
elif data == ">":
self.emitCurrentToken()
elif data in ('"', "'", "=", "<", "`"):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-character-in-unquoted-attribute-value"})
self.currentToken["data"][-1][1] += data
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][1] += "\uFFFD"
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-attribute-value-no-quotes"})
self.state = self.dataState
else:
self.currentToken["data"][-1][1] += data + self.stream.charsUntil(
frozenset(("&", ">", '"', "'", "=", "<", "`", "\u0000")) | spaceCharacters)
return True
def afterAttributeValueState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeAttributeNameState
elif data == ">":
self.emitCurrentToken()
elif data == "/":
self.state = self.selfClosingStartTagState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-EOF-after-attribute-value"})
self.stream.unget(data)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-character-after-attribute-value"})
self.stream.unget(data)
self.state = self.beforeAttributeNameState
return True
def selfClosingStartTagState(self):
data = self.stream.char()
if data == ">":
self.currentToken["selfClosing"] = True
self.emitCurrentToken()
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data":
"unexpected-EOF-after-solidus-in-tag"})
self.stream.unget(data)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-character-after-solidus-in-tag"})
self.stream.unget(data)
self.state = self.beforeAttributeNameState
return True
def bogusCommentState(self):
# Make a new comment token and give it as value all the characters
# until the first > or EOF (charsUntil checks for EOF automatically)
# and emit it.
data = self.stream.charsUntil(">")
data = data.replace("\u0000", "\uFFFD")
self.tokenQueue.append(
{"type": tokenTypes["Comment"], "data": data})
# Eat the character directly after the bogus comment which is either a
# ">" or an EOF.
self.stream.char()
self.state = self.dataState
return True
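    # Editor's note (illustrative): the bogus comment state turns otherwise
    # invalid markup into a comment token, e.g. the input "<!x>" is emitted
    # as {"type": tokenTypes["Comment"], "data": "x"} and the closing ">"
    # is consumed by the stream.char() call above.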
def markupDeclarationOpenState(self):
charStack = [self.stream.char()]
if charStack[-1] == "-":
charStack.append(self.stream.char())
if charStack[-1] == "-":
self.currentToken = {"type": tokenTypes["Comment"], "data": ""}
self.state = self.commentStartState
return True
elif charStack[-1] in ('d', 'D'):
matched = True
for expected in (('o', 'O'), ('c', 'C'), ('t', 'T'),
('y', 'Y'), ('p', 'P'), ('e', 'E')):
charStack.append(self.stream.char())
if charStack[-1] not in expected:
matched = False
break
if matched:
self.currentToken = {"type": tokenTypes["Doctype"],
"name": "",
"publicId": None, "systemId": None,
"correct": True}
self.state = self.doctypeState
return True
elif (charStack[-1] == "[" and
self.parser is not None and
self.parser.tree.openElements and
self.parser.tree.openElements[-1].namespace != self.parser.tree.defaultNamespace):
matched = True
for expected in ["C", "D", "A", "T", "A", "["]:
charStack.append(self.stream.char())
if charStack[-1] != expected:
matched = False
break
if matched:
self.state = self.cdataSectionState
return True
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-dashes-or-doctype"})
while charStack:
self.stream.unget(charStack.pop())
self.state = self.bogusCommentState
return True
def commentStartState(self):
data = self.stream.char()
if data == "-":
self.state = self.commentStartDashState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"incorrect-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += data
self.state = self.commentState
return True
def commentStartDashState(self):
data = self.stream.char()
if data == "-":
self.state = self.commentEndState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "-\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"incorrect-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += "-" + data
self.state = self.commentState
return True
def commentState(self):
data = self.stream.char()
if data == "-":
self.state = self.commentEndDashState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "\uFFFD"
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "eof-in-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += data + \
self.stream.charsUntil(("-", "\u0000"))
return True
def commentEndDashState(self):
data = self.stream.char()
if data == "-":
self.state = self.commentEndState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "-\uFFFD"
self.state = self.commentState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment-end-dash"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += "-" + data
self.state = self.commentState
return True
def commentEndState(self):
data = self.stream.char()
if data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "--\uFFFD"
self.state = self.commentState
elif data == "!":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-bang-after-double-dash-in-comment"})
self.state = self.commentEndBangState
elif data == "-":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-dash-after-double-dash-in-comment"})
self.currentToken["data"] += data
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment-double-dash"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
# XXX
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-comment"})
self.currentToken["data"] += "--" + data
self.state = self.commentState
return True
def commentEndBangState(self):
data = self.stream.char()
if data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == "-":
self.currentToken["data"] += "--!"
self.state = self.commentEndDashState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "--!\uFFFD"
self.state = self.commentState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment-end-bang-state"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += "--!" + data
self.state = self.commentState
return True
def doctypeState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeDoctypeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-doctype-name-but-got-eof"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"need-space-after-doctype"})
self.stream.unget(data)
self.state = self.beforeDoctypeNameState
return True
def beforeDoctypeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-doctype-name-but-got-right-bracket"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["name"] = "\uFFFD"
self.state = self.doctypeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-doctype-name-but-got-eof"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["name"] = data
self.state = self.doctypeNameState
return True
def doctypeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
self.state = self.afterDoctypeNameState
elif data == ">":
self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["name"] += "\uFFFD"
self.state = self.doctypeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype-name"})
self.currentToken["correct"] = False
self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["name"] += data
return True
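    # Editor's note (illustrative): the doctype name is lower-cased via
    # asciiUpper2Lower, so "<!DOCTYPE HTML>" and "<!doctype html>" both
    # produce a Doctype token with name "html" and correct == True.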
def afterDoctypeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.currentToken["correct"] = False
self.stream.unget(data)
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
if data in ("p", "P"):
matched = True
for expected in (("u", "U"), ("b", "B"), ("l", "L"),
("i", "I"), ("c", "C")):
data = self.stream.char()
if data not in expected:
matched = False
break
if matched:
self.state = self.afterDoctypePublicKeywordState
return True
elif data in ("s", "S"):
matched = True
for expected in (("y", "Y"), ("s", "S"), ("t", "T"),
("e", "E"), ("m", "M")):
data = self.stream.char()
if data not in expected:
matched = False
break
if matched:
self.state = self.afterDoctypeSystemKeywordState
return True
# All the characters read before the current 'data' will be
# [a-zA-Z], so they're garbage in the bogus doctype and can be
# discarded; only the latest character might be '>' or EOF
# and needs to be ungetted
self.stream.unget(data)
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-space-or-right-bracket-in-doctype", "datavars":
{"data": data}})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def afterDoctypePublicKeywordState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeDoctypePublicIdentifierState
elif data in ("'", '"'):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.stream.unget(data)
self.state = self.beforeDoctypePublicIdentifierState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.stream.unget(data)
self.state = self.beforeDoctypePublicIdentifierState
return True
def beforeDoctypePublicIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == "\"":
self.currentToken["publicId"] = ""
self.state = self.doctypePublicIdentifierDoubleQuotedState
elif data == "'":
self.currentToken["publicId"] = ""
self.state = self.doctypePublicIdentifierSingleQuotedState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def doctypePublicIdentifierDoubleQuotedState(self):
data = self.stream.char()
if data == "\"":
self.state = self.afterDoctypePublicIdentifierState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["publicId"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["publicId"] += data
return True
def doctypePublicIdentifierSingleQuotedState(self):
data = self.stream.char()
if data == "'":
self.state = self.afterDoctypePublicIdentifierState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["publicId"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["publicId"] += data
return True
def afterDoctypePublicIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.betweenDoctypePublicAndSystemIdentifiersState
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == '"':
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierDoubleQuotedState
elif data == "'":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierSingleQuotedState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def betweenDoctypePublicAndSystemIdentifiersState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == '"':
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierDoubleQuotedState
elif data == "'":
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierSingleQuotedState
elif data == EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def afterDoctypeSystemKeywordState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeDoctypeSystemIdentifierState
elif data in ("'", '"'):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.stream.unget(data)
self.state = self.beforeDoctypeSystemIdentifierState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.stream.unget(data)
self.state = self.beforeDoctypeSystemIdentifierState
return True
def beforeDoctypeSystemIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == "\"":
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierDoubleQuotedState
elif data == "'":
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierSingleQuotedState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def doctypeSystemIdentifierDoubleQuotedState(self):
data = self.stream.char()
if data == "\"":
self.state = self.afterDoctypeSystemIdentifierState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["systemId"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["systemId"] += data
return True
def doctypeSystemIdentifierSingleQuotedState(self):
data = self.stream.char()
if data == "'":
self.state = self.afterDoctypeSystemIdentifierState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["systemId"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["systemId"] += data
return True
def afterDoctypeSystemIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.state = self.bogusDoctypeState
return True
def bogusDoctypeState(self):
data = self.stream.char()
if data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
# XXX EMIT
self.stream.unget(data)
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
pass
return True
def cdataSectionState(self):
data = []
while True:
data.append(self.stream.charsUntil("]"))
data.append(self.stream.charsUntil(">"))
char = self.stream.char()
if char == EOF:
break
else:
assert char == ">"
if data[-1][-2:] == "]]":
data[-1] = data[-1][:-2]
break
else:
data.append(char)
data = "".join(data)
# Deal with null here rather than in the parser
nullCount = data.count("\u0000")
if nullCount > 0:
for i in range(nullCount):
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
data = data.replace("\u0000", "\uFFFD")
if data:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": data})
self.state = self.dataState
return True
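    # Editor's note (illustrative): CDATA sections are only tokenized here
    # when the current node is in a non-default (foreign, i.e. SVG/MathML)
    # namespace, per the check in markupDeclarationOpenState above; e.g.
    # "<![CDATA[a]]>" inside <svg> is emitted as the Characters token "a".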
|
mpl-2.0
|
wylee/runcommands
|
docs/conf.py
|
1
|
2972
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
from datetime import date
sys.path.insert(0, os.path.abspath(".."))
from runcommands import __version__ # noqa: E402
# -- General configuration ------------------------------------------------
project = "RunCommands"
author = "Wyatt Baldwin"
copyright = f"{date.today().year} Wyatt Baldwin"
github_url = "https://github.com/wylee/runcommands"
version = __version__
release = version
language = None
master_doc = "index"
source_suffix = ".rst"
templates_path = ["_templates"]
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
pygments_style = "sphinx"
todo_include_todos = False
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.doctest",
"sphinx.ext.githubpages",
"sphinx.ext.intersphinx",
"sphinx.ext.napoleon",
"sphinx.ext.viewcode",
]
# reStructuredText options ------------------------------------------------
# This makes `xyz` the same as ``xyz``.
default_role = "literal"
# This is appended to the bottom of all docs.
rst_epilog = f"""
.. |project| replace:: {project}
.. |github_url| replace:: {github_url}
"""
# Options for autodoc extension -------------------------------------------
# DEPRECATED in Sphinx 3.0
autodoc_default_flags = ["members"]
# This is the new way to specify autodoc config, but it's not supported
# on Read the Docs yet.
# autodoc_default_options = {
# 'members': True,
# }
# Options for intersphinx extension ---------------------------------------
intersphinx_mapping = {
"python": ("https://docs.python.org/3.6", None),
}
# -- Options for HTML output ----------------------------------------------
html_theme = "alabaster"
html_theme_options = {
"description": "Easily define and run multiple commands",
"github_user": "wylee",
"github_repo": "runcommands",
"page_width": "1200px",
"fixed_sidebar": True,
"sidebar_width": "300px",
"extra_nav_links": {
"Source (GitHub)": github_url,
},
}
html_sidebars = {
"**": [
"about.html",
"navigation.html",
"searchbox.html",
]
}
html_static_path = []
# -- Options for HTMLHelp output ------------------------------------------
htmlhelp_basename = "RunCommandsdoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {}
latex_documents = [
(
master_doc,
"RunCommands.tex",
"RunCommands Documentation",
"Wyatt Baldwin",
"manual",
),
]
# -- Options for manual page output ---------------------------------------
man_pages = [(master_doc, "runcommands", "RunCommands Documentation", [author], 1)]
# -- Options for Texinfo output -------------------------------------------
texinfo_documents = [
(
master_doc,
"RunCommands",
"RunCommands Documentation",
author,
"RunCommands",
"One line description of project.",
"Miscellaneous",
),
]
|
mit
|
jonathandreyer/iuam-backend
|
app/notification/notifications.py
|
1
|
1160
|
# -*- coding: utf-8 -*-
import json
from notification.notifications_mysql import NotificationsMysql
class Notifications:
def __init__(self, db):
self.notifications_mysql = NotificationsMysql(db)
self.notifications_mysql.check_table(False)
def add(self, notification):
self.notifications_mysql.add(notification)
def valid(self, uuid):
self.notifications_mysql.validate(uuid)
def get_list(self, index_start, size):
return self._get(index_start, size, False)
def get_list_only_validated(self, index_start, size):
return self._get(index_start, size, True)
def _get(self, index, size, only_validated):
result = self.notifications_mysql.get_by_index(index, size, only_validated)
if result is None:
            raise Exception("failed to fetch notifications (index=%s, size=%s)" % (index, size))
# TODO Feature to implement:
# Remove key 'validate' when "only_validated=True"
return json.dumps(result, default=lambda o: o.__dict__, indent=4)
def delete(self, notification_id):
self.notifications_mysql.delete(notification_id)
def delete_all(self):
self.notifications_mysql.delete_all()
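# Editor's sketch (hypothetical usage; constructing `db` and a notification
# object is outside this file):
#
#   notifications = Notifications(db)
#   notifications.add(notification)
#   notifications.valid(uuid)                              # mark one entry validated
#   page = notifications.get_list_only_validated(0, 10)    # JSON string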
|
gpl-3.0
|
pierreg/tensorflow
|
tensorflow/contrib/learn/python/learn/trainable.py
|
10
|
2980
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""`Trainable` interface."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
class Trainable(object):
"""Interface for objects that are trainable by, e.g., `Experiment`.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def fit(self, x=None, y=None, input_fn=None, steps=None, batch_size=None,
monitors=None, max_steps=None):
"""Trains a model given training data `x` predictions and `y` targets.
Args:
x: Matrix of shape [n_samples, n_features...]. Can be iterator that
returns arrays of features. The training input samples for fitting the
model. If set, `input_fn` must be `None`.
y: Vector or matrix [n_samples] or [n_samples, n_outputs]. Can be
iterator that returns array of targets. The training target values
(class labels in classification, real numbers in regression). If set,
`input_fn` must be `None`.
input_fn: Input function returning a tuple of:
features - Dictionary of string feature name to `Tensor` or `Tensor`.
target - `Tensor` or dictionary of `Tensor` with target labels.
If input_fn is set, `x`, `y`, and `batch_size` must be `None`.
steps: Number of steps for which to train model. If `None`, train forever.
        `steps` works incrementally: calling `fit(steps=10)` twice trains for
        20 steps in total. If you don't want incremental behaviour, set
        `max_steps` instead. If set, `max_steps` must be
`None`.
batch_size: minibatch size to use on the input, defaults to first
dimension of `x`. Must be `None` if `input_fn` is provided.
monitors: List of `BaseMonitor` subclass instances. Used for callbacks
inside the training loop.
max_steps: Number of total steps for which to train model. If `None`,
train forever. If set, `steps` must be `None`.
Two calls to `fit(steps=100)` means 200 training
iterations. On the other hand, two calls to `fit(max_steps=100)` means
        that the second call will not do any iterations, since the first call
        did all 100 steps.
Returns:
`self`, for chaining.
"""
raise NotImplementedError
|
apache-2.0
|
Cynerd/linux-conf-perf
|
scripts/evaluate.py
|
1
|
3991
|
#!/usr/bin/env python3
import os
import sys
import numpy.linalg as nplag
from conf import conf
from conf import sf
import utils
def reduce_matrix_search_for_base_recurse(wset, columns, contains, ignore):
bases = []
for x in range(0, len(columns)):
if x in contains or x in ignore:
continue
colide = False
for i in range(0, len(wset)):
if wset[i] == 1 and columns[x][i] == 1:
colide = True
break
if not colide:
newset = list(wset)
onecount = 0
for i in range(0, len(newset)):
newset[i] = newset[i] | columns[x][i]
if (newset[i] == 1):
onecount += 1
contains.add(x)
if onecount == len(newset):
bases.append(set(contains))
else:
rbases = reduce_matrix_search_for_base_recurse(newset, columns, contains, ignore)
for rbase in rbases:
if not rbase in bases:
bases.append(rbase)
contains.remove(x)
return bases
def reduce_matrix_search_for_base(columns):
bases = []
ignore = []
for i in range(0, len(columns)):
wset = list(columns[i])
ignore.append(i)
bases.extend(reduce_matrix_search_for_base_recurse(wset, columns, {i}, ignore))
return bases
def reduce_matrix_remove_symbol(A, symrow, indx):
del symrow[indx]
for i in range(0, len(A)):
del A[i][indx]
def reduce_matrix(A, symrow, bases):
# Remove fixed symbols
i = len(A[0]) - 1
while i >= 0:
strue = False
sfalse = False
for y in range(0, len(A)):
if A[y][i] == 0:
sfalse = True
else:
strue = True
if (strue and not sfalse) or (sfalse and not strue):
reduce_matrix_remove_symbol(A, symrow, i)
i -= 1
# Remove duplicate symbols
i = len(A[0]) - 1
columns = []
while i >= 0:
column = []
for y in range(0, len(A)):
column.append(A[y][i])
if column in columns:
reduce_matrix_remove_symbol(A, symrow, i)
else:
columns.append(column)
i -= 1
# Search for Bases
columnsr = []
    for i in range(len(columns) - 1, -1, -1):
columnsr.append(columns[i])
basesx = reduce_matrix_search_for_base(columnsr)
if bases:
for base in basesx:
bases[0].append(base)
# Generate new Base
if bases == [[]]:
for x in range(0, len(A)):
A[x].append(1)
symrow.append(0)
def collect_data():
hashs = {}
for fl in os.listdir(sf(conf.result_folder)):
if os.path.isfile(os.path.join(sf(conf.result_folder), fl)):
hashs[fl] = [[], []]
try:
hashs.pop('NoConfig')
except KeyError:
pass
with open(sf(conf.config_map_file)) as f:
for line in f:
w = line.rstrip().split(sep=':')
            if not w[0] or w[0] not in hashs:
continue
sol = utils.config_strtoint(w[1], False)
hashs[w[0]][0] = sol
for hash, data in hashs.items():
with open(os.path.join(sf(conf.result_folder), hash)) as f:
vec = []
for ln in f:
vec.append(float(ln))
hashs[hash][1] = vec
return hashs
def build_matrix(hashs):
A = []
B = []
for hash,data in hashs.items():
A.append(data[0])
B.append(data[1])
symrow = []
for y in range(0, len(A[0])):
symrow.append([abs(A[0][y])])
for x in range(0, len(A)):
for y in range(0, len(A[0])):
if A[x][y] < 0:
A[x][y] = 0
else:
A[x][y] = 1
return A, B, symrow
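# Editor's note (interpretation of the code above): each row of A marks
# which symbols a configuration enables (1/0) and the matching row of B
# holds that configuration's measurements, so the lstsq call in evaluate()
# estimates a per-symbol contribution in the least-squares sense, e.g.
#
#   A = [[1, 0],    B = [[3.0],
#        [1, 1]]         [5.0]]   ->  contributions ~ [3.0, 2.0]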
def evaluate():
print("Collect data...")
hashs = collect_data()
print('Build matrix...')
A, B, symrow = build_matrix(hashs)
# Reduce matrix A
print('Simplify matrix...')
bases = []
reduce_matrix(A, symrow, [bases])
# Calculate value
print('Figuring values...')
R = nplag.lstsq(A, B)
# Print result
print('--------------------')
utils.build_symbol_map()
for i in range(0, len(R[0])):
if symrow[i] == 0:
print("Base", end=' ')
else:
if len(bases) > 0:
if i in bases[0]:
print("Base", end=' ')
elif len(bases) > 1:
for x in range(0, len(bases)):
if i in bases[x]:
print("Base" + x, end=' ')
for s in symrow[i]:
print(utils.smap[s], end=' ')
print("=", end=' ')
print(str(R[0][i]))
#################################################################################
if __name__ == '__main__':
evaluate()
|
gpl-2.0
|